From e81570ad0afc4b7e92214906ec28a820118a15db Mon Sep 17 00:00:00 2001 From: Ben Copeland Date: Thu, 2 Oct 2014 15:05:15 +0100 Subject: [PATCH 001/224] adding username/password support along with ssl/starttls --- notification/mail.py | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/notification/mail.py b/notification/mail.py index 34cd3a09bf3..ccf53029741 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -69,6 +69,17 @@ options: - The body of the email being sent. default: $subject required: false + username: + description: + - If SMTP requires username + default: null + required: false + version_added: "1.6" + password: + - If SMTP requires password + default: null + required: false + version_added: "1.6" host: description: - The mail server @@ -122,6 +133,7 @@ EXAMPLES = ''' import os import sys import smtplib +import ssl try: from email import encoders @@ -142,6 +154,8 @@ def main(): module = AnsibleModule( argument_spec = dict( + username = dict(default=None), + password = dict(default=None), host = dict(default='localhost'), port = dict(default='25'), sender = dict(default='root', aliases=['from']), @@ -156,6 +170,8 @@ def main(): ) ) + username = module.params.get('username') + password = module.params.get('password') host = module.params.get('host') port = module.params.get('port') sender = module.params.get('sender') @@ -167,17 +183,23 @@ def main(): attach_files = module.params.get('attach') headers = module.params.get('headers') charset = module.params.get('charset') - sender_phrase, sender_addr = parseaddr(sender) if not body: body = subject try: + smtp = smtplib.SMTP_SSL(host, port=int(port)) + except (smtplib.SMTPException, ssl.SSLError): smtp = smtplib.SMTP(host, port=int(port)) - except Exception, e: - module.fail_json(rc=1, msg='Failed to send mail to server %s on port %s: %s' % (host, port, e)) - + smtp.ehlo() + if username and password: + if smtp.has_extn('STARTTLS'): + smtp.starttls() 
+ try: + smtp.login(username, password) + except smtplib.SMTPAuthenticationError: + module.fail_json(msg="Authentication to %s:%s failed, please check your username and/or password" % (host, port)) msg = MIMEMultipart() msg['Subject'] = subject From 11e3b8e88740efb2d87a76ca1d52de087064903d Mon Sep 17 00:00:00 2001 From: Jens Depuydt Date: Fri, 17 Oct 2014 16:23:25 +0200 Subject: [PATCH 002/224] added postgresql_lang.py --- database/postgresql_lang.py | 256 ++++++++++++++++++++++++++++++++++++ 1 file changed, 256 insertions(+) create mode 100644 database/postgresql_lang.py diff --git a/database/postgresql_lang.py b/database/postgresql_lang.py new file mode 100644 index 00000000000..ec0507b5508 --- /dev/null +++ b/database/postgresql_lang.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Jens Depuydt + +DOCUMENTATION = ''' +--- +module: postgresql_lang +short_description: Adds, removes or changes procedural languages with a PostgreSQL database. +description: + - Adds, removes or changes procedural languages with a PostgreSQL database. + - This module allows you to add a language, remote a language or change the trust + relationship with a PostgreSQL database. The module can be used on the machine + where executed or on a remote host. + - When removing a language from a database, it is possible that dependencies prevent + the database from being removed. In that case, you can specify casade to + automatically drop objects that depend on the language (such as functions in the + language). In case the language can't be deleted because it is required by the + database system, you can specify fail_on_drop=no to ignore the error. + - Be carefull when marking a language as trusted since this could be a potential + security breach. Untrusted languages allow only users with the PostgreSQL superuser + privilege to use this language to create new functions. 
+version_added: "1.7" +options: + lang: + description: + - name of the procedural language to add, remove or change + required: true + default: null + trust: + description: + - make this language trusted for the selected db + required: false + default: no + choices: [ "yes", "no" ] + db: + description: + - name of database where the language will be added, removed or changed + required: false + default: null + force_trust: + description: + - marks the language as trusted, even if it's marked as untrusted in pg_pltemplate. + - use with care! + required: false + default: no + choices: [ "yes", "no" ] + fail_on_drop: + description: + - if C(yes), fail when removing a language. Otherwise just log and continue + - in some cases, it is not possible to remove a language (used by the db-system). When dependencies block the removal, consider using C(cascade). + required: false + default: 'yes' + choices: [ "yes", "no" ] + cascade: + description: + - when dropping a language, also delete object that depend on this language. + - only used when C(state=absent). + required: false + default: no + choices: [ "yes", "no" ] + port: + description: + - Database port to connect to. + required: false + default: 5432 + login_user: + description: + - User used to authenticate with PostgreSQL + required: false + default: postgres + login_password: + description: + - Password used to authenticate with PostgreSQL (must match C(login_user)) + required: false + default: null + login_host: + description: + - Host running PostgreSQL where you want to execute the actions. + required: false + default: localhost + state: + description: + - The state of the language for the selected database + required: false + default: present + choices: [ "present", "absent" ] +notes: + - The default authentication assumes that you are either logging in as or + sudo'ing to the postgres account on the host. + - This module uses psycopg2, a Python PostgreSQL database adapter. 
You must + ensure that psycopg2 is installed on the host before using this module. If + the remote host is the PostgreSQL server (which is the default case), then + PostgreSQL must also be installed on the remote host. For Ubuntu-based + systems, install the postgresql, libpq-dev, and python-psycopg2 packages + on the remote host before using this module. +requirements: [ psycopg2 ] +author: Jens Depuydt +''' + +EXAMPLES = ''' +# Add language pltclu to database testdb if it doesn't exist: +- postgresql_lang db=testdb lang=pltclu state=present + +# Add language pltclu to database testdb if it doesn't exist and mark it as trusted: +# Marks the language as trusted if it exists but isn't trusted yet +# force_trust makes sure that the language will be marked as trusted +- postgresql_lang db=testdb lang=pltclu state=present trust=yes force_trust=yes + +# Remove language pltclu from database testdb: +- postgresql_lang: db=testdb lang=pltclu state=absent + +# Remove language pltclu from database testdb and remove all dependencies: +- postgresql_lang: db=testdb lang=pltclu state=absent cascade=yes + +# Remove language c from database testdb but ignore errors if something prevents the removal: +- postgresql_lang: db=testdb lang=pltclu state=absent fail_on_drop=no +''' + +try: + import psycopg2 +except ImportError: + postgresqldb_found = False +else: + postgresqldb_found = True + +def lang_exists(cursor, lang): + """Checks if language exists for db""" + query = "SELECT lanname FROM pg_language WHERE lanname='%s'" % lang + cursor.execute(query) + return cursor.rowcount > 0 + +def lang_istrusted(cursor, lang): + """Checks if language is trusted for db""" + query = "SELECT lanpltrusted FROM pg_language WHERE lanname='%s'" % lang + cursor.execute(query) + return cursor.fetchone()[0] + +def lang_altertrust(cursor, lang, trust): + """Changes if language is trusted for db""" + query = "UPDATE pg_language SET lanpltrusted = %s WHERE lanname=%s" + cursor.execute(query, (trust, lang)) 
+ return True + +def lang_add(cursor, lang, trust): + """Adds language for db""" + if trust: + query = 'CREATE TRUSTED LANGUAGE "%s"' % lang + else: + query = 'CREATE LANGUAGE "%s"' % lang + cursor.execute(query) + return True + +def lang_drop(cursor, lang, cascade): + """Drops language for db""" + cursor.execute("SAVEPOINT ansible_pgsql_lang_drop") + try: + if cascade: + cursor.execute("DROP LANGUAGE \"%s\" CASCADE" % lang) + else: + cursor.execute("DROP LANGUAGE \"%s\"" % lang) + except: + cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop") + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop") + return False + cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop") + return True + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default="postgres"), + login_password=dict(default=""), + login_host=dict(default=""), + db=dict(required=True), + port=dict(default='5432'), + lang=dict(required=True), + state=dict(default="present", choices=["absent", "present"]), + trust=dict(type='bool', default='no'), + force_trust=dict(type='bool', default='no'), + cascade=dict(type='bool', default='no'), + fail_on_drop=dict(type='bool', default='yes'), + ), + supports_check_mode = True + ) + + db = module.params["db"] + port = module.params["port"] + lang = module.params["lang"] + state = module.params["state"] + trust = module.params["trust"] + force_trust = module.params["force_trust"] + cascade = module.params["cascade"] + fail_on_drop = module.params["fail_on_drop"] + + if not postgresqldb_found: + module.fail_json(msg="the python psycopg2 module is required") + + params_map = { + "login_host":"host", + "login_user":"user", + "login_password":"password", + "port":"port", + "db":"database" + } + kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() + if k in params_map and v != "" ) + try: + db_connection = psycopg2.connect(**kw) + cursor = db_connection.cursor() + except Exception, e: + 
module.fail_json(msg="unable to connect to database: %s" % e) + changed = False + lang_dropped = False + kw = dict(db=db,lang=lang,trust=trust) + + if state == "present": + if lang_exists(cursor, lang): + lang_trusted = lang_istrusted(cursor, lang) + if (lang_trusted and not trust) or (not lang_trusted and trust): + if module.check_mode: + changed = True + else: + changed = lang_altertrust(cursor, lang, trust) + else: + if module.check_mode: + changed = True + else: + changed = lang_add(cursor, lang, trust) + if force_trust: + changed = lang_altertrust(cursor, lang, trust) + + else: + if lang_exists(cursor, lang): + if module.check_mode: + changed = True + kw['lang_dropped'] = True + else: + changed = lang_drop(cursor, lang, cascade) + if fail_on_drop and not changed: + msg = "unable to drop language, use cascade to delete dependencies or fail_on_drop=no to ignore" + module.fail_json(msg=msg) + kw['lang_dropped'] = changed + + if changed: + if module.check_mode: + db_connection.rollback() + else: + db_connection.commit() + + kw['changed'] = changed + module.exit_json(**kw) + +# import module snippets +from ansible.module_utils.basic import * +main() From 7c5d9845574edc52df977e7d1628e0c82529ae18 Mon Sep 17 00:00:00 2001 From: Jim Richardson Date: Sun, 19 Oct 2014 14:50:55 -0700 Subject: [PATCH 003/224] initial stab at pushover notification module --- notification/pushover | 127 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 notification/pushover diff --git a/notification/pushover b/notification/pushover new file mode 100644 index 00000000000..ba9bafc9792 --- /dev/null +++ b/notification/pushover @@ -0,0 +1,127 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + + +# vim: set expandtab: +### +# Copyright (c) 2012, Jim Richardson +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions, and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions, and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the author of this software nor the name of +# contributors to this software may be used to endorse or promote products +# derived from this software without specific prior written consent. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +### + + +''' +License: GPL V2 See LICENSE file +Author: Jim Richardson +email: weaselkeeper@gmail.com + +''' + +DOCUMENTATION = ''' +--- +module: pushover +version_added: "1.8" +short_description: Send notifications via u(https://pushover.net) +description: + - Send notifications via pushover, to subscriber list of devices, and email + addresses. Requires pushover app on devices. +notes: + - You will require a pushover.net account to use this module. 
But no account + is required to receive messages. +options: + msg: + description: + What message you wish to send. + required: true + app_token: + description: + Pushover issued token identifying your pushover app. + required: true + user_key: + description: + Pushover issued authentication key for your user. + required: true + pri: + description: Message priority (see u(https://pushover.net) for details.) + required: false + +author: Jim Richardson +''' + +EXAMPLES = ''' +- local_action: pushover msg="{{inventory_hostname}} has exploded in flames, + It is now time to panic" app_token=wxfdksl user_key=baa5fe97f2c5ab3ca8f0bb59 +''' + +import urllib +import httplib + + +class pushover(object): + ''' Instantiates a pushover object, use it to send notifications ''' + + def __init__(self): + self.host, self.port = 'api.pushover.net', 443 + + def run(self): + ''' Do, whatever it is, we do. ''' + # parse config + conn = httplib.HTTPSConnection(self.host, self.port) + conn.request("POST", "/1/messages.json", + urllib.urlencode(self.options), + {"Content-type": "application/x-www-form-urlencoded"}) + conn.getresponse() + return + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + msg=dict(required=True), + app_token=dict(required=True), + user_key=dict(required=True), + pri=dict(required=False, default=0), + ), + ) + + msg_object = pushover() + msg_object.options = {} + msg_object.options['user'] = module.params['user_key'] + msg_object.options['token'] = module.params['app_token'] + msg_object.options['priority'] = module.params['pri'] + msg_object.options['message'] = module.params['msg'] + try: + msg_object.run() + except: + module.fail_json(msg='Wibble') + + module.exit_json(msg="OK", changed=False) + +# import module snippets +from ansible.module_utils.basic import * +main() From 64242fb1a6cfa9a06e8726ef5bb864ba49593a4a Mon Sep 17 00:00:00 2001 From: Jim Richardson Date: Sun, 19 Oct 2014 14:55:46 -0700 Subject: [PATCH 004/224] slight tweak to preamble 
to bring into common with other ansible modules --- notification/pushover | 37 ++++++++++++------------------------- 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/notification/pushover b/notification/pushover index ba9bafc9792..a05b64d8089 100644 --- a/notification/pushover +++ b/notification/pushover @@ -1,35 +1,22 @@ #!/usr/bin/python # -*- coding: utf-8 -*- - - -# vim: set expandtab: -### # Copyright (c) 2012, Jim Richardson # All rights reserved. # -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. # -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions, and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions, and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the author of this software nor the name of -# contributors to this software may be used to endorse or promote products -# derived from this software without specific prior written consent. +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. # -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . ### From f0d81a5290b125bb794b3737a7323eb83b092246 Mon Sep 17 00:00:00 2001 From: Jim Richardson Date: Sun, 19 Oct 2014 14:57:10 -0700 Subject: [PATCH 005/224] remove extraneous info from preamble --- notification/pushover | 8 -------- 1 file changed, 8 deletions(-) diff --git a/notification/pushover b/notification/pushover index a05b64d8089..6b7c32d758d 100644 --- a/notification/pushover +++ b/notification/pushover @@ -20,14 +20,6 @@ ### - -''' -License: GPL V2 See LICENSE file -Author: Jim Richardson -email: weaselkeeper@gmail.com - -''' - DOCUMENTATION = ''' --- module: pushover From b0ec83ef08ddf845567e3b3f6eb65ef6b825e4f2 Mon Sep 17 00:00:00 2001 From: Jim Richardson Date: Sun, 19 Oct 2014 21:19:25 -0700 Subject: [PATCH 006/224] clarification of error and success messages --- notification/pushover | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/notification/pushover b/notification/pushover index 6b7c32d758d..7fd66333f54 100644 --- a/notification/pushover +++ b/notification/pushover @@ -97,9 +97,9 @@ def main(): try: msg_object.run() except: - module.fail_json(msg='Wibble') + module.fail_json(msg='Unable to send msg via pushover') - module.exit_json(msg="OK", changed=False) + module.exit_json(msg=msg, changed=False) # import module snippets from ansible.module_utils.basic import * From 
4dd6c8204a70f1c53687554ae8df94f883dd3b4c Mon Sep 17 00:00:00 2001 From: Simon Aquino Date: Tue, 21 Oct 2014 23:47:03 +0100 Subject: [PATCH 007/224] Facter module should return custom facts The ansible facter module should also return puppet custom facts by default. --- system/facter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/facter.py b/system/facter.py index a72cdc6536f..a4912835447 100644 --- a/system/facter.py +++ b/system/facter.py @@ -45,7 +45,7 @@ def main(): argument_spec = dict() ) - cmd = ["/usr/bin/env", "facter", "--json"] + cmd = ["/usr/bin/env", "facter", "--puppet", "--json"] rc, out, err = module.run_command(cmd, check_rc=True) module.exit_json(**json.loads(out)) From 344713365f89bdcaa328b313949f213a6190a55c Mon Sep 17 00:00:00 2001 From: Chris Schmidt Date: Thu, 23 Oct 2014 21:02:20 -0600 Subject: [PATCH 008/224] Added the download_artifact module The download_artifact module resolves a maven dependency coordinate and downloads the artifact to the target path --- packaging/download_artifact.py | 367 +++++++++++++++++++++++++++++++++ 1 file changed, 367 insertions(+) create mode 100644 packaging/download_artifact.py diff --git a/packaging/download_artifact.py b/packaging/download_artifact.py new file mode 100644 index 00000000000..16855c142b3 --- /dev/null +++ b/packaging/download_artifact.py @@ -0,0 +1,367 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014, Chris Schmidt +# +# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact +# as a reference and starting point. 
+# +# + +__author__ = 'cschmidt' + +from lxml import etree +from urllib2 import Request, urlopen, URLError, HTTPError +import os +import hashlib +import sys +import base64 + +DOCUMENTATION = ''' +--- +module: download_artifact +short_description: Downloads an Artifact from a Maven Repository +version_added: "historical" +description: + - Downloads an artifact from a maven repository given the maven coordinates provided to the module. Can retrieve + - snapshots or release versions of the artifact and will resolve the latest available version if one is not + - available. +author: Chris Schmidt +requirements: + - python libxml + - python urllib2 +options: + group_id: + description: The Maven groupId coordinate + required: true + default: null + version_added: 0.0.1 + artifact_id: + description: The maven artifactId coordinate + required: true + default: null + version_added: 0.0.1 + version: + description: The maven version coordinate + required: false + default: latest + version_added: 0.0.1 + classifier: + description: The maven classifier coordinate + required: false + default: null + version_added: 0.0.1 + extension: + description: The maven type/extension coordinate + required: false + default: jar + version_added: 0.0.1 + repository_url: + description: The URL of the Maven Repository to download from + required: false + default: http://repo1.maven.org/maven2 + version_added: 0.0.1 + username: + description: The username to authenticate as to the Maven Repository + required: false + default: null + version_added: 0.0.1 + password: + description: The passwor to authenticate with to the Maven Repository + required: false + default: null + version_added: 0.0.1 + target: + description: The path where the artifact should be written to + required: true + default: false + version_added: 0.0.1 + state: + description: The desired state of the artifact + required: true + default: present + choices: [present,absent] + version_added: 0.0.1 +''' + +EXAMPLES = ''' +# Download 
the latest version of the commons-collections artifact from Maven Central +- download_artifact: group_id=org.apache.commons artifact_id=commons-collections target=/tmp/commons-collections-latest.jar + +# Download Apache Commons-Collections 3.2 from Maven Central +- download_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 target=/tmp/commons-collections-3.2.jar + +# Download an artifact from a private repository requiring authentication +- download_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass target=/tmp/library-name-latest.jar + +# Download a WAR File to the Tomcat webapps directory to be deployed +- download_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven target=/var/lib/tomcat7/webapps/web-app.war +''' + + +class Artifact(object): + def __init__(self, group_id, artifact_id, version, classifier=None, extension=jar): + if not group_id: + raise ValueError("group_id must be set") + if not artifact_id: + raise ValueError("artifact_id must be set") + + self.group_id = group_id + self.artifact_id = artifact_id + self.version = version + self.classifier = classifier + + if not extension: + self.extension = "jar" + else: + self.extension = extension + + def is_snapshot(self): + return self.version and self.version.endswith("SNAPSHOT") + + def path(self, with_version=True): + base = self.group_id.replace(".", "/") + "/" + self.artifact_id + if with_version and self.version: + return base + "/" + self.version + else: + return base + + def _generate_filename(self): + if not self.classifier: + return self.artifact_id + "." + self.extension + else: + return self.artifact_id + "-" + self.classifier + "." 
+ self.extension + + def get_filename(self, filename=None): + if not filename: + filename = self._generate_filename() + elif os.path.isdir(filename): + filename = os.path.join(filename, self._generate_filename()) + return filename + + def __str__(self): + if self.classifier: + return "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version) + elif self.extension != "jar": + return "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version) + else: + return "%s:%s:%s" % (self.group_id, self.artifact_id, self.version) + + @staticmethod + def parse(input): + parts = input.split(":") + if len(parts) >= 3: + g = parts[0] + a = parts[1] + v = parts[len(parts) - 1] + t = None + c = None + if len(parts) == 4: + t = parts[2] + if len(parts) == 5: + t = parts[2] + c = parts[3] + return Artifact(g, a, v, c, t) + else: + return None + + +class MavenDownloader: + def __init__(self, base="http://repo1.maven.org/maven2", username=None, password=None): + if base.endswith("/"): + base = base.rstrip("/") + self.base = base + self.user_agent = "Maven Artifact Downloader/1.0" + self.username = username + self.password = password + + def _find_latest_version_available(self, artifact): + path = "/%s/maven-metadata.xml" % (artifact.path(False)) + xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r)) + v = xml.xpath("/metadata/versioning/versions/version[last()]/text()") + if v: + return v[0] + + def find_uri_for_artifact(self, artifact): + if artifact.is_snapshot(): + path = "/%s/maven-metadata.xml" % (artifact.path()) + xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r)) + basexpath = "/metadata/versioning/" + p = xml.xpath(basexpath + "/snapshotVersions/snapshotVersion") + if p: + return self._find_matching_artifact(p, artifact) + else: + return self._uri_for_artifact(artifact) + + def _find_matching_artifact(self, elems, 
artifact): + filtered = filter(lambda e: e.xpath("extension/text() = '%s'" % artifact.extension), elems) + if artifact.classifier: + filtered = filter(lambda e: e.xpath("classifier/text() = '%s'" % artifact.classifier), elems) + + if len(filtered) > 1: + print( + "There was more than one match. Selecting the first one. Try adding a classifier to get a better match.") + elif not len(filtered): + print("There were no matches.") + return None + + elem = filtered[0] + value = elem.xpath("value/text()") + return self._uri_for_artifact(artifact, value[0]) + + def _uri_for_artifact(self, artifact, version=None): + if artifact.is_snapshot() and not version: + raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact)) + elif not artifact.is_snapshot(): + version = artifact.version + if artifact.classifier: + return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension + + return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "." 
+ artifact.extension + + def _request(self, url, failmsg, f): + if not self.username: + headers = {"User-Agent": self.user_agent} + else: + headers = { + "User-Agent": self.user_agent, + "Authorization": "Basic " + base64.b64encode(self.username + ":" + self.password) + } + req = Request(url, None, headers) + try: + response = urlopen(req) + except HTTPError, e: + raise ValueError(failmsg + " because of " + str(e) + "for URL " + url) + except URLError, e: + raise ValueError(failmsg + " because of " + str(e) + "for URL " + url) + else: + return f(response) + + + def download(self, artifact, filename=None): + filename = artifact.get_filename(filename) + if not artifact.version: + artifact = Artifact(artifact.group_id, artifact.artifact_id, self._find_latest_version_available(artifact), + artifact.classifier, artifact.extension) + + url = self.find_uri_for_artifact(artifact) + if not self.verify_md5(filename, url + ".md5"): + response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r) + if response: + with open(filename, 'w') as f: + # f.write(response.read()) + self._write_chunks(response, f, report_hook=self.chunk_report) + return True + else: + return False + else: + return True + + def chunk_report(self, bytes_so_far, chunk_size, total_size): + percent = float(bytes_so_far) / total_size + percent = round(percent * 100, 2) + sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" % + (bytes_so_far, total_size, percent)) + + if bytes_so_far >= total_size: + sys.stdout.write('\n') + + def _write_chunks(self, response, file, chunk_size=8192, report_hook=None): + total_size = response.info().getheader('Content-Length').strip() + total_size = int(total_size) + bytes_so_far = 0 + + while 1: + chunk = response.read(chunk_size) + bytes_so_far += len(chunk) + + if not chunk: + break + + file.write(chunk) + if report_hook: + report_hook(bytes_so_far, chunk_size, total_size) + + return bytes_so_far + + def verify_md5(self, file, remote_md5): + 
if not os.path.exists(file): + return False + else: + local_md5 = self._local_md5(file) + remote = self._request(remote_md5, "Failed to download MD5", lambda r: r.read()) + return local_md5 == remote + + def _local_md5(self, file): + md5 = hashlib.md5() + with open(file, 'rb') as f: + for chunk in iter(lambda: f.read(8192), ''): + md5.update(chunk) + return md5.hexdigest() + + +def main(): + module = AnsibleModule( + argument_spec = dict( + group_id = dict(default=None), + artifact_id = dict(default=None), + version = dict(default=None), + classifier = dict(default=None), + extension = dict(default=None), + repository_url = dict(default=None), + username = dict(default=None), + password = dict(default=None), + state = dict(default="latest", choices=["present","absent"]), + target = dict(default=None), + ) + ) + + group_id = module.params["group_id"] + artifact_id = module.params["artifact_id"] + version = module.params["version"] + classifier = module.params["classifier"] + extension = module.params["extension"] + repository_url = module.params["repository_url"] + repository_username = module.params["username"] + repository_password = module.params["password"] + state = module.params["state"] + target = module.params["target"] + + if not repository_url: + repository_url = "http://repo1.maven.org/maven2" + + downloader = MavenDownloader(repository_url, repository_username, repository_password) + + try: + artifact = Artifact(group_id, artifact_id, version, classifier, extension) + except ValueError as e: + module.fail_json(msg=e.args[0]) + + prev_state = "absent" + if os.path.lexists(target): + prev_state = "present" + else: + path = os.path.dirname(target) + if not os.path.exists(path): + os.makedirs(path) + + if prev_state == "present": + if state == "latest": + artifact_uri = downloader.find_uri_for_artifact(artifact) + if downloader.verify_md5(target, artifact_uri + ".md5"): + module.exit_json(target=target, state=state, changed=False) + else: + 
module.exit_json(target=target, state=state, changed=False) + try: + if downloader.download(artifact, target): + module.exit_json(state=state, target=target, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True) + else: + module.fail_json(msg="Unable to download the artifact") + except ValueError as e: + module.fail_json(msg=e.args[0]) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main() \ No newline at end of file From 7e26d715d3f79d26542ff146992a8e4d5af16191 Mon Sep 17 00:00:00 2001 From: Chris Schmidt Date: Thu, 23 Oct 2014 21:06:14 -0600 Subject: [PATCH 009/224] Updated w/ license MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added license Added TODO for a “latest” state Removed pending “latest” state work --- packaging/download_artifact.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) mode change 100644 => 100755 packaging/download_artifact.py diff --git a/packaging/download_artifact.py b/packaging/download_artifact.py old mode 100644 new mode 100755 index 16855c142b3..741646dcdf3 --- a/packaging/download_artifact.py +++ b/packaging/download_artifact.py @@ -6,7 +6,18 @@ # Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact # as a reference and starting point. # +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. # +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . __author__ = 'cschmidt' @@ -98,7 +109,6 @@ EXAMPLES = ''' - download_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven target=/var/lib/tomcat7/webapps/web-app.war ''' - class Artifact(object): def __init__(self, group_id, artifact_id, version, classifier=None, extension=jar): if not group_id: @@ -311,7 +321,7 @@ def main(): repository_url = dict(default=None), username = dict(default=None), password = dict(default=None), - state = dict(default="latest", choices=["present","absent"]), + state = dict(default="latest", choices=["present","absent"]), # TODO - Implement a "latest" state target = dict(default=None), ) ) @@ -346,12 +356,8 @@ def main(): os.makedirs(path) if prev_state == "present": - if state == "latest": - artifact_uri = downloader.find_uri_for_artifact(artifact) - if downloader.verify_md5(target, artifact_uri + ".md5"): - module.exit_json(target=target, state=state, changed=False) - else: - module.exit_json(target=target, state=state, changed=False) + module.exit_json(target=target, state=state, changed=False) + try: if downloader.download(artifact, target): module.exit_json(state=state, target=target, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True) From f532ef1996e3427f3674d42570148aae5558330a Mon Sep 17 00:00:00 2001 From: Ruben Van den Bossche Date: Sat, 1 Nov 2014 18:30:40 +0100 Subject: [PATCH 010/224] Added support for --name and --type arguments when following a new log --- monitoring/logentries.py | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/monitoring/logentries.py b/monitoring/logentries.py index 373f4f777ff..bdec2fc67b6 100644 --- a/monitoring/logentries.py +++ b/monitoring/logentries.py @@ -35,11 +35,20 @@ options: 
choices: [ 'present', 'absent' ] required: false default: present + name: + description: + - name of the log + required: false + type: + description: + - type of the log + required: false + notes: - Requires the LogEntries agent which can be installed following the instructions at logentries.com ''' EXAMPLES = ''' -- logentries: path=/var/log/nginx/access.log state=present +- logentries: path=/var/log/nginx/access.log state=present name=nginx-access-log - logentries: path=/var/log/nginx/error.log state=absent ''' @@ -53,7 +62,7 @@ def query_log_status(module, le_path, path, state="present"): return False -def follow_log(module, le_path, logs): +def follow_log(module, le_path, logs, name=None, logtype=None): """ Follows one or more logs if not already followed. """ followed_count = 0 @@ -64,7 +73,13 @@ def follow_log(module, le_path, logs): if module.check_mode: module.exit_json(changed=True) - rc, out, err = module.run_command([le_path, 'follow', log]) + + cmd = [le_path, 'follow', log] + if name != None: + cmd.append('--name ' + str(name)) + if logtype != None: + cmd.append('--type ' + str(logtype)) + rc, out, err = module.run_command(' '.join(cmd)) if not query_log_status(module, le_path, log): module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip())) @@ -104,8 +119,10 @@ def unfollow_log(module, le_path, logs): def main(): module = AnsibleModule( argument_spec = dict( - path = dict(aliases=["name"], required=True), - state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]) + path = dict(required=True), + state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]), + name = dict(required=False, default=None), + type = dict(required=False, default=None) ), supports_check_mode=True ) @@ -119,7 +136,7 @@ def main(): logs = filter(None, logs) if p["state"] in ["present", "followed"]: - follow_log(module, le_path, logs) + follow_log(module, le_path, logs, name=p['name'], logtype=p['type']) elif 
p["state"] in ["absent", "unfollowed"]: unfollow_log(module, le_path, logs) From 040135dbbacf51b37f0be75b5348379d8bec2815 Mon Sep 17 00:00:00 2001 From: Sebastian Gumprich Date: Thu, 20 Nov 2014 20:48:41 +0000 Subject: [PATCH 011/224] Added documentation and example for port ranges. Also added punctuation marks. --- system/firewalld.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/system/firewalld.py b/system/firewalld.py index 22db165aad3..81e7925929d 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -23,22 +23,22 @@ DOCUMENTATION = ''' module: firewalld short_description: Manage arbitrary ports/services with firewalld description: - - This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules + - This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules. version_added: "1.4" options: service: description: - - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services" + - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services." required: false default: null port: description: - - "Name of a port to add/remove to/from firewalld must be in the form PORT/PROTOCOL" + - "Name of a port or port range to add/remove to/from firewalld. Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges." required: false default: null rich_rule: description: - - "Rich rule to add/remove to/from firewalld" + - "Rich rule to add/remove to/from firewalld." 
required: false default: null zone: @@ -49,21 +49,21 @@ options: choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block"] permanent: description: - - "Should this configuration be in the running firewalld configuration or persist across reboots" + - "Should this configuration be in the running firewalld configuration or persist across reboots." required: true default: true state: description: - - "Should this port accept(enabled) or reject(disabled) connections" + - "Should this port accept(enabled) or reject(disabled) connections." required: true default: enabled timeout: description: - - "The amount of time the rule should be in effect for when non-permanent" + - "The amount of time the rule should be in effect for when non-permanent." required: false default: 0 notes: - - Not tested on any debian based system + - Not tested on any debian based system. requirements: [ firewalld >= 0.2.11 ] author: Adam Miller ''' @@ -71,6 +71,7 @@ author: Adam Miller EXAMPLES = ''' - firewalld: service=https permanent=true state=enabled - firewalld: port=8081/tcp permanent=true state=disabled +- firewalld: port=161-162/udp permanent=true state=enabled - firewalld: zone=dmz service=http permanent=true state=enabled - firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled ''' From 20ef2696bcfa2b2ed68b75d4f79b4f88f49216aa Mon Sep 17 00:00:00 2001 From: Alexander Gubin Date: Wed, 26 Nov 2014 11:27:29 +0100 Subject: [PATCH 012/224] Fix lvol: Find LVM commands in PATH env --- system/lvol.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/system/lvol.py b/system/lvol.py index 96f1b846e27..e9d477edf86 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -152,8 +152,9 @@ def main(): else: unit = size_unit + lvs_cmd = module.get_bin_path("lvs", required=True) rc, current_lvs, err = module.run_command( - "lvs --noheadings -o lv_name,size --units %s --separator ';' %s" % 
(unit, vg)) + "%s --noheadings -o lv_name,size --units %s --separator ';' %s" % (lvs_cmd, unit, vg)) if rc != 0: if state == 'absent': @@ -185,7 +186,8 @@ def main(): if module.check_mode: changed = True else: - rc, _, err = module.run_command("lvcreate -n %s -%s %s%s %s" % (lv, size_opt, size, size_unit, vg)) + lvcreate_cmd = module.get_bin_path("lvcreate", required=True) + rc, _, err = module.run_command("%s -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg)) if rc == 0: changed = True else: @@ -197,7 +199,8 @@ def main(): module.exit_json(changed=True) if not force: module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name'])) - rc, _, err = module.run_command("lvremove --force %s/%s" % (vg, this_lv['name'])) + lvremove_cmd = module.get_bin_path("lvremove", required=True) + rc, _, err = module.run_command("%s --force %s/%s" % (lvremove_cmd, vg, this_lv['name'])) if rc == 0: module.exit_json(changed=True) else: @@ -209,11 +212,12 @@ def main(): ### resize LV tool = None if size > this_lv['size']: - tool = 'lvextend' + tool = module.get_bin_path("lvextend", required=True) elif size < this_lv['size']: if not force: module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name'])) - tool = 'lvreduce --force' + tool = module.get_bin_path("lvextend", required=True) + tool.append("--force") if tool: if module.check_mode: From 4446e4642ec652eaa4169c3fa10771d1c778a8f9 Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Thu, 27 Nov 2014 10:47:33 -0600 Subject: [PATCH 013/224] Added new module to support LXC containers The new module will allow users to control LXC containers from ansible. The module was built for use in LXC >= 1.0 or greater and implements most of what can be done using the various lxc clients with regards to running containers. This first module is geared only at managing lxc containers. 
The module provides: build containers destroy containers archive containers info from a single container start / stop / restart containers run commands within containers add/modify lxc config for a container supports backends including LVM --- cloud/lxc/__init__.py | 0 cloud/lxc/lxc-container.py | 1469 ++++++++++++++++++++++++++++++++++++ 2 files changed, 1469 insertions(+) create mode 100644 cloud/lxc/__init__.py create mode 100644 cloud/lxc/lxc-container.py diff --git a/cloud/lxc/__init__.py b/cloud/lxc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/lxc/lxc-container.py b/cloud/lxc/lxc-container.py new file mode 100644 index 00000000000..b72890972db --- /dev/null +++ b/cloud/lxc/lxc-container.py @@ -0,0 +1,1469 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Kevin Carter +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION = """ +--- +module: lxc-container +short_description: Manage LXC Containers +version_added: 1.8.0 +description: + - Management of LXC containers +author: Kevin Carter +options: + name: + description: + - Name of a container. + required: true + backing_store: + choices: + - dir + - lvm + - loop + - btrfs + description: + - Backend storage type for the container. + required: false + default: dir + template: + description: + - Name of the template to use within an LXC create. 
+ required: false + default: ubuntu + template_options: + description: + - Template options when building the container. + required: false + config: + description: + - Path to the LXC configuration file. + required: false + default: /etc/lxc/default.conf + lv_name: + description: + - Name of the logical volume, defaults to the container name. + default: $CONTAINER_NAME + required: false + vg_name: + description: + - If Backend store is lvm, specify the name of the volume group. + default: lxc + required: false + thinpool: + description: + - Use LVM thin pool called TP. + required: false + fs_type: + description: + - Create fstype TYPE. + default: ext4 + required: false + fs_size: + description: + - File system Size. + default: 5G + required: false + directory: + description: + - Place rootfs directory under DIR. + required: false + zfs_root: + description: + - Create zfs under given zfsroot. + required: false + container_command: + description: + - Run a command within a container. + required: false + lxc_path: + description: + - Place container under PATH + required: false + container_log: + choices: + - true + - false + description: + - Enable a container log for host actions to the container. + default: false + container_log_level: + choices: + - INFO + - ERROR + - DEBUG + description: + - Set the log level for a container where *container_log* was set. + required: false + default: INFO + archive: + choices: + - true + - false + description: + - Create an archive of a container. This will create a tarball of the + running container. + default: false + archive_path: + description: + - Path the save the archived container. If the path does not exist + the archive method will attempt to create it. + default: /tmp + archive_compression: + choices: + - gzip + - bzip2 + - none + description: + - Type of compression to use when creating an archive of a running + container. 
+ default: gzip + state: + choices: + - started + - stopped + - restarted + - absent + - frozen + description: + - Start a container right after it's created. + required: false + default: started + container_config: + description: + - list of 'key=value' options to use when configuring a container. + required: false +requirements: ['lxc >= 1.0', 'python2-lxc >= 0.1'] +notes: + - Containers must have a unique name. If you attempt to create a container + with a name that already exists in the users namespace the module will + simply return as "unchanged". + - The "container_command" can be used with any state except "absent". If + used with state "stopped" the container will be "started", the command + executed, and then the container "stopped" again. Likewise if the state + is "stopped" and the container does not exist it will be first created, + "started", the command executed, and then "stopped". If you use a "|" + in the variable you can use common script formatting within the variable + iteself The "container_command" option will always execute as BASH. + When using "container_command" a log file is created in the /tmp/ directory + which contains both stdout and stderr of any command executed. + - If "archive" is **true** the system will attempt to create a compressed + tarball of the running container. The "archive" option supports LVM backed + containers and will create a snapshot of the running container when + creating the archive. 
+ - If your distro does not have a package for "python2-lxc", which is a + requirement for this module, it can be installed from source at + "https://github.com/lxc/python2-lxc" +""" + +EXAMPLES = """ +- name: Create a started container + lxc-container: + name: test-container-started + container_log: true + template: ubuntu + state: started + template_options: --release trusty + +- name: Create a stopped container + lxc-container: + name: test-container-stopped + container_log: true + template: ubuntu + state: stopped + template_options: --release trusty + +- name: Create a frozen container + lxc-container: + name: test-container-frozen + container_log: true + template: ubuntu + state: frozen + template_options: --release trusty + container_command: | + echo 'hello world.' | tee /opt/started-frozen + +# Create filesystem container, configure it, and archive it, and start it. +- name: Create filesystem container + lxc-container: + name: test-container-config + container_log: true + template: ubuntu + state: started + archive: true + archive_compression: none + container_config: + - "lxc.aa_profile=unconfined" + - "lxc.cgroup.devices.allow=a *:* rmw" + template_options: --release trusty + +# Create an lvm container, run a complex command in it, add additional +# configuration to it, create an archive of it, and finally leave the container +# in a frozen state. The container archive will be compressed using bzip2 +- name: Create an lvm container + lxc-container: + name: test-container-lvm + container_log: true + template: ubuntu + state: frozen + backing_store: lvm + template_options: --release trusty + container_command: | + apt-get update + apt-get install -y vim lxc-dev + echo 'hello world.' | tee /opt/started + if [[ -f "/opt/started" ]]; then + echo 'hello world.' 
| tee /opt/found-started + fi + container_config: + - "lxc.aa_profile=unconfined" + - "lxc.cgroup.devices.allow=a *:* rmw" + archive: true + archive_compression: bzip2 + register: lvm_container_info + +- name: Debug info on container "test-container-lvm" + debug: var=lvm_container_info + +- name: Get information on a given container. + lxc-container: + name: test-container-config + register: config_container_info + +- name: debug info on container "test-container" + debug: var=config_container_info + +- name: Run a command in a container and ensure its in a "stopped" state. + lxc-container: + name: test-container-started + state: stopped + container_command: | + echo 'hello world.' | tee /opt/stopped + +- name: Run a command in a container and ensure its it in a "frozen" state. + lxc-container: + name: test-container-stopped + state: frozen + container_command: | + echo 'hello world.' | tee /opt/frozen + +- name: Start a container. + lxc-container: + name: test-container-stopped + state: started + +- name: Run a command in a container and then restart it. + lxc-container: + name: test-container-started + state: restarted + container_command: | + echo 'hello world.' | tee /opt/restarted + +- name: Run a complex command within a "running" container. + lxc-container: + name: test-container-started + container_command: | + apt-get update + apt-get install -y curl wget vim apache2 + echo 'hello world.' | tee /opt/started + if [[ -f "/opt/started" ]]; then + echo 'hello world.' | tee /opt/found-started + fi + +# Create an archive of an existing container, save the archive to a defined +# path and then destroy it. +- name: Archive container + lxc-container: + name: test-container-started + state: absent + archive: true + archive_path: /opt/archives + +- name: Destroy a container. 
+ lxc-container: + name: "{{ item }}" + state: absent + with_items: + - test-container-stopped + - test-container-started + - test-container-frozen + - test-container-lvm + - test-container-config +""" + + +try: + import lxc +except ImportError: + msg = 'The lxc module is not importable. Check the requirements.' + print("failed=True msg='%s'" % msg) + raise SystemExit(msg) + + +# LXC_COMPRESSION_MAP is a map of available compression types when creating +# an archive of a container. +LXC_COMPRESSION_MAP = { + 'gzip': { + 'extension': 'tar.tgz', + 'argument': '-czf' + }, + 'bzip2': { + 'extension': 'tar.bz2', + 'argument': '-cjf' + }, + 'none': { + 'extension': 'tar', + 'argument': '-cf' + } +} + + +# LXC_COMMAND_MAP is a map of variables that are available to a method based +# on the state the container is in. +LXC_COMMAND_MAP = { + 'create': { + 'variables': { + 'config': '--config', + 'template': '--template', + 'backing_store': '--bdev', + 'lxc_path': '--lxcpath', + 'lv_name': '--lvname', + 'vg_name': '--vgname', + 'thinpool': '--thinpool', + 'fs_type': '--fstype', + 'fs_size': '--fssize', + 'directory': '--dir', + 'zfs_root': '--zfsroot' + } + } +} + + +# LXC_BACKING_STORE is a map of available storage backends and options that +# are incompatible with the given storage backend. +LXC_BACKING_STORE = { + 'dir': [ + 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool' + ], + 'lvm': [ + 'zfs_root' + ], + 'btrfs': [ + 'lv_name', 'vg_name', 'thinpool', 'zfs_root' + ], + 'loop': [ + 'lv_name', 'vg_name', 'thinpool', 'zfs_root' + ] +} + + +# LXC_LOGGING_LEVELS is a map of available log levels +LXC_LOGGING_LEVELS = { + 'INFO': ['info', 'INFO', 'Info'], + 'ERROR': ['error', 'ERROR', 'Error'], + 'DEBUG': ['debug', 'DEBUG', 'Debug'] +} + + +# LXC_ANSIBLE_STATES is a map of states that contain values of methods used +# when a particular state is evoked. 
+LXC_ANSIBLE_STATES = { + 'started': '_started', + 'stopped': '_stopped', + 'restarted': '_restarted', + 'absent': '_destroyed', + 'frozen': '_frozen' +} + + +# This is used to attach to a running container and execute commands from +# within the container on the host. This will provide local access to a +# container without using SSH. The template will attempt to work within the +# home directory of the user that was attached to the container and source +# that users environment variables by default. +ATTACH_TEMPLATE = """#!/usr/bin/env bash +pushd "$(grep $(whoami) /etc/passwd | awk -F':' '{print $6}')" + if [[ -f ".bashrc" ]];then + source .bashrc + fi +popd + +# User defined command +%(container_command)s +""" + + +def create_script(command): + """Write out a script onto a target. + + This method should be backward compatible with Python 2.4+ when executing + from within the container. + + :param command: command to run, this can be a script and can use spacing + with newlines as separation. + :type command: ``str`` + """ + + import os + import os.path as path + import subprocess + import tempfile + + # Ensure that the directory /opt exists. + if not path.isdir('/opt'): + os.mkdir('/opt') + + # Create the script. + script_file = path.join('/opt', '.lxc-attach-script') + f = open(script_file, 'wb') + try: + f.write(ATTACH_TEMPLATE % {'container_command': command}) + f.flush() + finally: + f.close() + + # Ensure the script is executable. + os.chmod(script_file, 0755) + + # Get temporary directory. + tempdir = tempfile.gettempdir() + + # Output log file. + stdout = path.join(tempdir, 'lxc-attach-script.log') + stdout_file = open(stdout, 'ab') + + # Error log file. + stderr = path.join(tempdir, 'lxc-attach-script.err') + stderr_file = open(stderr, 'ab') + + # Execute the script command. + try: + subprocess.Popen( + [script_file], + stdout=stdout_file, + stderr=stderr_file + ).communicate() + finally: + # Close the log files. 
+ stderr_file.close() + stdout_file.close() + + # Remove the script file upon completion of execution. + os.remove(script_file) + + +class LxcContainerManagement(object): + def __init__(self, module): + """Management of LXC containers via Ansible. + + :param module: Processed Ansible Module. + :type module: ``object`` + """ + self.module = module + self.state = self.module.params.get('state', None) + self.state_change = False + self.lxc_vg = None + self.container_name = self.module.params['name'] + self.container = self.get_container_bind() + self.archive_info = None + + def get_container_bind(self): + return lxc.Container(name=self.container_name) + + @staticmethod + def _roundup(num): + """Return a rounded floating point number. + + :param num: Number to round up. + :type: ``float`` + :returns: Rounded up number. + :rtype: ``int`` + """ + num, part = str(num).split('.') + num = int(num) + if int(part) != 0: + num += 1 + return num + + @staticmethod + def _container_exists(name): + """Check if a container exists. + + :param name: Name of the container. + :type: ``str`` + :returns: True or False if the container is found. + :rtype: ``bol`` + """ + if [i for i in lxc.list_containers() if i == name]: + return True + else: + return False + + @staticmethod + def _add_variables(variables_dict, build_command): + """Return a command list with all found options. + + :param variables_dict: Pre-parsed optional variables used from a + seed command. + :type variables_dict: ``dict`` + :param build_command: Command to run. + :type build_command: ``list`` + :returns: list of command options. + :rtype: ``list`` + """ + + for key, value in variables_dict.items(): + build_command.append( + '%s %s' % (key, value) + ) + else: + return build_command + + def _get_vars(self, variables): + """Return a dict of all variables as found within the module. + + :param variables: Hash of all variables to find. + :type variables: ``dict`` + """ + + # Remove incompatible storage backend options. 
+ for v in LXC_BACKING_STORE[self.module.params['backing_store']]: + variables.pop(v, None) + + return_dict = dict() + for k, v in variables.items(): + _var = self.module.params.get(k) + if not [i for i in [None, ''] + BOOLEANS_FALSE if i == _var]: + return_dict[v] = _var + else: + return return_dict + + def _run_command(self, build_command, unsafe_shell=False, timeout=600): + """Return information from running an Ansible Command. + + This will squash the build command list into a string and then + execute the command via Ansible. The output is returned to the method. + This output is returned as `return_code`, `stdout`, `stderr`. + + Prior to running the command the method will look to see if the LXC + lockfile is present. If the lockfile "/var/lock/subsys/lxc" the method + will wait upto 10 minutes for it to be gone; polling every 5 seconds. + + :param build_command: Used for the command and all options. + :type build_command: ``list`` + :param unsafe_shell: Enable or Disable unsafe sell commands. + :type unsafe_shell: ``bol`` + :param timeout: Time before the container create process quites. + :type timeout: ``int`` + """ + + lockfile = '/var/lock/subsys/lxc' + + for _ in xrange(timeout): + if os.path.exists(lockfile): + time.sleep(1) + else: + return self.module.run_command( + ' '.join(build_command), + use_unsafe_shell=unsafe_shell + ) + else: + message = ( + 'The LXC subsystem is locked and after 5 minutes it never' + ' became unlocked. Lockfile [ %s ]' % lockfile + ) + self.failure( + error='LXC subsystem locked', + rc=0, + msg=message + ) + + def _config(self): + """Configure an LXC container. + + Write new configuration values to the lxc config file. This will + stop the container if it's running write the new options and then + restart the container upon completion. 
+ """ + + _container_config = self.module.params.get('container_config') + if not _container_config: + return False + + container_config_file = self.container.config_file_name + with open(container_config_file, 'rb') as f: + container_config = f.readlines() + + # Note used ast literal_eval because AnsibleModule does not provide for + # adequate dictionary parsing. + # Issue: https://github.com/ansible/ansible/issues/7679 + # TODO(cloudnull) adjust import when issue has been resolved. + import ast + options_dict = ast.literal_eval(_container_config) + parsed_options = [i.split('=') for i in options_dict] + + config_change = False + for key, value in parsed_options: + new_entry = '%s = %s\n' % (key, value) + for option_line in container_config: + # Look for key in config + if option_line.startswith(key): + _, _value = option_line.split('=') + config_value = ' '.join(_value.split()) + line_index = container_config.index(option_line) + # If the sanitized values don't match replace them + if value != config_value: + line_index += 1 + if new_entry not in container_config: + config_change = True + container_config.insert(line_index, new_entry) + # Break the flow as values are written or not at this point + break + else: + config_change = True + container_config.append(new_entry) + + # If the config changed restart the container. + if config_change: + container_state = self._get_state() + if container_state != 'stopped': + self.container.stop() + + with open(container_config_file, 'wb') as f: + f.writelines(container_config) + + self.state_change = True + if container_state == 'running': + self._container_startup() + elif container_state == 'frozen': + self._container_startup() + self.container.freeze() + + def _create(self): + """Create a new LXC container. + + This method will build and execute a shell command to build the + container. 
It would have been nice to simply use the lxc python library + however at the time this was written the python library, in both py2 + and py3 didn't support some of the more advanced container create + processes. These missing processes mainly revolve around backing + LXC containers with block devices. + """ + + build_command = [ + self.module.get_bin_path('lxc-create', True), + '--name %s' % self.container_name, + '--quiet' + ] + + build_command = self._add_variables( + variables_dict=self._get_vars( + variables=LXC_COMMAND_MAP['create']['variables'] + ), + build_command=build_command + ) + + # Load logging for the instance when creating it. + if self.module.params.get('container_log') in BOOLEANS_TRUE: + # Set the logging path to the /var/log/lxc if uid is root. else + # set it to the home folder of the user executing. + try: + if os.getuid() != 0: + log_path = os.getenv('HOME') + else: + if not os.path.isdir('/var/log/lxc/'): + os.makedirs('/var/log/lxc/') + log_path = '/var/log/lxc/' + except OSError: + log_path = os.getenv('HOME') + + build_command.extend([ + '--logfile %s' % os.path.join( + log_path, 'lxc-%s.log' % self.container_name + ), + '--logpriority %s' % self.module.params.get( + 'container_log_level' + ).upper() + ]) + + # Add the template commands to the end of the command if there are any + template_options = self.module.params.get('template_options', None) + if template_options: + build_command.append('-- %s' % template_options) + + rc, return_data, err = self._run_command(build_command) + if rc != 0: + msg = "Failed executing lxc-create." + self.failure( + err=err, rc=rc, msg=msg, command=' '.join(build_command) + ) + else: + self.state_change = True + + def _container_data(self): + """Returns a dict of container information. 
+ + :returns: container data + :rtype: ``dict`` + """ + + return { + 'interfaces': self.container.get_interfaces(), + 'ips': self.container.get_ips(), + 'state': self._get_state(), + 'init_pid': int(self.container.init_pid) + } + + def _unfreeze(self): + """Unfreeze a container. + + :returns: True or False based on if the container was unfrozen. + :rtype: ``bol`` + """ + + unfreeze = self.container.unfreeze() + if unfreeze: + self.state_change = True + return unfreeze + + def _get_state(self): + """Return the state of a container. + + If the container is not found the state returned is "absent" + + :returns: state of a container as a lower case string. + :rtype: ``str`` + """ + + if self._container_exists(name=self.container_name): + return str(self.container.state).lower() + else: + return str('absent') + + def _execute_command(self): + """Execute a shell command.""" + + container_command = self.module.params.get('container_command') + if container_command: + container_state = self._get_state() + if container_state == 'frozen': + self._unfreeze() + elif container_state == 'stopped': + self._container_startup() + + self.container.attach_wait(create_script, container_command) + self.state_change = True + + def _container_startup(self, timeout=60): + """Ensure a container is started. + + :param timeout: Time before the destroy operation is abandoned. + :type timeout: ``int`` + """ + + self.container = self.get_container_bind() + for _ in xrange(timeout): + if self._get_state() != 'running': + self.container.start() + self.state_change = True + # post startup sleep for 1 second. + time.sleep(1) + else: + return True + else: + self.failure( + lxc_container=self._container_data(), + error='Failed to start container' + ' [ %s ]' % self.container_name, + rc=1, + msg='The container [ %s ] failed to start. Check to lxc is' + ' available and that the container is in a functional' + ' state.' + ) + + def _check_archive(self): + """Create a compressed archive of a container. 
+ + This will store archive_info in as self.archive_info + """ + + if self.module.params.get('archive') in BOOLEANS_TRUE: + self.archive_info = { + 'archive': self._container_create_tar() + } + + def _destroyed(self, timeout=60): + """Ensure a container is destroyed. + + :param timeout: Time before the destroy operation is abandoned. + :type timeout: ``int`` + """ + + for _ in xrange(timeout): + if not self._container_exists(name=self.container_name): + break + + # Check if the container needs to have an archive created. + self._check_archive() + + if self._get_state() != 'stopped': + self.state_change = True + self.container.stop() + + if self.container.destroy(): + self.state_change = True + + # post destroy attempt sleep for 1 second. + time.sleep(1) + else: + self.failure( + lxc_container=self._container_data(), + error='Failed to destroy container' + ' [ %s ]' % self.container_name, + rc=1, + msg='The container [ %s ] failed to be destroyed. Check' + ' that lxc is available and that the container is in a' + ' functional state.' % self.container_name + ) + + def _frozen(self, count=0): + """Ensure a container is frozen. + + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. + :type count: ``int`` + """ + + self.check_count(count=count, method='frozen') + if self._container_exists(name=self.container_name): + self._execute_command() + + # Perform any configuration updates + self._config() + + container_state = self._get_state() + if container_state == 'frozen': + pass + elif container_state == 'running': + self.container.freeze() + self.state_change = True + else: + self._container_startup() + self.container.freeze() + self.state_change = True + + # Check if the container needs to have an archive created. + self._check_archive() + else: + self._create() + count += 1 + self._frozen(count) + + def _restarted(self, count=0): + """Ensure a container is restarted. 
+ + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. + :type count: ``int`` + """ + + self.check_count(count=count, method='restart') + if self._container_exists(name=self.container_name): + self._execute_command() + + # Perform any configuration updates + self._config() + + if self._get_state() != 'stopped': + self.container.stop() + self.state_change = True + + # Check if the container needs to have an archive created. + self._check_archive() + else: + self._create() + count += 1 + self._restarted(count) + + def _stopped(self, count=0): + """Ensure a container is stopped. + + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. + :type count: ``int`` + """ + + self.check_count(count=count, method='stop') + if self._container_exists(name=self.container_name): + self._execute_command() + + # Perform any configuration updates + self._config() + + if self._get_state() != 'stopped': + self.container.stop() + self.state_change = True + + # Check if the container needs to have an archive created. + self._check_archive() + else: + self._create() + count += 1 + self._stopped(count) + + def _started(self, count=0): + """Ensure a container is started. + + If the container does not exist the container will be created. + + :param count: number of times this command has been called by itself. + :type count: ``int`` + """ + + self.check_count(count=count, method='start') + if self._container_exists(name=self.container_name): + container_state = self._get_state() + if container_state == 'running': + pass + elif container_state == 'frozen': + self._unfreeze() + elif not self._container_startup(): + self.failure( + lxc_container=self._container_data(), + error='Failed to start container' + ' [ %s ]' % self.container_name, + rc=1, + msg='The container [ %s ] failed to start. 
Check to lxc is' + ' available and that the container is in a functional' + ' state.' % self.container_name + ) + + # Return data + self._execute_command() + + # Perform any configuration updates + self._config() + + # Check if the container needs to have an archive created. + self._check_archive() + else: + self._create() + count += 1 + self._started(count) + + def _get_lxc_vg(self): + """Return the name of the Volume Group used in LXC.""" + + build_command = [ + self.module.get_bin_path('lxc-config', True), + "lxc.bdev.lvm.vg" + ] + rc, vg, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to read LVM VG from LXC config', + command=' '.join(build_command) + ) + else: + return str(vg.strip()) + + def _lvm_lv_list(self): + """Return a list of all lv in a current vg.""" + + vg = self._get_lxc_vg() + build_command = [ + self.module.get_bin_path('lvs', True) + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to get list of LVs', + command=' '.join(build_command) + ) + + all_lvms = [i.split() for i in stdout.splitlines()][1:] + return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg] + + def _get_vg_free_pe(self, name): + """Return the available size of a given VG. + + :param name: Name of volume. + :type name: ``str`` + :returns: size and measurement of an LV + :type: ``tuple`` + """ + build_command = [ + 'vgdisplay', + name + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to read vg %s' % name, + command=' '.join(build_command) + ) + + vg_info = [i.strip() for i in stdout.splitlines()][1:] + free_pe = [i for i in vg_info if i.startswith('Free')] + _free_pe = free_pe[0].split() + return float(_free_pe[-2]), _free_pe[-1] + + def _get_lv_size(self, name): + """Return the available size of a given LV. + + :param name: Name of volume. 
+ :type name: ``str`` + :returns: size and measurement of an LV + :type: ``tuple`` + """ + vg = self._get_lxc_vg() + lv = os.path.join(vg, name) + build_command = [ + 'lvdisplay', + lv + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to read lv %s' % lv, + command=' '.join(build_command) + ) + + lv_info = [i.strip() for i in stdout.splitlines()][1:] + _free_pe = [i for i in lv_info if i.startswith('LV Size')] + free_pe = _free_pe[0].split() + return self._roundup(float(free_pe[-2])), free_pe[-1] + + def _lvm_snapshot_create(self, source_lv, snapshot_name, + snapshot_size_gb=5): + """Create an LVM snapshot. + + :param source_lv: Name of lv to snapshot + :type source_lv: ``str`` + :param snapshot_name: Name of lv snapshot + :type snapshot_name: ``str`` + :param snapshot_size_gb: Size of snapshot to create + :type snapshot_size_gb: ``int`` + """ + vg = self._get_lxc_vg() + free_space, messurement = self._get_vg_free_pe(name=vg) + + if free_space < float(snapshot_size_gb): + message = ( + 'Snapshot size [ %s ] is > greater than [ %s ] on volume group' + ' [ %s ]' % (snapshot_size_gb, free_space, vg) + ) + self.failure( + error='Not enough space to create snapshot', + rc=2, + msg=message + ) + + # Create LVM Snapshot + build_command = [ + self.module.get_bin_path('lvcreate', True), + "-n", + snapshot_name, + "-s", + os.path.join(vg, source_lv), + "-L%sg" % snapshot_size_gb + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to Create LVM snapshot %s/%s --> %s' + % (vg, source_lv, snapshot_name) + ) + + def _lvm_lv_mount(self, lv_name, mount_point): + """mount an lv. + + :param lv_name: name of the logical volume to mount + :type lv_name: ``str`` + :param mount_point: path on the file system that is mounted. 
+ :type mount_point: ``str`` + """ + vg = self._get_lxc_vg() + + build_command = [ + self.module.get_bin_path('mount', True), + "/dev/%s/%s" % (vg, lv_name), + mount_point, + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to mountlvm lv %s/%s to %s' + % (vg, lv_name, mount_point) + ) + + def _create_tar(self, source_dir): + """Create an archive of a given ``source_dir`` to ``output_path``. + + :param source_dir: Path to the directory to be archived. + :type source_dir: ``str`` + """ + + archive_path = self.module.params.get('archive_path') + if not os.path.isdir(archive_path): + os.makedirs(archive_path) + + archive_compression = self.module.params.get('archive_compression') + compression_type = LXC_COMPRESSION_MAP[archive_compression] + + # remove trailing / if present. + archive_name = '%s.%s' % ( + os.path.join( + archive_path, + self.container_name + ), + compression_type['extension'] + ) + + build_command = [ + self.module.get_bin_path('tar', True), + '--directory=%s' % os.path.realpath( + os.path.expanduser(source_dir) + ), + compression_type['argument'], + archive_name, + '.' + ] + + rc, stdout, err = self._run_command( + build_command=build_command, + unsafe_shell=True + ) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to create tar archive', + command=' '.join(build_command) + ) + + return archive_name + + def _lvm_lv_remove(self, name): + """Remove an LV. + + :param name: The name of the logical volume + :type name: ``str`` + """ + vg = self._get_lxc_vg() + build_command = [ + self.module.get_bin_path('lvremove', True), + "-f", + "%s/%s" % (vg, name), + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='Failed to remove LVM LV %s/%s' % (vg, name), + command=' '.join(build_command) + ) + + def _rsync_data(self, container_path, temp_dir): + """Sync the container directory to the temp directory. 
+ + :param container_path: path to the container container + :type container_path: ``str`` + :param temp_dir: path to the temporary local working directory + :type temp_dir: ``str`` + """ + build_command = [ + self.module.get_bin_path('rsync', True), + '-aHAX', + container_path, + temp_dir + ] + rc, stdout, err = self._run_command(build_command, unsafe_shell=True) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to perform archive', + command=' '.join(build_command) + ) + + def _unmount(self, mount_point): + """Unmount a file system. + + :param mount_point: path on the file system that is mounted. + :type mount_point: ``str`` + """ + build_command = [ + self.module.get_bin_path('umount', True), + mount_point, + ] + rc, stdout, err = self._run_command(build_command) + if rc != 0: + self.failure( + err=err, + rc=rc, + msg='failed to unmount [ %s ]' % mount_point, + command=' '.join(build_command) + ) + + def _container_create_tar(self): + """Create a tar archive from an LXC container. 
+ + The process is as follows: + * Stop or Freeze the container + * Create temporary dir + * Copy container and config to temporary directory + * If LVM backed: + * Create LVM snapshot of LV backing the container + * Mount the snapshot to tmpdir/rootfs + * Restore the state of the container + * Create tar of tmpdir + * Clean up + """ + + # Create a temp dir + temp_dir = tempfile.mkdtemp() + + # Set the name of the working dir, temp + container_name + work_dir = os.path.join(temp_dir, self.container_name) + + # LXC container rootfs + lxc_rootfs = self.container.get_config_item('lxc.rootfs') + + # Test if the containers rootfs is a block device + block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev')) + mount_point = os.path.join(work_dir, 'rootfs') + + # Set the snapshot name if needed + snapshot_name = '%s_lxc_snapshot' % self.container_name + + # Set the path to the container data + container_path = os.path.dirname(lxc_rootfs) + container_state = self._get_state() + try: + # Ensure the original container is stopped or frozen + if container_state not in ['stopped', 'frozen']: + if container_state == 'running': + self.container.freeze() + else: + self.container.stop() + + # Sync the container data from the container_path to work_dir + self._rsync_data(container_path, temp_dir) + + if block_backed: + if snapshot_name not in self._lvm_lv_list(): + if not os.path.exists(mount_point): + os.makedirs(mount_point) + + # Take snapshot + size, measurement = self._get_lv_size( + name=self.container_name + ) + self._lvm_snapshot_create( + source_lv=self.container_name, + snapshot_name=snapshot_name, + snapshot_size_gb=size + ) + + # Mount snapshot + self._lvm_lv_mount( + lv_name=snapshot_name, + mount_point=mount_point + ) + else: + self.failure( + err='snapshot [ %s ] already exists' % snapshot_name, + rc=1, + msg='The snapshot [ %s ] already exists. Please clean' + ' up old snapshot of containers before continuing.' 
+ % snapshot_name + ) + + # Restore original state of container + if container_state == 'running': + if self._get_state() == 'frozen': + self.container.unfreeze() + else: + self.container.start() + + # Set the state as changed and set a new fact + self.state_change = True + return self._create_tar(source_dir=work_dir) + finally: + if block_backed: + # unmount snapshot + self._unmount(mount_point) + + # Remove snapshot + self._lvm_lv_remove(snapshot_name) + + # Remove tmpdir + shutil.rmtree(temp_dir) + + def check_count(self, count, method): + if count > 1: + self.failure( + error='Failed to %s container' % method, + rc=1, + msg='The container [ %s ] failed to %s. Check to lxc is' + ' available and that the container is in a functional' + ' state.' % (self.container_name, method) + ) + + def failure(self, **kwargs): + """Return a Failure when running an Ansible command. + + :param error: ``str`` Error that occurred. + :param rc: ``int`` Return code while executing an Ansible command. + :param msg: ``str`` Message to report. 
+ """ + + self.module.fail_json(**kwargs) + + def run(self): + """Run the main method.""" + + action = getattr(self, LXC_ANSIBLE_STATES[self.state]) + action() + + outcome = self._container_data() + if self.archive_info: + outcome.update(self.archive_info) + + self.module.exit_json( + changed=self.state_change, + lxc_container=outcome + ) + + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True + ), + template=dict( + type='str', + default='ubuntu' + ), + backing_store=dict( + type='str', + choices=LXC_BACKING_STORE.keys(), + default='dir' + ), + template_options=dict( + type='str' + ), + config=dict( + type='str', + default='/etc/lxc/default.conf' + ), + vg_name=dict( + type='str', + default='lxc' + ), + thinpool=dict( + type='str' + ), + fs_type=dict( + type='str', + default='ext4' + ), + fs_size=dict( + type='str', + default='5G' + ), + directory=dict( + type='str' + ), + zfs_root=dict( + type='str' + ), + lv_name=dict( + type='str' + ), + lxc_path=dict( + type='str' + ), + state=dict( + choices=LXC_ANSIBLE_STATES.keys(), + default='started' + ), + container_command=dict( + type='str' + ), + container_config=dict( + type='str' + ), + container_log=dict( + choices=BOOLEANS, + default='false' + ), + container_log_level=dict( + choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i], + default='INFO' + ), + archive=dict( + choices=BOOLEANS, + default='false' + ), + archive_path=dict( + type='str', + default='/tmp' + ), + archive_compression=dict( + choices=LXC_COMPRESSION_MAP.keys(), + default='gzip' + ) + ), + supports_check_mode=False, + ) + + lv_name = module.params.get('lv_name') + if not lv_name: + module.params['lv_name'] = module.params.get('name') + + lxc_manage = LxcContainerManagement(module=module) + lxc_manage.run() + + +# import module bits +from ansible.module_utils.basic import * +main() + From 99359fc4e216ab85700650e6fff1c86682994390 Mon Sep 17 00:00:00 2001 From: 
Kevin Carter Date: Tue, 2 Dec 2014 08:18:00 -0600 Subject: [PATCH 014/224] Updated volume create methods for a more consistent process The volume create methods were making an assumption on the unit sizes being presented by the `vgdisplay` and the `lvdisplay` commands. To correct the assumption the commands will now enforce a unit size of "g" which will alway convert sives to gigabytes. This was an issue brought up by @hughsaunders. --- cloud/lxc/lxc-container.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/cloud/lxc/lxc-container.py b/cloud/lxc/lxc-container.py index b72890972db..6df27867267 100644 --- a/cloud/lxc/lxc-container.py +++ b/cloud/lxc/lxc-container.py @@ -1015,9 +1015,12 @@ class LxcContainerManagement(object): :returns: size and measurement of an LV :type: ``tuple`` """ + build_command = [ 'vgdisplay', - name + name, + '--units', + 'g' ] rc, stdout, err = self._run_command(build_command) if rc != 0: @@ -1041,11 +1044,14 @@ class LxcContainerManagement(object): :returns: size and measurement of an LV :type: ``tuple`` """ + vg = self._get_lxc_vg() lv = os.path.join(vg, name) build_command = [ 'lvdisplay', - lv + lv, + '--units', + 'g' ] rc, stdout, err = self._run_command(build_command) if rc != 0: @@ -1072,6 +1078,7 @@ class LxcContainerManagement(object): :param snapshot_size_gb: Size of snapshot to create :type snapshot_size_gb: ``int`` """ + vg = self._get_lxc_vg() free_space, messurement = self._get_vg_free_pe(name=vg) @@ -1112,6 +1119,7 @@ class LxcContainerManagement(object): :param mount_point: path on the file system that is mounted. 
:type mount_point: ``str`` """ + vg = self._get_lxc_vg() build_command = [ @@ -1181,6 +1189,7 @@ class LxcContainerManagement(object): :param name: The name of the logical volume :type name: ``str`` """ + vg = self._get_lxc_vg() build_command = [ self.module.get_bin_path('lvremove', True), @@ -1204,6 +1213,7 @@ class LxcContainerManagement(object): :param temp_dir: path to the temporary local working directory :type temp_dir: ``str`` """ + build_command = [ self.module.get_bin_path('rsync', True), '-aHAX', @@ -1225,6 +1235,7 @@ class LxcContainerManagement(object): :param mount_point: path on the file system that is mounted. :type mount_point: ``str`` """ + build_command = [ self.module.get_bin_path('umount', True), mount_point, From 76142ddb9721143f44b36b7507393e0413fcbdbc Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Thu, 4 Dec 2014 11:25:06 +0100 Subject: [PATCH 015/224] Allow multiple versions in rpm state Fix bug in ansible get_package_state and get_current_version that breaks when there are multiple versions of a package installed and there is a list of packages to install. The previous implementation used 'zip' to match requested names to installed names which fails, because rpm outputs multiple lines per package when there are multiple versions. Testcase: Install opensuse, install multiple kernel versions (happens by update) Before patch: calling zypper: state=present for name={{item}} with_items: - kernel-desktop - git leads to ansible aborting. After the patch ansible performs as expected and makes sure both packages are present. Also the last version number is used for further update information in this version (before if only one package name was given the oldest version number was used). 
--- packaging/os/zypper.py | 48 ++++++++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index 87bbcd1f135..7091145423b 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -95,25 +95,31 @@ def zypper_version(module): return rc, stderr # Function used for getting versions of currently installed packages. -def get_current_version(m, name): +def get_current_version( packages): cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n'] - cmd.extend(name) - (rc, stdout, stderr) = m.run_command(cmd) + cmd.extend(packages) + + stdout = subprocess.check_output(cmd) current_version = {} rpmoutput_re = re.compile('^(\S+) (\S+)$') - for stdoutline, package in zip(stdout.splitlines(), name): - m = rpmoutput_re.match(stdoutline) - if m == None: + + for stdoutline in stdout.splitlines(): + match = rpmoutput_re.match(stdoutline) + if match == None: return None - rpmpackage = m.group(1) - rpmversion = m.group(2) - if package != rpmpackage: + package = match.group(1) + version = match.group(2) + current_version[package] = version + + for package in packages: + if package not in current_version: + print package + ' was not returned by rpm \n' return None - current_version[package] = rpmversion return current_version + # Function used to find out if a package is currently installed. 
def get_package_state(m, packages): cmd = ['/bin/rpm', '--query', '--qf', 'package %{NAME} is installed\n'] @@ -123,19 +129,21 @@ def get_package_state(m, packages): installed_state = {} rpmoutput_re = re.compile('^package (\S+) (.*)$') - for stdoutline, name in zip(stdout.splitlines(), packages): - m = rpmoutput_re.match(stdoutline) - if m == None: - return None - package = m.group(1) - result = m.group(2) - if not name.startswith(package): - print name + ':' + package + ':' + stdoutline + '\n' + for stdoutline in stdout.splitlines(): + match = rpmoutput_re.match(stdoutline) + if match == None: return None + package = match.group(1) + result = match.group(2) if result == 'is installed': - installed_state[name] = True + installed_state[package] = True else: - installed_state[name] = False + installed_state[package] = False + + for package in packages: + if package not in installed_state: + print package + ' was not returned by rpm \n' + return None return installed_state From 7948b91bad04f0f3a13dc44bacaf74dc7464b24b Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Thu, 4 Dec 2014 11:28:18 +0100 Subject: [PATCH 016/224] fix local change --- packaging/os/zypper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index 7091145423b..c848e86fcc6 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -95,11 +95,11 @@ def zypper_version(module): return rc, stderr # Function used for getting versions of currently installed packages. 
-def get_current_version( packages): +def get_current_version(m, packages): cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n'] cmd.extend(packages) - stdout = subprocess.check_output(cmd) + rc, stdout, stderr = m.run_command(cmd, check_rc=False) current_version = {} rpmoutput_re = re.compile('^(\S+) (\S+)$') From 926194f75d53846a11b85ca579b2fc451facac76 Mon Sep 17 00:00:00 2001 From: nmeum Date: Thu, 4 Dec 2014 12:21:15 +0100 Subject: [PATCH 017/224] Make sure portage doesn't ask for confirmation If EMERGE_DEFAULT_OPTS in make.conf(5) contains '--ask' then the portage module doesn't work correctly, this commit fixes that --- packaging/os/portage.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index 85027bfc79b..d38062e47e5 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -335,6 +335,7 @@ def cleanup_packages(module, packages): def run_emerge(module, packages, *args): args = list(args) + args.append('--ask=n') if module.check_mode: args.append('--pretend') From 226144512da5aa3e2d2704f833c83a5228d93596 Mon Sep 17 00:00:00 2001 From: Chris Schmidt Date: Thu, 11 Dec 2014 17:21:38 -0700 Subject: [PATCH 018/224] Renamted module from download_artifact to maven_artifact --- packaging/{download_artifact.py => maven_artifact.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename packaging/{download_artifact.py => maven_artifact.py} (100%) diff --git a/packaging/download_artifact.py b/packaging/maven_artifact.py similarity index 100% rename from packaging/download_artifact.py rename to packaging/maven_artifact.py From 9498d3de9c09c73fcf47a30bc1db22994b04327a Mon Sep 17 00:00:00 2001 From: Chris Schmidt Date: Thu, 11 Dec 2014 17:23:25 -0700 Subject: [PATCH 019/224] Renamd Module File --- packaging/maven_artifact.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/packaging/maven_artifact.py b/packaging/maven_artifact.py index 
741646dcdf3..0c8070d1e46 100755 --- a/packaging/maven_artifact.py +++ b/packaging/maven_artifact.py @@ -30,7 +30,7 @@ import base64 DOCUMENTATION = ''' --- -module: download_artifact +module: maven_artifact short_description: Downloads an Artifact from a Maven Repository version_added: "historical" description: @@ -97,20 +97,20 @@ options: EXAMPLES = ''' # Download the latest version of the commons-collections artifact from Maven Central -- download_artifact: group_id=org.apache.commons artifact_id=commons-collections target=/tmp/commons-collections-latest.jar +- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections target=/tmp/commons-collections-latest.jar # Download Apache Commons-Collections 3.2 from Maven Central -- download_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 target=/tmp/commons-collections-3.2.jar +- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 target=/tmp/commons-collections-3.2.jar # Download an artifact from a private repository requiring authentication -- download_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass target=/tmp/library-name-latest.jar +- maven_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass target=/tmp/library-name-latest.jar # Download a WAR File to the Tomcat webapps directory to be deployed -- download_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven target=/var/lib/tomcat7/webapps/web-app.war +- maven_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven target=/var/lib/tomcat7/webapps/web-app.war ''' class Artifact(object): - def __init__(self, group_id, artifact_id, version, classifier=None, extension=jar): + def __init__(self, group_id, artifact_id, 
version, classifier=None, extension='jar'): if not group_id: raise ValueError("group_id must be set") if not artifact_id: From d611f86e5b3644421b2f24eb1b5af2c6eb7be61d Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Mon, 29 Sep 2014 19:49:42 -0400 Subject: [PATCH 020/224] Addes login_port feature, similiar to how mysql_db.py works --- database/mysql/mysql_replication.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index 1d61436a523..c5f2cc3a726 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -54,6 +54,12 @@ options: description: - mysql host to connect required: False + login_port: + description: + - Port of the MySQL server. Requires login_host be defined as other then localhost if login_port is used + required: False + default: 3306 + version_added: "1.9" login_unix_socket: description: - unix socket to connect mysql server @@ -115,6 +121,9 @@ EXAMPLES = ''' # Change master to master server 192.168.1.1 and use binary log 'mysql-bin.000009' with position 4578 - mysql_replication: mode=changemaster master_host=192.168.1.1 master_log_file=mysql-bin.000009 master_log_pos=4578 + +# Check slave status using port 3308 +- mysql_replication: mode=getslave login_host=ansible.example.com login_port=3308 ''' import ConfigParser @@ -229,6 +238,7 @@ def main(): login_user=dict(default=None), login_password=dict(default=None), login_host=dict(default="localhost"), + login_port=dict(default="3306"), login_unix_socket=dict(default=None), mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]), master_host=dict(default=None), @@ -251,6 +261,7 @@ def main(): user = module.params["login_user"] password = module.params["login_password"] host = module.params["login_host"] + port = module.params["login_port"] mode = module.params["mode"] master_host = 
module.params["master_host"] master_user = module.params["master_user"] @@ -292,8 +303,10 @@ def main(): try: if module.params["login_unix_socket"]: db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password) + elif module.params["login_port"] != "3306" and module.params["login_host"] == "localhost": + module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined") else: - db_connection = MySQLdb.connect(host=module.params["login_host"], user=login_user, passwd=login_password) + db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password) except Exception, e: module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") try: @@ -366,4 +379,4 @@ def main(): # import module snippets from ansible.module_utils.basic import * main() -warnings.simplefilter("ignore") \ No newline at end of file +warnings.simplefilter("ignore") From 1be1aacadf9a7ceb2bffd4f22b7b8a185e3ded58 Mon Sep 17 00:00:00 2001 From: Nikolai Lifanov Date: Wed, 17 Dec 2014 10:09:36 -0500 Subject: [PATCH 021/224] fix at.py module on non-glibc systems --- system/at.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/at.py b/system/at.py index c63527563fd..770148991f1 100644 --- a/system/at.py +++ b/system/at.py @@ -78,7 +78,7 @@ import tempfile def add_job(module, result, at_cmd, count, units, command, script_file): - at_command = "%s now + %s %s -f %s" % (at_cmd, count, units, script_file) + at_command = "%s -f %s now + %s %s" % (at_cmd, script_file, count, units) rc, out, err = module.run_command(at_command, check_rc=True) if command: os.unlink(script_file) From 899f1c0a0b478e1353bf2cde00215d1ea63d3c94 Mon Sep 17 00:00:00 2001 From: Jeroen Thora Date: Fri, 19 Dec 
2014 23:43:21 +0100 Subject: [PATCH 022/224] Fixed small typo in zabbix group docs --- monitoring/zabbix_group.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py index a316405456b..489a8617f54 100644 --- a/monitoring/zabbix_group.py +++ b/monitoring/zabbix_group.py @@ -36,7 +36,7 @@ options: choices: [ 'present', 'absent' ] host_group: description: - - Name of the host groupto be added or removed. + - Name of the host group to be added or removed. required: true default: null aliases: [ ] From 2e1b703b3c3ddc13e8c15407404ff5498c94f9da Mon Sep 17 00:00:00 2001 From: Julien Pepy Date: Mon, 28 Jul 2014 08:26:38 +0200 Subject: [PATCH 023/224] Fix composer module checkmode and change detection --- packaging/language/composer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/language/composer.py b/packaging/language/composer.py index 5d8ba563c8b..f788f53dd5c 100644 --- a/packaging/language/composer.py +++ b/packaging/language/composer.py @@ -138,6 +138,7 @@ def main(): if module.check_mode: options.add("--dry-run") + del module.params['CHECKMODE'] # Get composer command with fallback to default command = module.params['command'] From 2e6b94047d8579490d289827b7a6f134f1bbffde Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 30 Dec 2014 19:53:59 -0500 Subject: [PATCH 024/224] fixed doc bug --- system/gluster_volume.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 8709cf778b1..087a4a0f6de 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -21,7 +21,7 @@ DOCUMENTATION = """ module: gluster_volume -short_description: Manage GlusterFs volumes +short_description: Manage GlusterFS volumes description: - Create, remove, start, stop and tune GlusterFS volumes options: @@ -33,8 +33,8 @@ options: required: true choices: [ 'present', 'absent', 'started', 'stopped' ] description: - - Mode of 
operation: present/absent ensure if a module exists or not. - started/stopped make sure it is enabled or not. + - Use present/absent ensure if a volume exists or not, + use started/stopped to control it's availability. cluster: required: false description: From 07815bde3b2aa730df74046105add8b81d4e375a Mon Sep 17 00:00:00 2001 From: Peter Oliver Date: Wed, 31 Dec 2014 11:59:40 +0000 Subject: [PATCH 025/224] Add Solaris 11 package management - Module pkg5 handles installing and uninstalling packages. - Module pkg5_publisher manages repository configuration. --- packaging/os/pkg5.py | 109 ++++++++++++++++++++ packaging/os/pkg5_publisher.py | 176 +++++++++++++++++++++++++++++++++ 2 files changed, 285 insertions(+) create mode 100644 packaging/os/pkg5.py create mode 100644 packaging/os/pkg5_publisher.py diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py new file mode 100644 index 00000000000..ffe9e083a63 --- /dev/null +++ b/packaging/os/pkg5.py @@ -0,0 +1,109 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: pkg5 +author: Peter Oliver +short_description: Manages packages with the Solaris 11 Image Packaging System +description: + - IPS packages are the native packages in Solaris 11 and higher. +notes: + - The naming of IPS packages is explained at http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html. +options: + name: + description: + - An FRMI of the package(s) to be installed/removed/updated. + - Multiple packages may be specified, separated by C(,). If C(,) + appears in an FRMI, you can replace it with C(-). + required: true + state: + description: + - Whether to install (C(present), C(latest)), or remove (C(absent)) a + package. 
+ required: false + default: present + choices: [ present, latest, absent ] +''' +EXAMPLES = ''' +# Install Vim: +- pkg5: name=editor/vim + +# Remove finger daemon: +- pkg5: name=service/network/finger state=absent +''' + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, type='list'), + state=dict( + default='present', + choices=[ + 'present', + 'installed', + 'latest', + 'absent', + 'uninstalled', + 'removed', + ] + ), + ) + ) + + params = module.params + if params['state'] in ['present', 'installed']: + ensure(module, 'present', params['name']) + elif params['state'] in ['latest']: + ensure(module, 'latest', params['name']) + elif params['state'] in ['absent', 'uninstalled', 'removed']: + ensure(module, 'absent', params['name']) + + +def ensure(module, state, packages): + response = { + 'results': [], + 'msg': '', + } + behaviour = { + 'present': { + 'filter': lambda p: not is_installed(module, p), + 'subcommand': 'install', + }, + 'latest': { + 'filter': lambda p: not is_latest(module, p), + 'subcommand': 'install', + }, + 'absent': { + 'filter': lambda p: is_installed(module, p), + 'subcommand': 'uninstall', + }, + } + + to_modify = filter(behaviour[state]['filter'], packages) + if to_modify: + rc, out, err = module.run_command( + ['pkg', behaviour[state]['subcommand'], '-q', '--'] + to_modify + ) + response['rc'] = rc + response['results'].append(out) + response['msg'] += err + response['changed'] = True + if rc != 0: + module.fail_json(**response) + + module.exit_json(**response) + + +def is_installed(module, package): + rc, out, err = module.run_command(['pkg', 'list', '--', package]) + return True if rc == 0 else False + + +def is_latest(module, package): + rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package]) + return True if rc == 1 else False + + +from ansible.module_utils.basic import * +main() diff --git a/packaging/os/pkg5_publisher.py b/packaging/os/pkg5_publisher.py new file mode 100644 index 
00000000000..46bb6c6407b --- /dev/null +++ b/packaging/os/pkg5_publisher.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: pkg5_publisher +author: Peter Oliver +short_description: Manages Solaris 11 Image Packaging System publishers +description: + - IPS packages are the native packages in Solaris 11 and higher. + - This modules will configure which publishers a client will download IPS + packages from. +options: + name: + description: + - The publisher's name. + required: true + aliases: [ publisher ] + state: + description: + - Whether to ensure that a publisher is present or absent. + required: false + default: present + choices: [ present, absent ] + sticky: + description: + - Packages installed from a sticky repository can only receive updates + from that repository. + required: false + default: null + choices: [ true, false ] + enabled: + description: + - Is the repository enabled or disabled? + required: false + default: null + choices: [ true, false ] + origin: + description: + - A path or URL to the repository. + - Multiple values may be provided. + required: false + default: null + mirror: + description: + - A path or URL to the repository mirror. + - Multiple values may be provided. 
+ required: false + default: null +''' +EXAMPLES = ''' +# Fetch packages for the solaris publisher direct from Oracle: +- pkg5_publisher: name=solaris sticky=true origin=https://pkg.oracle.com/solaris/support/ + +# Configure a publisher for locally-produced packages: +- pkg5_publisher: name=site origin=https://pkg.example.com/site/ +''' + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['publisher']), + state=dict(default='present', choices=['present', 'absent']), + sticky=dict(choices=BOOLEANS), + enabled=dict(choices=BOOLEANS), + # search_after=dict(), + # search_before=dict(), + origin=dict(type='list'), + mirror=dict(type='list'), + ) + ) + + for option in ['origin', 'mirror']: + if module.params[option] == ['']: + module.params[option] = [] + + if module.params['state'] == 'present': + modify_publisher(module, module.params) + else: + unset_publisher(module, module.params['name']) + + +def modify_publisher(module, params): + name = params['name'] + existing = get_publishers(module) + + if name in existing: + for option in ['origin', 'mirror', 'sticky', 'enabled']: + if params[option] != None: + if params[option] != existing[name][option]: + return set_publisher(module, params) + else: + return set_publisher(module, params) + + module.exit_json() + + +def set_publisher(module, params): + name = params['name'] + args = [] + + if params['origin'] != None: + args.append('--remove-origin=*') + args.extend(['--add-origin=' + u for u in params['origin']]) + if params['mirror'] != None: + args.append('--remove-mirror=*') + args.extend(['--add-mirror=' + u for u in params['mirror']]) + + if params['sticky'] != None: + args.append('--sticky' if params['sticky'] else '--non-sticky') + if params['enabled'] != None: + args.append('--enable' if params['enabled'] else '--disable') + + rc, out, err = module.run_command( + ["pkg", "set-publisher"] + args + [name], + check_rc=True + ) + response = { + 'rc': rc, + 'results': [out], 
+ 'msg': err, + 'changed': True, + } + module.exit_json(**response) + + +def unset_publisher(module, publisher): + rc, out, err = module.run_command( + ["pkg", "unset-publisher", publisher], + check_rc=True + ) + response = { + 'rc': rc, + 'results': [out], + 'msg': err, + 'changed': True, + } + module.exit_json(**response) + + +def get_publishers(module): + rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True) + + lines = out.splitlines() + keys = lines.pop(0).lower().split("\t") + + publishers = {} + for line in lines: + values = dict(zip(keys, map(unstringify, line.split("\t")))) + name = values['publisher'] + + if not name in publishers: + publishers[name] = dict( + (k, values[k]) for k in ['sticky', 'enabled'] + ) + publishers[name]['origin'] = [] + publishers[name]['mirror'] = [] + + publishers[name][values['type']].append(values['uri']) + + return publishers + + +def unstringify(val): + if val == "-": + return None + elif val == "true": + return True + elif val == "false": + return False + else: + return val + + +from ansible.module_utils.basic import * +main() From ec54b00fdfa93f8d64a6c18631e01addb2b09170 Mon Sep 17 00:00:00 2001 From: Peter Oliver Date: Wed, 31 Dec 2014 12:48:59 +0000 Subject: [PATCH 026/224] Add missing boilerplate. --- packaging/os/pkg5.py | 15 +++++++++++++++ packaging/os/pkg5_publisher.py | 15 +++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index ffe9e083a63..271b95fffe6 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -1,6 +1,21 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# Copyright 2014 Peter Oliver +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + DOCUMENTATION = ''' --- module: pkg5 diff --git a/packaging/os/pkg5_publisher.py b/packaging/os/pkg5_publisher.py index 46bb6c6407b..20b0c0a659c 100644 --- a/packaging/os/pkg5_publisher.py +++ b/packaging/os/pkg5_publisher.py @@ -1,6 +1,21 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# Copyright 2014 Peter Oliver +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + DOCUMENTATION = ''' --- module: pkg5_publisher From 5a7695c440956d881ce22a04bb2c37b7d60ea7ee Mon Sep 17 00:00:00 2001 From: Peter Oliver Date: Wed, 31 Dec 2014 13:27:21 +0000 Subject: [PATCH 027/224] Try to fix up commas in version numbers. 
--- packaging/os/pkg5.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index 271b95fffe6..bbdf3eb006b 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -48,6 +48,7 @@ EXAMPLES = ''' - pkg5: name=service/network/finger state=absent ''' + def main(): module = AnsibleModule( argument_spec=dict( @@ -67,12 +68,26 @@ def main(): ) params = module.params + packages = [] + + # pkg(5) FRMIs include a comma before the release number, but + # AnsibleModule will have split this into multiple items for us. + # Try to spot where this has happened and fix it. + for fragment in params['name']: + if ( + re.search('^\d+(?:\.\d+)*', fragment) + and packages and re.search('@[^,]*$', packages[-1]) + ): + packages[-1] += ',' + fragment + else: + packages.append(fragment) + if params['state'] in ['present', 'installed']: - ensure(module, 'present', params['name']) + ensure(module, 'present', packages) elif params['state'] in ['latest']: - ensure(module, 'latest', params['name']) + ensure(module, 'latest', packages) elif params['state'] in ['absent', 'uninstalled', 'removed']: - ensure(module, 'absent', params['name']) + ensure(module, 'absent', packages) def ensure(module, state, packages): From 9efac37b1be30cdbdb0cbfef7dd02455ec98de14 Mon Sep 17 00:00:00 2001 From: John Barker Date: Wed, 31 Dec 2014 22:10:18 +0000 Subject: [PATCH 028/224] Add a note about debconf passwords always being recorded as changed --- system/debconf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/system/debconf.py b/system/debconf.py index 7f5ea0368ca..592c2c865c7 100644 --- a/system/debconf.py +++ b/system/debconf.py @@ -34,6 +34,7 @@ notes: - A number of questions have to be answered (depending on the package). Use 'debconf-show ' on any Debian or derivative with the package installed to see questions/settings available. 
+ - Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords. requirements: [ debconf, debconf-utils ] options: name: From c9656ff3b4f00b0b3919a13c3cd3e0ac6229d92e Mon Sep 17 00:00:00 2001 From: Sterfield Date: Sat, 3 Jan 2015 18:01:13 +0100 Subject: [PATCH 029/224] Now allow every type of locales + archlinux fix The previous version of this code was supporting only locales using the format "_.". But all the locales that doesn't have this format were not installable (such as "fr_FR" or "fr_FR@euro"). Also, if an invalid locales was provided, the module kept sending a "changed" status. Now : * if the user provides an invalid locales, the module failed. Locales are verified using /etc/locale.gen or /usr/share/i18n/SUPPORTED if Ubuntu * Every types of valid locales are now supported. * The locale module was not working on Archlinux, as there's no space between the "#" and the locale. This is now supported. Credits goes to danderson189, this is his code. This module was tested on debian jessie, ubuntu 14 LTS and last Archlinux. --- system/locale_gen.py | 62 +++++++++++++++++++++++++++++++++----------- 1 file changed, 47 insertions(+), 15 deletions(-) diff --git a/system/locale_gen.py b/system/locale_gen.py index 9ff0a87f36a..a698c620311 100644 --- a/system/locale_gen.py +++ b/system/locale_gen.py @@ -36,6 +36,26 @@ EXAMPLES = ''' # location module specific support methods. # +def is_available(name, ubuntuMode): + """Check if the given locale is available on the system. 
This is done by + checking either : + * if the locale is present in /etc/locales.gen + * or if the locale is present in /usr/share/i18n/SUPPORTED""" + if ubuntuMode: + __regexp = '^(?P\S+_\S+) (?P\S+)\s*$' + __locales_available = '/usr/share/i18n/SUPPORTED' + else: + __regexp = '^#{0,1}\s*(?P\S+_\S+) (?P\S+)\s*$' + __locales_available = '/etc/locale.gen' + + re_compiled = re.compile(__regexp) + with open(__locales_available, 'r') as fd: + for line in fd: + result = re_compiled.match(line) + if result and result.group('locale') == name: + return True + return False + def is_present(name): """Checks if the given locale is currently installed.""" output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0] @@ -53,32 +73,42 @@ def replace_line(existing_line, new_line): with open("/etc/locale.gen", "w") as f: f.write("".join(lines)) -def apply_change(targetState, name, encoding): +def set_locale(name, enabled=True): + """ Sets the state of the locale. Defaults to enabled. """ + search_string = '#{0,1}\s*%s (?P.+)' % name + if enabled: + new_string = '%s \g' % (name) + else: + new_string = '# %s \g' % (name) + with open("/etc/locale.gen", "r") as f: + lines = [re.sub(search_string, new_string, line) for line in f] + with open("/etc/locale.gen", "w") as f: + f.write("".join(lines)) + +def apply_change(targetState, name): """Create or remove locale. - + Keyword arguments: targetState -- Desired state, either present or absent. name -- Name including encoding such as de_CH.UTF-8. - encoding -- Encoding such as UTF-8. """ if targetState=="present": # Create locale. - replace_line("# "+name+" "+encoding, name+" "+encoding) + set_locale(name, enabled=True) else: # Delete locale. 
- replace_line(name+" "+encoding, "# "+name+" "+encoding) + set_locale(name, enabled=False) localeGenExitValue = call("locale-gen") if localeGenExitValue!=0: raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue)) -def apply_change_ubuntu(targetState, name, encoding): +def apply_change_ubuntu(targetState, name): """Create or remove locale. Keyword arguments: targetState -- Desired state, either present or absent. name -- Name including encoding such as de_CH.UTF-8. - encoding -- Encoding such as UTF-8. """ if targetState=="present": # Create locale. @@ -90,7 +120,8 @@ def apply_change_ubuntu(targetState, name, encoding): content = f.readlines() with open("/var/lib/locales/supported.d/local", "w") as f: for line in content: - if line!=(name+" "+encoding+"\n"): + locale, charset = line.split(' ') + if locale != name: f.write(line) # Purge locales and regenerate. # Please provide a patch if you know how to avoid regenerating the locales to keep! @@ -113,8 +144,6 @@ def main(): ) name = module.params['name'] - if not "." in name: - module.fail_json(msg="Locale does not match pattern. Did you specify the encoding?") state = module.params['state'] if not os.path.exists("/etc/locale.gen"): @@ -126,23 +155,26 @@ def main(): else: # We found the common way to manage locales. 
ubuntuMode = False - + + if not is_available(name, ubuntuMode): + module.fail_json(msg="The locales you've entered is not available " + "on your system.") + prev_state = "present" if is_present(name) else "absent" changed = (prev_state!=state) if module.check_mode: module.exit_json(changed=changed) else: - encoding = name.split(".")[1] if changed: try: if ubuntuMode==False: - apply_change(state, name, encoding) + apply_change(state, name) else: - apply_change_ubuntu(state, name, encoding) + apply_change_ubuntu(state, name) except EnvironmentError as e: module.fail_json(msg=e.strerror, exitValue=e.errno) - + module.exit_json(name=name, changed=changed, msg="OK") # import module snippets From 48ecbd037e3ef33bdc5c5c1ad9dbf253457168da Mon Sep 17 00:00:00 2001 From: Sterfield Date: Sat, 3 Jan 2015 22:24:27 +0100 Subject: [PATCH 030/224] Missing import re I don't even know how it magically worked, but the fact is the code was correct, and ran OK without the import re. --- system/locale_gen.py | 1 + 1 file changed, 1 insertion(+) diff --git a/system/locale_gen.py b/system/locale_gen.py index a698c620311..3dc163d4872 100644 --- a/system/locale_gen.py +++ b/system/locale_gen.py @@ -4,6 +4,7 @@ import os import os.path from subprocess import Popen, PIPE, call +import re DOCUMENTATION = ''' --- From 5acc63454907bcb7e7aed77b3089a66866be8a0e Mon Sep 17 00:00:00 2001 From: Misho Krastev Date: Mon, 5 Jan 2015 01:52:20 -0800 Subject: [PATCH 031/224] fix lvol module to properly query the size of logical volumes --- system/lvol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index 96f1b846e27..93f95a6a238 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -153,7 +153,7 @@ def main(): unit = size_unit rc, current_lvs, err = module.run_command( - "lvs --noheadings -o lv_name,size --units %s --separator ';' %s" % (unit, vg)) + "lvs --noheadings --nosuffix -o lv_name,size --units %s --separator ';' %s" % (unit, vg)) if rc != 0: if 
state == 'absent': From c4986bf78d7e7ab96527c4d64546584bbff83e40 Mon Sep 17 00:00:00 2001 From: Stanislav Antic Date: Mon, 5 Jan 2015 14:46:27 +0100 Subject: [PATCH 032/224] Fixes #155 --- database/mysql/mysql_replication.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index b93150a43b5..f75d6a05b5d 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -235,12 +235,12 @@ def main(): master_host=dict(default=None), master_user=dict(default=None), master_password=dict(default=None), - master_port=dict(default=None), - master_connect_retry=dict(default=None), + master_port=dict(default=None, type='int'), + master_connect_retry=dict(default=None, type='int'), master_log_file=dict(default=None), - master_log_pos=dict(default=None), + master_log_pos=dict(default=None, type='int'), relay_log_file=dict(default=None), - relay_log_pos=dict(default=None), + relay_log_pos=dict(default=None, type='int'), master_ssl=dict(default=False, type='bool'), master_ssl_ca=dict(default=None), master_ssl_capath=dict(default=None), From d4f5b6d41a707f0f89d79882ac2e68864935184f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 5 Jan 2015 08:40:57 -0800 Subject: [PATCH 033/224] Fixes for docs building --- system/gluster_volume.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 087a4a0f6de..00e2cdeba65 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -24,6 +24,7 @@ module: gluster_volume short_description: Manage GlusterFS volumes description: - Create, remove, start, stop and tune GlusterFS volumes +version_added: "1.9" options: name: required: true @@ -34,7 +35,7 @@ options: choices: [ 'present', 'absent', 'started', 'stopped' ] description: - Use present/absent ensure if a volume exists or not, - use started/stopped to control it's 
availability. + use started/stopped to control it's availability. cluster: required: false description: From 3981872375debccb3798a0ee2fc886f3b86fbb72 Mon Sep 17 00:00:00 2001 From: Bert Mertens Date: Tue, 6 Jan 2015 09:57:30 +0100 Subject: [PATCH 034/224] Fix getent behaviour with fail_key=False --- system/getent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/getent.py b/system/getent.py index 7da1be45fae..bb6d162398c 100644 --- a/system/getent.py +++ b/system/getent.py @@ -86,7 +86,7 @@ def main(): database = dict(required=True), key = dict(required=False, default=None), split = dict(required=False, default=None), - fail_key = dict(required=False, default=True), + fail_key = dict(required=False, type='bool', default=True), ), supports_check_mode = True, ) From 1bb8abffa367d9c186dd3814bc7c5221dd885526 Mon Sep 17 00:00:00 2001 From: Chris Schmidt Date: Wed, 7 Jan 2015 00:11:16 -0700 Subject: [PATCH 035/224] Changed "target" to "dest" --- packaging/maven_artifact.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/packaging/maven_artifact.py b/packaging/maven_artifact.py index 0c8070d1e46..f02ad166d2b 100755 --- a/packaging/maven_artifact.py +++ b/packaging/maven_artifact.py @@ -82,7 +82,7 @@ options: required: false default: null version_added: 0.0.1 - target: + dest: description: The path where the artifact should be written to required: true default: false @@ -97,16 +97,16 @@ options: EXAMPLES = ''' # Download the latest version of the commons-collections artifact from Maven Central -- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections target=/tmp/commons-collections-latest.jar +- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections dest=/tmp/commons-collections-latest.jar # Download Apache Commons-Collections 3.2 from Maven Central -- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 
target=/tmp/commons-collections-3.2.jar +- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 dest=/tmp/commons-collections-3.2.jar # Download an artifact from a private repository requiring authentication -- maven_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass target=/tmp/library-name-latest.jar +- maven_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass dest=/tmp/library-name-latest.jar # Download a WAR File to the Tomcat webapps directory to be deployed -- maven_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven target=/var/lib/tomcat7/webapps/web-app.war +- maven_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven dest=/var/lib/tomcat7/webapps/web-app.war ''' class Artifact(object): @@ -321,8 +321,8 @@ def main(): repository_url = dict(default=None), username = dict(default=None), password = dict(default=None), - state = dict(default="latest", choices=["present","absent"]), # TODO - Implement a "latest" state - target = dict(default=None), + state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state + dest = dict(default=None), ) ) @@ -335,7 +335,7 @@ def main(): repository_username = module.params["username"] repository_password = module.params["password"] state = module.params["state"] - target = module.params["target"] + dest = module.params["dest"] if not repository_url: repository_url = "http://repo1.maven.org/maven2" @@ -348,19 +348,19 @@ def main(): module.fail_json(msg=e.args[0]) prev_state = "absent" - if os.path.lexists(target): + if os.path.lexists(dest): prev_state = "present" else: - path = os.path.dirname(target) + path = os.path.dirname(dest) if not os.path.exists(path): os.makedirs(path) if 
prev_state == "present": - module.exit_json(target=target, state=state, changed=False) + module.exit_json(dest=dest, state=state, changed=False) try: if downloader.download(artifact, target): - module.exit_json(state=state, target=target, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True) + module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True) else: module.fail_json(msg="Unable to download the artifact") except ValueError as e: From 44bfe5a7d6c85ad2a94cda25fcff28af5d115c7b Mon Sep 17 00:00:00 2001 From: Chris Schmidt Date: Wed, 7 Jan 2015 00:35:48 -0700 Subject: [PATCH 036/224] Fixed bug where passing a directory as dest failed --- packaging/maven_artifact.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packaging/maven_artifact.py b/packaging/maven_artifact.py index f02ad166d2b..bf4ca59f92c 100755 --- a/packaging/maven_artifact.py +++ b/packaging/maven_artifact.py @@ -348,6 +348,9 @@ def main(): module.fail_json(msg=e.args[0]) prev_state = "absent" + if os.path.isdir(dest): + dest = dest + "/" + artifact_id + "-" + version + ".jar" + if os.path.lexists(dest): prev_state = "present" else: @@ -359,7 +362,7 @@ def main(): module.exit_json(dest=dest, state=state, changed=False) try: - if downloader.download(artifact, target): + if downloader.download(artifact, dest): module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True) else: module.fail_json(msg="Unable to download the artifact") From a32869d492f5e2adbfda4a132d565c8219d18890 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 7 Jan 2015 09:20:45 -0800 Subject: [PATCH 037/224] Allow 0 to be specified --- 
database/mysql/mysql_replication.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index f75d6a05b5d..70e226a6f5b 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -317,7 +317,6 @@ def main(): module.fail_json(msg="Server is not configured as mysql slave") elif mode in "changemaster": - print "Change master" chm=[] chm_params = {} if master_host: @@ -329,22 +328,22 @@ def main(): if master_password: chm.append("MASTER_PASSWORD=%(master_password)s") chm_params['master_password'] = master_password - if master_port: + if master_port is not None: chm.append("MASTER_PORT=%(master_port)s") chm_params['master_port'] = master_port - if master_connect_retry: + if master_connect_retry is not None: chm.append("MASTER_CONNECT_RETRY=%(master_connect_retry)s") chm_params['master_connect_retry'] = master_connect_retry if master_log_file: chm.append("MASTER_LOG_FILE=%(master_log_file)s") chm_params['master_log_file'] = master_log_file - if master_log_pos: + if master_log_pos is not None: chm.append("MASTER_LOG_POS=%(master_log_pos)s") chm_params['master_log_pos'] = master_log_pos if relay_log_file: chm.append("RELAY_LOG_FILE=%(relay_log_file)s") chm_params['relay_log_file'] = relay_log_file - if relay_log_pos: + if relay_log_pos is not None: chm.append("RELAY_LOG_POS=%(relay_log_pos)s") chm_params['relay_log_pos'] = relay_log_pos if master_ssl: From 1d60d33dc1287235f96fab937708292705303e95 Mon Sep 17 00:00:00 2001 From: Yuri Kunde Schlesner Date: Thu, 8 Jan 2015 21:44:58 -0200 Subject: [PATCH 038/224] crypttab: Fix parameter checking with state=absent Only the `name` parameter is required when removing an entry, but the module tried to ensure at least one other parameter was set. 
--- system/crypttab.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/crypttab.py b/system/crypttab.py index 70230fa53e1..ffb60516f3d 100644 --- a/system/crypttab.py +++ b/system/crypttab.py @@ -103,7 +103,7 @@ def main(): state = module.params['state'] path = module.params['path'] - if backing_device is None and password is None and opts is None: + if state != 'absent' and backing_device is None and password is None and opts is None: module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'", **module.params) From 7248c0861da31c3c6e0155c9df478cc0b4a22804 Mon Sep 17 00:00:00 2001 From: Justin Lecher Date: Fri, 9 Jan 2015 16:07:42 +0100 Subject: [PATCH 039/224] Allow disabling of autorefresh for zypper repositories In case of release repositories or other special cases you might not need the autorefreshing of the repos. This patch adds a configure option instead of hard enabling this. Signed-off-by: Justin Lecher --- packaging/os/zypper_repository.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/packaging/os/zypper_repository.py b/packaging/os/zypper_repository.py index 5e41683734b..44a0ed6029c 100644 --- a/packaging/os/zypper_repository.py +++ b/packaging/os/zypper_repository.py @@ -2,6 +2,7 @@ # encoding: utf-8 # (c) 2013, Matthias Vogelgesang +# (c) 2014, Justin Lecher # # This file is part of Ansible # @@ -58,6 +59,13 @@ options: default: "no" choices: [ "yes", "no" ] aliases: [] + refresh: + description: + - Enable autorefresh of the repository. 
+ required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [] notes: [] requirements: [ zypper ] ''' @@ -145,11 +153,11 @@ def repo_exists(module, old_zypper, **kwargs): return False -def add_repo(module, repo, alias, description, disable_gpg_check, old_zypper): +def add_repo(module, repo, alias, description, disable_gpg_check, old_zypper, refresh): if old_zypper: cmd = ['/usr/bin/zypper', 'sa'] else: - cmd = ['/usr/bin/zypper', 'ar', '--check', '--refresh'] + cmd = ['/usr/bin/zypper', 'ar', '--check'] if repo.startswith("file:/") and old_zypper: cmd.extend(['-t', 'Plaindir']) @@ -162,6 +170,9 @@ def add_repo(module, repo, alias, description, disable_gpg_check, old_zypper): if disable_gpg_check and not old_zypper: cmd.append('--no-gpgcheck') + if refresh: + cmd.append('--refresh') + cmd.append(repo) if not repo.endswith('.repo'): @@ -216,6 +227,7 @@ def main(): state=dict(choices=['present', 'absent'], default='present'), description=dict(required=False), disable_gpg_check = dict(required=False, default='no', type='bool'), + refresh = dict(required=False, default='yes', type='bool'), ), supports_check_mode=False, ) @@ -225,6 +237,7 @@ def main(): name = module.params['name'] description = module.params['description'] disable_gpg_check = module.params['disable_gpg_check'] + refresh = module.params['refresh'] def exit_unchanged(): module.exit_json(changed=False, repo=repo, state=state, name=name) @@ -260,7 +273,7 @@ def main(): if exists: exit_unchanged() - changed = add_repo(module, repo, name, description, disable_gpg_check, old_zypper) + changed = add_repo(module, repo, name, description, disable_gpg_check, old_zypper, refresh) elif state == 'absent': if not exists: exit_unchanged() From 8fa3e97d30dab588b433041b2fc4090fe960f28b Mon Sep 17 00:00:00 2001 From: Daniel Newport Date: Sat, 10 Jan 2015 10:40:03 -0500 Subject: [PATCH 040/224] fix pacman upgrade bug --- packaging/os/pacman.py | 48 ++++++++++++++++++++++++++++++++---------- 1 file changed, 37 
insertions(+), 11 deletions(-) diff --git a/packaging/os/pacman.py b/packaging/os/pacman.py index 0b23a2f93ce..b04bfb96d07 100644 --- a/packaging/os/pacman.py +++ b/packaging/os/pacman.py @@ -67,6 +67,9 @@ EXAMPLES = ''' # Install package foo - pacman: name=foo state=present +# Upgrade package foo +- pacman: name=foo state=present update_cache=yes + # Remove packages foo and bar - pacman: name=foo,bar state=absent @@ -85,17 +88,37 @@ import sys PACMAN_PATH = "/usr/bin/pacman" +def get_version(pacman_output): + """Take pacman -Qi or pacman -Si output and get the Version""" + lines = pacman_output.split('\n') + for line in lines: + if 'Version' in line: + return line.split(':')[1].strip() + return None + def query_package(module, name, state="present"): - # pacman -Q returns 0 if the package is installed, - # 1 if it is not installed + """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, and a second boolean to indicate if the package is up-to-date.""" if state == "present": - cmd = "pacman -Q %s" % (name) - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - - if rc == 0: - return True - - return False + lcmd = "pacman -Qi %s" % (name) + lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) + if lrc != 0: + # package is not installed locally + return False, False + + # get the version installed locally (if any) + lversion = get_version(lstdout) + + rcmd = "pacman -Si %s" % (name) + rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) + # get the version in the repository + rversion = get_version(rstdout) + + if rrc == 0: + # Return True to indicate that the package is installed locally, and the result of the version number comparison + # to determine if the package is up-to-date. 
+ return True, (lversion == rversion) + + return False, False def update_package_db(module): @@ -118,7 +141,8 @@ def remove_packages(module, packages): # Using a for loop incase of error, we can report the package that failed for package in packages: # Query the package first, to see if we even need to remove - if not query_package(module, package): + installed, updated = query_package(module, package) + if not installed: continue cmd = "pacman -%s %s --noconfirm" % (args, package) @@ -140,7 +164,9 @@ def install_packages(module, packages, package_files): install_c = 0 for i, package in enumerate(packages): - if query_package(module, package): + # if the package is installed and up-to-date then skip + installed, updated = query_package(module, package) + if installed and updated: continue if package_files[i]: From 2b1b3df40f9e0c5b1af2a8360acb2a88f06a6c12 Mon Sep 17 00:00:00 2001 From: Daniel Newport Date: Sat, 10 Jan 2015 23:56:05 -0500 Subject: [PATCH 041/224] use state=latest to ensure the latest version is installed. mimics functionality of the apt and yum modules. --- packaging/os/pacman.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/packaging/os/pacman.py b/packaging/os/pacman.py index b04bfb96d07..a91f8e3054d 100644 --- a/packaging/os/pacman.py +++ b/packaging/os/pacman.py @@ -42,7 +42,7 @@ options: - Desired state of the package. 
required: false default: "present" - choices: ["present", "absent"] + choices: ["present", "absent", "latest"] recurse: description: @@ -68,7 +68,7 @@ EXAMPLES = ''' - pacman: name=foo state=present # Upgrade package foo -- pacman: name=foo state=present update_cache=yes +- pacman: name=foo state=latest update_cache=yes # Remove packages foo and bar - pacman: name=foo,bar state=absent @@ -160,13 +160,13 @@ def remove_packages(module, packages): module.exit_json(changed=False, msg="package(s) already absent") -def install_packages(module, packages, package_files): +def install_packages(module, state, packages, package_files): install_c = 0 for i, package in enumerate(packages): - # if the package is installed and up-to-date then skip + # if the package is installed and state == present or state == latest and is up-to-date then skip installed, updated = query_package(module, package) - if installed and updated: + if installed and (state == 'present' or (state == 'latest' and updated)): continue if package_files[i]: @@ -191,9 +191,10 @@ def install_packages(module, packages, package_files): def check_packages(module, packages, state): would_be_changed = [] for package in packages: - installed = query_package(module, package) - if ((state == "present" and not installed) or - (state == "absent" and installed)): + installed, updated = query_package(module, package) + if ((state in ["present", "latest"] and not installed) or + (state == "absent" and installed) or + (state == "latest" and not updated)): would_be_changed.append(package) if would_be_changed: if state == "absent": @@ -208,7 +209,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(aliases=['pkg']), - state = dict(default='present', choices=['present', 'installed', 'absent', 'removed']), + state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']), recurse = dict(default='no', choices=BOOLEANS, type='bool'), update_cache = dict(default='no', 
aliases=['update-cache'], choices=BOOLEANS, type='bool')), required_one_of = [['name', 'update_cache']], @@ -249,8 +250,8 @@ def main(): if module.check_mode: check_packages(module, pkgs, p['state']) - if p['state'] == 'present': - install_packages(module, pkgs, pkg_files) + if p['state'] in ['present', 'latest']: + install_packages(module, p['state'], pkgs, pkg_files) elif p['state'] == 'absent': remove_packages(module, pkgs) From e15fba515682f38f86b2dfd0fca6a8e01652734f Mon Sep 17 00:00:00 2001 From: Alexander Gubin Date: Mon, 12 Jan 2015 17:43:51 +0100 Subject: [PATCH 042/224] lvol: Cast size(LogicalVolumeSize) to lower to fix the difference between lvs and lvcreate --- system/lvol.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/lvol.py b/system/lvol.py index e9d477edf86..8276d753db1 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -132,8 +132,8 @@ def main(): # LVCREATE(8) -L --size option unit elif size[-1].isalpha(): - if size[-1] in 'bBsSkKmMgGtTpPeE': - size_unit = size[-1] + if size[-1].lower() in 'bskmgtpe': + size_unit = size[-1].lower() if size[0:-1].isdigit(): size = int(size[0:-1]) else: From 25f595c2bada725e3b8d82a69e6fb0b680ff34f9 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bonicoli Date: Mon, 12 Jan 2015 18:52:25 +0100 Subject: [PATCH 043/224] Avoid to use the builtin 'echo' By default, the interpretation of escape characters could be disabled (bash) or enabled (dash). 
--- system/debconf.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/system/debconf.py b/system/debconf.py index 592c2c865c7..b5af4744d56 100644 --- a/system/debconf.py +++ b/system/debconf.py @@ -86,8 +86,6 @@ debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license- debconf: name='tzdata' ''' -import pipes - def get_selections(module, pkg): cmd = [module.get_bin_path('debconf-show', True), pkg] rc, out, err = module.run_command(' '.join(cmd)) @@ -106,14 +104,14 @@ def get_selections(module, pkg): def set_selection(module, pkg, question, vtype, value, unseen): - data = ' '.join([ question, vtype, value ]) - setsel = module.get_bin_path('debconf-set-selections', True) - cmd = ["echo %s %s |" % (pipes.quote(pkg), pipes.quote(data)), setsel] + cmd = [setsel] if unseen: cmd.append('-u') - return module.run_command(' '.join(cmd), use_unsafe_shell=True) + data = ' '.join([pkg, question, vtype, value]) + + return module.run_command(cmd, data=data) def main(): From 40298a8f2422ab230b23f49e841855d6039dd1a8 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bonicoli Date: Mon, 12 Jan 2015 19:02:30 +0100 Subject: [PATCH 044/224] remove unused variable --- system/debconf.py | 1 - 1 file changed, 1 deletion(-) diff --git a/system/debconf.py b/system/debconf.py index b5af4744d56..0deaff25eb1 100644 --- a/system/debconf.py +++ b/system/debconf.py @@ -135,7 +135,6 @@ def main(): unseen = module.params["unseen"] prev = get_selections(module, pkg) - diff = '' changed = False msg = "" From 8658b6783a7c2e8d292ac170eb636662bd01bee3 Mon Sep 17 00:00:00 2001 From: Justin Lecher Date: Wed, 14 Jan 2015 15:50:18 +0100 Subject: [PATCH 045/224] Keep default behaviour for refresh Signed-off-by: Justin Lecher --- packaging/os/zypper_repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/zypper_repository.py b/packaging/os/zypper_repository.py index 44a0ed6029c..9cdce3ee4f3 100644 --- 
a/packaging/os/zypper_repository.py +++ b/packaging/os/zypper_repository.py @@ -63,7 +63,7 @@ options: description: - Enable autorefresh of the repository. required: false - default: "no" + default: "yes" choices: [ "yes", "no" ] aliases: [] notes: [] From 16d622aab833f3cb75a7253325dee181a009621c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 14 Jan 2015 20:49:44 -0800 Subject: [PATCH 046/224] Fix documentation in zypper_repository --- packaging/os/zypper_repository.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/packaging/os/zypper_repository.py b/packaging/os/zypper_repository.py index 9cdce3ee4f3..f208305fe60 100644 --- a/packaging/os/zypper_repository.py +++ b/packaging/os/zypper_repository.py @@ -52,16 +52,16 @@ options: - A description of the repository disable_gpg_check: description: - - Whether to disable GPG signature checking of - all packages. Has an effect only if state is - I(present). + - Whether to disable GPG signature checking of + all packages. Has an effect only if state is + I(present). required: false default: "no" choices: [ "yes", "no" ] aliases: [] refresh: - description: - - Enable autorefresh of the repository. + description: + - Enable autorefresh of the repository. required: false default: "yes" choices: [ "yes", "no" ] From e529279feaf98bb9fc09c4deed2ec6df962704f8 Mon Sep 17 00:00:00 2001 From: Jens Carl Date: Thu, 15 Jan 2015 16:15:17 +0000 Subject: [PATCH 047/224] Fix typo Fix typo and remove an obsolete space. --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5d9c47f8303..7959fffa7cf 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ All new modules should be submitted here, and have a chance to be promoted to co Reporting bugs ============== -Take care to submit tickets to the appropriate repo where modules are contained. 
The repo is mentioned at the bottom of modlue documentation page at [docs.ansible.com](http://docs.ansible.com/). +Take care to submit tickets to the appropriate repo where modules are contained. The repo is mentioned at the bottom of module documentation page at [docs.ansible.com](http://docs.ansible.com/). Testing modules =============== @@ -18,4 +18,4 @@ Ansible [module development guide](http://docs.ansible.com/developing_modules.ht License ======= -As with Ansible, modules distributed with Ansible are GPLv3 licensed. User generated modules not part of this project can be of any license. +As with Ansible, modules distributed with Ansible are GPLv3 licensed. User generated modules not part of this project can be of any license. From 3795ab0379000f793ec3e82e57d81756ee37206e Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Fri, 24 Oct 2014 13:41:29 +0200 Subject: [PATCH 048/224] Clear rabbitmq_user pw when none is specified --- messaging/rabbitmq_user.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/messaging/rabbitmq_user.py b/messaging/rabbitmq_user.py index 1cbee360dff..b19ec1fc097 100644 --- a/messaging/rabbitmq_user.py +++ b/messaging/rabbitmq_user.py @@ -162,7 +162,11 @@ class RabbitMqUser(object): return dict() def add(self): - self._exec(['add_user', self.username, self.password]) + if self.password is not None: + self._exec(['add_user', self.username, self.password]) + else + self._exec(['add_user', self.username, '']) + self._exec(['clear_password', self.username]) def delete(self): self._exec(['delete_user', self.username]) From 2396f36f1192ba47723735f563972ac1bf695079 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 19 Jan 2015 09:40:04 -0800 Subject: [PATCH 049/224] Fix typo causing SyntaxError (missing colon) --- messaging/rabbitmq_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/messaging/rabbitmq_user.py b/messaging/rabbitmq_user.py index b19ec1fc097..f494ce802d9 100644 --- 
a/messaging/rabbitmq_user.py +++ b/messaging/rabbitmq_user.py @@ -164,7 +164,7 @@ class RabbitMqUser(object): def add(self): if self.password is not None: self._exec(['add_user', self.username, self.password]) - else + else: self._exec(['add_user', self.username, '']) self._exec(['clear_password', self.username]) From 5ae3bbaf05f5b08e688c0290a0103bf0dd94c421 Mon Sep 17 00:00:00 2001 From: Giovanni Tirloni Date: Tue, 20 Jan 2015 12:35:13 -0500 Subject: [PATCH 050/224] Fix dangerous use of empty list as default arg (mutable) --- monitoring/nagios.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 9219766b86a..c564e712b04 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -364,7 +364,7 @@ class Nagios(object): return notif_str - def schedule_svc_downtime(self, host, services=[], minutes=30): + def schedule_svc_downtime(self, host, services=None, minutes=30): """ This command is used to schedule downtime for a particular service. @@ -378,6 +378,10 @@ class Nagios(object): """ cmd = "SCHEDULE_SVC_DOWNTIME" + + if services is None: + services = [] + for service in services: dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, svc=service) self._write_command(dt_cmd_str) @@ -518,7 +522,7 @@ class Nagios(object): notif_str = self._fmt_notif_str(cmd, host) self._write_command(notif_str) - def disable_svc_notifications(self, host, services=[]): + def disable_svc_notifications(self, host, services=None): """ This command is used to prevent notifications from being sent out for the specified service. 
@@ -530,6 +534,10 @@ class Nagios(object): """ cmd = "DISABLE_SVC_NOTIFICATIONS" + + if services is None: + services = [] + for service in services: notif_str = self._fmt_notif_str(cmd, host, svc=service) self._write_command(notif_str) @@ -628,7 +636,7 @@ class Nagios(object): else: return "Fail: could not write to the command file" - def enable_svc_notifications(self, host, services=[]): + def enable_svc_notifications(self, host, services=None): """ Enables notifications for a particular service. @@ -638,6 +646,10 @@ class Nagios(object): """ cmd = "ENABLE_SVC_NOTIFICATIONS" + + if services is None: + services = [] + nagios_return = True return_str_list = [] for service in services: From 42f79478a798f4b1d8eed2a1666e5727fb362747 Mon Sep 17 00:00:00 2001 From: Hiroshi Umehara Date: Wed, 21 Jan 2015 16:10:12 +0900 Subject: [PATCH 051/224] Add EUC-JP locale name normalization The function normalizes checks for UTF-8, but the same issue exists for other locales as well. This fix adds normalization for EUC-JP, a Japanese locale. --- system/locale_gen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/locale_gen.py b/system/locale_gen.py index 9ff0a87f36a..edf637fecb8 100644 --- a/system/locale_gen.py +++ b/system/locale_gen.py @@ -44,7 +44,7 @@ def is_present(name): def fix_case(name): """locale -a might return the encoding in either lower or upper case. 
Passing through this function makes them uniform for comparisons.""" - return name.replace(".utf8", ".UTF-8") + return name.replace(".utf8", ".UTF-8").replace(".eucjp", ".EUC-JP") def replace_line(existing_line, new_line): """Replaces lines in /etc/locale.gen""" From 0f4502982728af13e5b1ba12b470d2c76d776972 Mon Sep 17 00:00:00 2001 From: Rob White Date: Thu, 22 Jan 2015 11:01:33 +1100 Subject: [PATCH 052/224] Changed status() to be case-sensitive of process --- monitoring/monit.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/monit.py b/monitoring/monit.py index 558f1e696f2..b1c1b9453cd 100644 --- a/monitoring/monit.py +++ b/monitoring/monit.py @@ -75,8 +75,8 @@ def main(): # Sample output lines: # Process 'name' Running # Process 'name' Running - restart pending - parts = line.lower().split() - if len(parts) > 2 and parts[0] == 'process' and parts[1] == "'%s'" % name: + parts = line.split() + if len(parts) > 2 and parts[0] == 'Process' and parts[1] == "'%s'" % name: return ' '.join(parts[2:]) else: return '' From 13285765a8a41381e214d171813fc715b6cc728d Mon Sep 17 00:00:00 2001 From: Rob White Date: Thu, 22 Jan 2015 11:26:00 +1100 Subject: [PATCH 053/224] Keep 'process' lowercase to protect against upstream changes --- monitoring/monit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/monit.py b/monitoring/monit.py index b1c1b9453cd..8772d22b2d8 100644 --- a/monitoring/monit.py +++ b/monitoring/monit.py @@ -76,7 +76,7 @@ def main(): # Process 'name' Running # Process 'name' Running - restart pending parts = line.split() - if len(parts) > 2 and parts[0] == 'Process' and parts[1] == "'%s'" % name: + if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name: return ' '.join(parts[2:]) else: return '' From adb1f0a1c8b8d60f5162e3121275decdaedfbf33 Mon Sep 17 00:00:00 2001 From: Rob White Date: Thu, 22 Jan 2015 13:37:37 +1100 Subject: [PATCH 054/224] Convert symlinks specified in 
pvs to actual path --- system/lvg.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/system/lvg.py b/system/lvg.py index e568e9df677..295ee24e3c6 100644 --- a/system/lvg.py +++ b/system/lvg.py @@ -135,7 +135,9 @@ def main(): elif state == 'present': module.fail_json(msg="No physical volumes given.") - + # LVM always uses real paths not symlinks so replace symlinks with actual path + for idx, dev in enumerate(dev_list): + dev_list[idx] = os.path.realpath(dev) if state=='present': ### check given devices From 759e03247301ad47b537a91dbd60701919fa1f68 Mon Sep 17 00:00:00 2001 From: Hiroshi Umehara Date: Thu, 22 Jan 2015 12:07:10 +0900 Subject: [PATCH 055/224] Generalize locale name normalization --- system/locale_gen.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/system/locale_gen.py b/system/locale_gen.py index edf637fecb8..a9a17e38ab1 100644 --- a/system/locale_gen.py +++ b/system/locale_gen.py @@ -32,6 +32,11 @@ EXAMPLES = ''' - locale_gen: name=de_CH.UTF-8 state=present ''' +LOCALE_NORMALIZATION = { + ".utf8": ".UTF-8", + ".eucjp": ".EUC-JP", +} + # =========================================== # location module specific support methods. # @@ -44,7 +49,9 @@ def is_present(name): def fix_case(name): """locale -a might return the encoding in either lower or upper case. Passing through this function makes them uniform for comparisons.""" - return name.replace(".utf8", ".UTF-8").replace(".eucjp", ".EUC-JP") + for s, r in LOCALE_NORMALIZATION.iteritems(): + name = name.replace(s, r) + return name def replace_line(existing_line, new_line): """Replaces lines in /etc/locale.gen""" From ea6c887d6c768d456226ae881bb8b4292bd26058 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Sat, 24 Jan 2015 01:33:53 +0000 Subject: [PATCH 056/224] Initial commit of Ansible support for the Consul clustering framework (http://consul.io). 
Submission includes support for - creating and registering services and checks - reading, writing and lookup for values in consul's kv store - creating and manipulating sessions for distributed locking on values in the kv - creating and manipulating ACLs for restricting access to the kv store - inventory support that reads the Consul catalog and group nodes according to - datacenters - exposed services - service availability - arbitrary groupings from the kv store This submission makes extensive use of the python-consul library and this is required as a dependency and can be installed from pip. The tests were written to target a vagrant cluster which can be setup by following the instructions here http://github.com/sgargan/consul-vagrant --- clustering/consul | 463 ++++++++++++++++++++++++++++++++++++++ clustering/consul_acl | 298 ++++++++++++++++++++++++ clustering/consul_kv | 238 ++++++++++++++++++++ clustering/consul_session | 213 ++++++++++++++++++ 4 files changed, 1212 insertions(+) create mode 100644 clustering/consul create mode 100644 clustering/consul_acl create mode 100644 clustering/consul_kv create mode 100644 clustering/consul_session diff --git a/clustering/consul b/clustering/consul new file mode 100644 index 00000000000..fa1e06c3678 --- /dev/null +++ b/clustering/consul @@ -0,0 +1,463 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. 
If not, see . + +DOCUMENTATION = """ +module: consul +short_description: "Add, modify & delete services within a consul cluster. + See http://conul.io for more details." +description: + - registers services and checks for an agent with a consul cluster. A service + is some process running on the agent node that should be advertised by + consul's discovery mechanism. It may optionally supply a check definition + that will be used to notify the consul cluster of the health of the service. + Checks may also be registered per node e.g. disk usage, or cpu usage and + notify the health of the entire node to the cluster. + Service level checks do not require a check name or id as these are derived + by Consul from the Service name and id respectively by appending 'service:'. + Node level checks require a check_name and optionally a check_id Currently, + there is no complete way to retrieve the script, interval or ttl metadata for + a registered check. Without this metadata it is not possible to tell if + the data supplied with ansible represents a change to a check. As a result + this does not attempt to determine changes and will always report a changed + occurred. An api method is planned to supply this metadata so at that stage + change management will be added. +version_added: "1.9" +author: Steve Gargan (steve.gargan@gmail.com) +options: + state: + description: + - register or deregister the consul service, defaults to present + required: true + choices: ['present', 'absent'] + service_id: + description: + - the ID for the service, must be unique per node, defaults to the + service name + required: false + host: + description: + - host of the consul agent with which to register the service, + defaults to localhost + required: false + notes: + description: + - Notes to attach to check when registering it. + service_name: + desciption: + - Unique name for the service on a node, must be unique per node, + required if registering a service. 
May be ommitted if registering + a node level check + required: false + service_port: + description: + - the port on which the service is listening required for + registration of a service. + required: true + tags: + description: + - a list of tags that will be attached to the service registration. + required: false + script: + description: + - the script/command that will be run periodically to check the health + of the service + required: false + interval: + description: + - the interval at which the service check will be run. This is by + convention a number with a s or m to signify the units of seconds + or minutes. if none is supplied, m will be appended + check_id: + description: + - an ID for the service check, defaults to the check name, ignored if + part of service definition. + check_name: + description: + - a name for the service check, defaults to the check id. required if + standalone, ignored if part of service definition. +""" + +EXAMPLES = ''' + - name: register nginx service with the local consul agent + consul: + name: nginx + port: 80 + + - name: register nginx service with curl check + consul: + name: nginx + port: 80 + script: "curl http://localhost" + interval: 60s + + - name: register nginx with some service tags + consul: + name: nginx + port: 80 + tags: + - prod + - webservers + + - name: remove nginx service + consul: + name: nginx + state: absent + + - name: create a node level check to test disk usage + consul: + check_name: Disk usage + check_id: disk_usage + script: "/opt/disk_usage.py" + interval: 5m + +''' + +import sys +import urllib2 + +try: + import json +except ImportError: + import simplejson as json + +try: + import consul +except ImportError, e: + print "failed=True msg='python-consul required for this module. 
"\ + "see http://python-consul.readthedocs.org/en/latest/#installation'" + sys.exit(1) + + +def register_with_consul(module): + + state = module.params.get('state') + + if state == 'present': + add(module) + else: + remove(module) + + +def add(module): + ''' adds a service or a check depending on supplied configuration''' + check = parse_check(module) + service = parse_service(module) + + if not service and not check: + module.fail_json(msg='a name and port are required to register a service') + + if service: + if check: + service.add_check(check) + add_service(module, service) + elif check: + add_check(module, check) + + +def remove(module): + ''' removes a service or a check ''' + service_id = module.params.get('service_id') or module.params.get('service_name') + check_id = module.params.get('check_id') or module.params.get('check_name') + if not (service_id or check_id): + module.fail_json(msg='services and checks are removed by id or name.'\ + ' please supply a service id/name or a check id/name') + if service_id: + remove_service(module, service_id) + else: + remove_check(module, check_id) + + +def add_check(module, check): + ''' registers a check with the given agent. currently there is no way + retrieve the full metadata of an existing check through the consul api. + Without this we can't compare to the supplied check and so we must assume + a change. 
''' + if not check.name: + module.fail_json(msg='a check name is required for a node level check,'\ + ' one not attached to a service') + + consul_api = get_consul_api(module) + check.register(consul_api) + + module.exit_json(changed=True, + check_id=check.check_id, + check_name=check.name, + script=check.script, + interval=check.interval, + ttl=check.ttl) + + +def remove_check(module, check_id): + ''' removes a check using its id ''' + consul_api = get_consul_api(module) + + if check_id in consul_api.agent.checks(): + consul_api.agent.check.deregister(check_id) + module.exit_json(changed=True, id=check_id) + + module.exit_json(changed=False, id=check_id) + + +def add_service(module, service): + ''' registers a service with the the current agent ''' + result = service + changed = False + + consul_api = get_consul_api(module) + existing = get_service_by_id(consul_api, service.id) + + # there is no way to retreive the details of checks so if a check is present + # in the service it must be reregistered + if service.has_checks() or not(existing or existing == service): + + service.register(consul_api) + # check that it registered correctly + registered = get_service_by_id(consul_api, service.id) + if registered: + result = registered + changed = True + + module.exit_json(changed=changed, + service_id=result.id, + service_name=result.name, + service_port=result.port, + checks=map(lambda x: x.to_dict(), service.checks), + tags=result.tags) + + +def remove_service(module, service_id): + ''' deregister a service from the given agent using its service id ''' + consul_api = get_consul_api(module) + service = get_service_by_id(consul_api, service_id) + if service: + consul_api.agent.service.deregister(service_id) + module.exit_json(changed=True, id=service_id) + + module.exit_json(changed=False, id=service_id) + + +def get_consul_api(module, token=None): + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port'), + 
token=module.params.get('token')) + + +def get_service_by_id(consul_api, service_id): + ''' iterate the registered services and find one with the given id ''' + for name, service in consul_api.agent.services().iteritems(): + if service['ID'] == service_id: + return ConsulService(loaded=service) + + +def parse_check(module): + + if module.params.get('script') and module.params.get('ttl'): + module.fail_json( + msg='check are either script or ttl driven, supplying both does'\ + ' not make sense') + + if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl'): + + return ConsulCheck( + module.params.get('check_id'), + module.params.get('check_name'), + module.params.get('check_node'), + module.params.get('check_host'), + module.params.get('script'), + module.params.get('interval'), + module.params.get('ttl'), + module.params.get('notes') + ) + + +def parse_service(module): + + if module.params.get('service_name') and module.params.get('service_port'): + return ConsulService( + module.params.get('service_id'), + module.params.get('service_name'), + module.params.get('service_port'), + module.params.get('tags'), + ) + elif module.params.get('service_name') and not module.params.get('service_port'): + + module.fail_json( + msg="service_name supplied but no service_port, a port is required"\ + " to configure a service. 
Did you configure the 'port' "\ + "argument meaning 'service_port'?") + + +class ConsulService(): + + def __init__(self, service_id=None, name=None, port=-1, + tags=None, loaded=None): + self.id = self.name = name + if service_id: + self.id = service_id + self.port = port + self.tags = tags + self.checks = [] + if loaded: + self.id = loaded['ID'] + self.name = loaded['Service'] + self.port = loaded['Port'] + self.tags = loaded['Tags'] + + def register(self, consul_api): + if len(self.checks) > 0: + check = self.checks[0] + consul_api.agent.service.register( + self.name, + service_id=self.id, + port=self.port, + tags=self.tags, + script=check.script, + interval=check.interval, + ttl=check.ttl) + else: + consul_api.agent.service.register( + self.name, + service_id=self.id, + port=self.port, + tags=self.tags) + + def add_check(self, check): + self.checks.append(check) + + def checks(self): + return self.checks + + def has_checks(self): + return len(self.checks) > 0 + + def __eq__(self, other): + return (isinstance(other, self.__class__) + and self.id == other.id + and self.name == other.name + and self.port == other.port + and self.tags == other.tags) + + def __ne__(self, other): + return not self.__eq__(other) + + def to_dict(self): + data = {'id': self.id, "name": self.name} + if self.port: + data['port'] = self.port + if self.tags and len(self.tags) > 0: + data['tags'] = self.tags + if len(self.checks) > 0: + data['check'] = self.checks[0].to_dict() + return data + + +class ConsulCheck(): + + def __init__(self, check_id, name, node=None, host='localhost', + script=None, interval=None, ttl=None, notes=None): + self.check_id = self.name = name + if check_id: + self.check_id = check_id + self.script = script + self.interval = str(interval) + + if not self.interval.endswith('m') or self.interval.endswith('s'): + self.interval += 'm' + + self.ttl = ttl + self.notes = notes + self.node = node + self.host = host + + if interval and interval <= 0: + raise Error('check 
interval must be positive') + + if ttl and ttl <= 0: + raise Error('check ttl value must be positive') + + def register(self, consul_api): + consul_api.agent.check.register(self.name, check_id=self.check_id, + script=self.script, + interval=self.interval, + ttl=self.ttl, notes=self.notes) + + def __eq__(self, other): + return (isinstance(other, self.__class__) + and self.check_id == other.check_id + and self.name == other.name + and self.script == script + and self.interval == interval) + + def __ne__(self, other): + return not self.__eq__(other) + + def to_dict(self): + data = {} + self._add(data, 'id', attr='check_id') + self._add(data, 'name', attr='check_name') + self._add(data, 'script') + self._add(data, 'node') + self._add(data, 'notes') + self._add(data, 'host') + self._add(data, 'interval') + self._add(data, 'ttl') + return data + + def _add(self, data, key, attr=None): + try: + if attr == None: + attr = key + data[key] = getattr(self, attr) + except: + pass + + +def main(): + module = AnsibleModule( + argument_spec=dict( + check_id=dict(required=False), + check_name=dict(required=False), + host=dict(default='localhost'), + interval=dict(required=False, default='1m'), + check_node=dict(required=False), + check_host=dict(required=False), + notes=dict(required=False), + port=dict(default=8500, type='int'), + script=dict(required=False), + service_id=dict(required=False), + service_name=dict(required=False), + service_port=dict(required=False, type='int'), + state=dict(default='present', choices=['present', 'absent']), + tags=dict(required=False, type='list'), + token=dict(required=False), + url=dict(default='http://localhost:8500') + ), + supports_check_mode=False, + ) + try: + register_with_consul(module) + except IOError, e: + error = e.read() + if not error: + error = str(e) + module.fail_json(msg=error) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/clustering/consul_acl b/clustering/consul_acl new file mode 
100644 index 00000000000..ae3efe5787f --- /dev/null +++ b/clustering/consul_acl @@ -0,0 +1,298 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: consul_acl +short_description: "manipulate consul acl keys and rules" +description: + - allows the addition, modification and deletion of ACL keys and associated + rules in a consul cluster via the agent. +version_added: "1.9" +author: Steve Gargan (steve.gargan@gmail.com) +options: + mgmt_token: + description: + - a management token is required to manipulate the acl lists + state: + description: + - whether the ACL pair should be present or absent, defaults to present + required: false + choices: ['present', 'absent'] + type: + description: + - the type of token that should be created, either management or + client, defaults to client + choices: ['client', 'management'] + name: + description: + - the name that should be associated with the acl key, this is opaque + to Consul + required: false + token: + description: + - the token key indentifying an ACL rule set. If generated by consul + this will be a UUID. + required: false + rules: + description: + - an list of the rules that should be associated with a given key/token. 
+ required: false +""" + +EXAMPLES = ''' + - name: create an acl token with rules + consul_acl: + mgmt_token: 'some_management_acl' + host: 'consul1.mycluster.io' + name: 'Foo access' + rules: + - key: 'foo' + policy: read + - key: 'private/foo' + policy: deny + + - name: remove a token + consul_acl: + mgmt_token: 'some_management_acl' + host: 'consul1.mycluster.io' + token: '172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e' + state: absent +''' + +import sys +import urllib2 + +try: + import consul +except ImportError, e: + print "failed=True msg='python-consul required for this module. "\ + "see http://python-consul.readthedocs.org/en/latest/#installation'" + sys.exit(1) + +try: + import hcl +except ImportError: + print "failed=True msg='pyhcl required for this module."\ + " see https://pypi.python.org/pypi/pyhcl'" + sys.exit(1) + +import epdb + + +def execute(module): + + state = module.params.get('state') + + if state == 'present': + update_acl(module) + else: + remove_acl(module) + + +def update_acl(module): + + rules = module.params.get('rules') + state = module.params.get('state') + token = module.params.get('token') + token_type = module.params.get('token_type') + mgmt = module.params.get('mgmt_token') + name = module.params.get('name') + consul = get_consul_api(module, mgmt) + changed = False + + try: + + if token: + existing_rules = load_rules_for_token(module, consul, token) + supplied_rules = yml_to_rules(module, rules) + print existing_rules + print supplied_rules + changed = not existing_rules == supplied_rules + if changed: + y = supplied_rules.to_hcl() + token = consul.acl.update( + token, + name=name, + type=token_type, + rules=supplied_rules.to_hcl()) + else: + try: + rules = yml_to_rules(module, rules) + if rules.are_rules(): + rules = rules.to_json() + else: + rules = None + + token = consul.acl.create( + name=name, type=token_type, rules=rules) + changed = True + except Exception, e: + module.fail_json( + msg="No token returned, check your managment key and 
that \ + the host is in the acl datacenter %s" % e) + except Exception, e: + module.fail_json(msg="Could not create/update acl %s" % e) + + module.exit_json(changed=changed, + token=token, + rules=rules, + name=name, + type=token_type) + + +def remove_acl(module): + state = module.params.get('state') + token = module.params.get('token') + mgmt = module.params.get('mgmt_token') + + consul = get_consul_api(module, token=mgmt) + changed = token and consul.acl.info(token) + if changed: + token = consul.acl.destroy(token) + + module.exit_json(changed=changed, token=token) + + +def load_rules_for_token(module, consul_api, token): + try: + rules = Rules() + info = consul_api.acl.info(token) + if info and info['Rules']: + rule_set = to_ascii(info['Rules']) + for rule in hcl.loads(rule_set).values(): + for key, policy in rule.iteritems(): + rules.add_rule(Rule(key, policy['policy'])) + return rules + except Exception, e: + module.fail_json( + msg="Could not load rule list from retrieved rule data %s, %s" % ( + token, e)) + + return json_to_rules(module, loaded) + +def to_ascii(unicode_string): + if isinstance(unicode_string, unicode): + return unicode_string.encode('ascii', 'ignore') + return unicode_string + +def yml_to_rules(module, yml_rules): + rules = Rules() + if yml_rules: + for rule in yml_rules: + if not('key' in rule or 'policy' in rule): + module.fail_json(msg="a rule requires a key and a policy.") + rules.add_rule(Rule(rule['key'], rule['policy'])) + return rules + +template = '''key "%s" { + policy = "%s" +}''' + +class Rules: + + def __init__(self): + self.rules = {} + + def add_rule(self, rule): + self.rules[rule.key] = rule + + def are_rules(self): + return len(self.rules) > 0 + + def to_json(self): + # import epdb; epdb.serve() + rules = {} + for key, rule in self.rules.iteritems(): + rules[key] = {'policy': rule.policy} + return json.dumps({'keys': rules}) + + def to_hcl(self): + + rules = "" + for key, rule in self.rules.iteritems(): + rules += template % 
(key, rule.policy) + + return to_ascii(rules) + + def __eq__(self, other): + if not (other or isinstance(other, self.__class__) + or len(other.rules) == len(self.rules)): + return False + + for name, other_rule in other.rules.iteritems(): + if not name in self.rules: + return False + rule = self.rules[name] + + if not (rule and rule == other_rule): + return False + return True + + def __str__(self): + return self.to_hcl() + +class Rule: + + def __init__(self, key, policy): + self.key = key + self.policy = policy + + def __eq__(self, other): + return (isinstance(other, self.__class__) + and self.key == other.key + and self.policy == other.policy) + def __hash__(self): + return hash(self.key) ^ hash(self.policy) + + def __str__(self): + return '%s %s' % (self.key, self.policy) +def get_consul_api(module, token=None): + if not token: + token = token = module.params.get('token') + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port'), + token=token) + + +def main(): + argument_spec = dict( + mgmt_token=dict(required=True), + host=dict(default='localhost'), + name=dict(required=False), + port=dict(default=8500, type='int'), + rules=dict(default=None, required=False, type='list'), + state=dict(default='present', choices=['present', 'absent']), + token=dict(required=False), + token_type=dict( + required=False, choices=['client', 'management'], default='client') + ) + + module = AnsibleModule(argument_spec, supports_check_mode=True) + + try: + execute(module) + except IOError, e: + error = e.read() + if not error: + error = str(e) + module.fail_json(msg=error) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/clustering/consul_kv b/clustering/consul_kv new file mode 100644 index 00000000000..6a2b77ea7c6 --- /dev/null +++ b/clustering/consul_kv @@ -0,0 +1,238 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it 
and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: consul_kv +short_description: "manipulate entries in the key/value store of a consul + cluster. See http://www.consul.io/docs/agent/http.html#kv for more details." +description: + - allows the addition, modification and deletion of key/value entries in a + consul cluster via the agent. The entire contents of the record, including + the indices, flags and session are returned as 'value'. If the key + represents a prefix then Note that when a value is removed, the existing + value if any is returned as part of the results. +version_added: "1.9" +author: Steve Gargan (steve.gargan@gmail.com) +options: + state: + description: + - the action to take with the supplied key and value. If the state is + 'present', the key contents will be set to the value supplied, + 'changed' will be set to true only if the value was different to the + current contents. The state 'absent' will remove the key/value pair, + again 'changed' will be set to true only if the key actually existed + prior to the removal. An attempt can be made to obtain or free the + lock associated with a key/value pair with the states 'acquire' or + 'release' respectively. a valid session must be supplied to make the + attempt changed will be true if the attempt is successful, false + otherwise. + required: true + choices: ['present', 'absent', 'acquire', 'release'] + key: + description: + - the key at which the value should be stored. 
+ required: true + value: + description: + - the value should be associated with the given key, required if state + is present + required: true + recurse: + description: + - if the key represents a prefix, each entry with the prefix can be + retrieved by setting this to true. + required: true + session: + description: + - the session that should be used to acquire or release a lock + associated with a key/value pair + token: + description: + - the token key indentifying an ACL rule set that controls access to + the key value pair + required: false + url: + description: + - location of the consul agent with which access the keay/value store, + defaults to http://localhost:8500 + required: false + cas: + description: + - used when acquiring a lock with a session. If the cas is 0, then + Consul will only put the key if it does not already exist. If the + cas value is non-zero, then the key is only set if the index matches + the ModifyIndex of that key. + flags: + description: + - opaque integer value that can be passed when setting a value. +""" + + +EXAMPLES = ''' + + - name: add or update the value associated with a key in the key/value store + consul_kv: + key: somekey + value: somevalue + + - name: remove a key from the store + consul_kv: + key: somekey + state: absent + + - name: add a node to an arbitrary group via consul inventory (see consul.ini) + consul_kv: + key: ansible/groups/dc1/somenode + value: 'top_secret' +''' + +import sys +import urllib2 + +try: + import json +except ImportError: + import simplejson as json + +try: + import consul +except ImportError, e: + print """failed=True msg='python-consul required for this module. 
\ + see http://python-consul.readthedocs.org/en/latest/#installation'""" + sys.exit(1) + + +def execute(module): + + state = module.params.get('state') + + if state == 'acquire' or state == 'release': + lock(module, state) + if state == 'present': + add_value(module) + else: + remove_value(module) + + +def lock(module, state): + + session = module.params.get('session') + key = module.params.get('key') + value = module.params.get('value') + + if not session: + module.fail( + msg='%s of lock for %s requested but no session supplied' % + (state, key)) + + if state == 'acquire': + successful = consul_api.kv.put(key, value, + cas=module.params.get('cas'), + acquire=session, + flags=module.params.get('flags')) + else: + successful = consul_api.kv.put(key, value, + cas=module.params.get('cas'), + release=session, + flags=module.params.get('flags')) + + module.exit_json(changed=successful, + index=index, + key=key) + + +def add_value(module): + + consul_api = get_consul_api(module) + + key = module.params.get('key') + value = module.params.get('value') + + index, existing = consul_api.kv.get(key) + + changed = not existing or (existing and existing['Value'] != value) + if changed and not module.check_mode: + changed = consul_api.kv.put(key, value, + cas=module.params.get('cas'), + flags=module.params.get('flags')) + + if module.params.get('retrieve'): + index, stored = consul_api.kv.get(key) + + module.exit_json(changed=changed, + index=index, + key=key, + data=stored) + + +def remove_value(module): + ''' remove the value associated with the given key. if the recurse parameter + is set then any key prefixed with the given key will be removed. 
''' + consul_api = get_consul_api(module) + + key = module.params.get('key') + value = module.params.get('value') + + index, existing = consul_api.kv.get( + key, recurse=module.params.get('recurse')) + + changed = existing != None + if changed and not module.check_mode: + consul_api.kv.delete(key, module.params.get('recurse')) + + module.exit_json(changed=changed, + index=index, + key=key, + data=existing) + + +def get_consul_api(module, token=None): + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port'), + token=module.params.get('token')) + + +def main(): + + argument_spec = dict( + cas=dict(required=False), + flags=dict(required=False), + host=dict(default='localhost'), + key=dict(required=True), + port=dict(default=8500, type='int'), + recurse=dict(required=False, type='bool'), + retrieve=dict(required=False, default=True), + state=dict(default='present', choices=['present', 'absent']), + token=dict(required=False, default='anonymous'), + value=dict(required=False) + ) + + module = AnsibleModule(argument_spec, supports_check_mode=True) + + try: + execute(module) + except IOError, e: + error = e.read() + if not error: + error = str(e) + module.fail_json(msg=error) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/clustering/consul_session b/clustering/consul_session new file mode 100644 index 00000000000..f11c5447e57 --- /dev/null +++ b/clustering/consul_session @@ -0,0 +1,213 @@ +#!/usr/bin/python +# +# (c) 2015, Steve Gargan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: consul_session +short_description: "manipulate consul sessions" +description: + - allows the addition, modification and deletion of sessions in a consul + cluster. These sessions can then be used in conjunction with key value pairs + to implement distributed locks. In depth documentation for working with + sessions can be found here http://www.consul.io/docs/internals/sessions.html +version_added: "1.9" +author: Steve Gargan (steve.gargan@gmail.com) +options: + state: + description: + - whether the session should be present i.e. created if it doesn't + exist, or absent, removed if present. If created, the ID for the + session is returned in the output. If absent, the name or ID is + required to remove the session. Info for a single session, all the + sessions for a node or all available sessions can be retrieved by + specifying info, node or list for the state; for node or info, the + node name or session id is required as parameter. + required: false + choices: ['present', 'absent', 'info', 'node', 'list'] + name: + description: + - the name that should be associated with the session. This is opaque + to Consul and not required. + required: false + delay: + description: + - the optional lock delay that can be attached to the session when it + is created. Locks for invalidated sessions ar blocked from being + acquired until this delay has expired. + default: 15s + node: + description: + - the name of the node that with which the session will be associated. + by default this is the name of the agent. + datacenter: + description: + - name of the datacenter in which the session exists or should be + created. + checks: + description: + - a list of checks that will be used to verify the session health. 
If + all the checks fail, the session will be invalidated and any locks + associated with the session will be release and can be acquired once + the associated lock delay has expired. +""" + +EXAMPLES = ''' + +''' + +import sys +import urllib2 + +try: + import consul +except ImportError, e: + print "failed=True msg='python-consul required for this module. see "\ + "http://python-consul.readthedocs.org/en/latest/#installation'" + sys.exit(1) + + +def execute(module): + + state = module.params.get('state') + + if state in ['info', 'list', 'node']: + lookup_sessions(module) + elif state == 'present': + update_session(module) + else: + remove_session(module) + +def lookup_sessions(module): + + datacenter = module.params.get('datacenter') + + state = module.params.get('state') + consul = get_consul_api(module) + try: + if state == 'list': + sessions_list = consul.session.list(dc=datacenter) + #ditch the index, this can be grabbed from the results + if sessions_list and sessions_list[1]: + sessions_list = sessions_list[1] + module.exit_json(changed=True, + sessions=sessions_list) + elif state == 'node': + node = module.params.get('node') + if not node: + module.fail_json( + msg="node name is required to retrieve sessions for node") + sessions = consul.session.node(node, dc=datacenter) + module.exit_json(changed=True, + node=node, + sessions=sessions) + elif state == 'info': + session_id = module.params.get('id') + if not session_id: + module.fail_json( + msg="session_id is required to retrieve indvidual session info") + + session_by_id = consul.session.info(session_id, dc=datacenter) + module.exit_json(changed=True, + session_id=session_id, + sessions=session_by_id) + + except Exception, e: + module.fail_json(msg="Could not retrieve session info %s" % e) + + +def update_session(module): + + name = module.params.get('name') + session_id = module.params.get('id') + delay = module.params.get('delay') + checks = module.params.get('checks') + datacenter = 
module.params.get('datacenter') + node = module.params.get('node') + + consul = get_consul_api(module) + changed = True + + try: + + session = consul.session.create( + name=name, + node=node, + lock_delay=delay, + dc=datacenter, + checks=checks + ) + module.exit_json(changed=True, + session_id=session, + name=name, + delay=delay, + checks=checks, + node=node) + except Exception, e: + module.fail_json(msg="Could not create/update session %s" % e) + + +def remove_session(module): + session_id = module.params.get('id') + + if not session_id: + module.fail_json(msg="""A session id must be supplied in order to + remove a session.""") + + consul = get_consul_api(module) + changed = False + + try: + session = consul.session.destroy(session_id) + + module.exit_json(changed=True, + session_id=session_id) + except Exception, e: + module.fail_json(msg="Could not remove session with id '%s' %s" % ( + session_id, e)) + + +def get_consul_api(module): + return consul.Consul(host=module.params.get('host'), + port=module.params.get('port')) + +def main(): + argument_spec = dict( + checks=dict(default=None, required=False, type='list'), + delay=dict(required=False,type='int', default=15), + host=dict(default='localhost'), + port=dict(default=8500, type='int'), + id=dict(required=False), + name=dict(required=False), + node=dict(required=False), + state=dict(default='present', + choices=['present', 'absent', 'info', 'node', 'list']) + ) + + module = AnsibleModule(argument_spec, supports_check_mode=True) + + try: + execute(module) + except IOError, e: + error = e.read() + if not error: + error = str(e) + module.fail_json(msg=error) + +# import module snippets +from ansible.module_utils.basic import * +main() From 2eae1820ff80e37c35b8c92c2c170354d6f8412e Mon Sep 17 00:00:00 2001 From: Peter Oliver Date: Sun, 25 Jan 2015 15:42:52 +0000 Subject: [PATCH 057/224] Tweak documentation. 
--- packaging/os/pkg5.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index bbdf3eb006b..beb04441505 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -24,17 +24,16 @@ short_description: Manages packages with the Solaris 11 Image Packaging System description: - IPS packages are the native packages in Solaris 11 and higher. notes: - - The naming of IPS packages is explained at http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html. + - The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html). options: name: description: - An FRMI of the package(s) to be installed/removed/updated. - - Multiple packages may be specified, separated by C(,). If C(,) - appears in an FRMI, you can replace it with C(-). + - Multiple packages may be specified, separated by C(,). required: true state: description: - - Whether to install (C(present), C(latest)), or remove (C(absent)) a + - Whether to install (I(present), I(latest)), or remove (I(absent)) a package. required: false default: present From 3524330e5d91663d399e054aa6f38dbda1dbd589 Mon Sep 17 00:00:00 2001 From: Peter Oliver Date: Sun, 25 Jan 2015 15:44:32 +0000 Subject: [PATCH 058/224] Fix idempotency when removing packages. If the package is already not present, then we have nothing to do. 
--- packaging/os/pkg5_publisher.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packaging/os/pkg5_publisher.py b/packaging/os/pkg5_publisher.py index 20b0c0a659c..2993c1107cc 100644 --- a/packaging/os/pkg5_publisher.py +++ b/packaging/os/pkg5_publisher.py @@ -140,6 +140,9 @@ def set_publisher(module, params): def unset_publisher(module, publisher): + if not publisher in get_publishers(module): + module.exit_json() + rc, out, err = module.run_command( ["pkg", "unset-publisher", publisher], check_rc=True From e1e861fa35dccc5146c7b1c8a0ba71bd3e38c7d8 Mon Sep 17 00:00:00 2001 From: Peter Oliver Date: Mon, 26 Jan 2015 21:11:38 +0000 Subject: [PATCH 059/224] Add another example. --- packaging/os/pkg5.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index beb04441505..83c08af0c3b 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -45,6 +45,12 @@ EXAMPLES = ''' # Remove finger daemon: - pkg5: name=service/network/finger state=absent + +# Install several packages at once: +- pkg5: + name: + - /file/gnu-findutils + - /text/gnu-grep ''' From d0382bda002e5862b2a4ce1035b126d0676eba57 Mon Sep 17 00:00:00 2001 From: Peter Tan Date: Tue, 27 Jan 2015 16:22:46 -0800 Subject: [PATCH 060/224] Add gce_img module for utilizing GCE image resources --- cloud/google/gce_img.py | 174 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100644 cloud/google/gce_img.py diff --git a/cloud/google/gce_img.py b/cloud/google/gce_img.py new file mode 100644 index 00000000000..460a55b41ad --- /dev/null +++ b/cloud/google/gce_img.py @@ -0,0 +1,174 @@ +#!/usr/bin/python +# Copyright 2015 Google Inc. All Rights Reserved. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +"""An Ansible module to utilize GCE image resources.""" + +import sys + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.gce import * + +try: + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + from libcloud.common.google import GoogleBaseError + from libcloud.common.google import ResourceNotFoundError + _ = Provider.GCE +except ImportError: + print('failed=True ' + "msg='libcloud with GCE support is required for this module.'") + sys.exit(1) + +DOCUMENTATION = ''' +--- +module: gce_img +short_description: utilize GCE image resources +description: + - This module can create and delete GCE private images from gzipped + compressed tarball containing raw disk data or from existing detached + disks in any zone. 
U(https://cloud.google.com/compute/docs/images) +options: + name: + description: + - the name of the image to create + required: true + default: null + aliases: [] + source: + description: + - the source disk or the Google Cloud Storage URI to create the image from + required: false + default: null + aliases: [] + state: + description: + - desired state of the image + required: false + default: "present" + choices: ["active", "present", "absent", "deleted"] + aliases: [] + zone: + description: + - the zone of the disk specified by source + required: false + default: "us-central1-a" + aliases: [] + service_account_email: + version_added: "1.6" + description: + - service account email + required: false + default: null + aliases: [] + pem_file: + version_added: "1.6" + description: + - path to the pem file associated with the service account email + required: false + default: null + aliases: [] + project_id: + version_added: "1.6" + description: + - your GCE project ID + required: false + default: null + aliases: [] + +requirements: [ "libcloud" ] +author: Peter Tan +''' + +EXAMPLES = ''' +# Create an image named test-image from the disk 'test-disk' in zone us-central1-a. +- gce_img: + name: test-image + source: test-disk + zone: us-central1-a + state: present + +# Delete an image named test-image in zone us-central1-a. 
+- gce_img: + name: test-image + zone: us-central1-a + state: deleted +''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + source=dict(), + state=dict(default='present'), + zone=dict(default='us-central1-a'), + service_account_email=dict(), + pem_file=dict(), + project_id=dict(), + ) + ) + + gce = gce_connect(module) + + name = module.params.get('name') + source = module.params.get('source') + state = module.params.get('state') + zone = module.params.get('zone') + changed = False + + try: + image = gce.ex_get_image(name) + except GoogleBaseError, e: + module.fail_json(msg=str(e), changed=False) + + # user wants to create an image. + if state in ['active', 'present'] and not image: + if not source: + module.fail_json(msg='Must supply a source', changed=False) + + if source.startswith('https://storage.googleapis.com'): + # source is a Google Cloud Storage URI + volume = source + else: + try: + volume = gce.ex_get_volume(source, zone) + except ResourceNotFoundError: + module.fail_json(msg='Disk %s not found in zone %s' % (source, zone), + changed=False) + except GoogleBaseError, e: + module.fail_json(msg=str(e), changed=False) + + try: + image = gce.ex_create_image(name, volume) + changed = True + except GoogleBaseError, e: + module.fail_json(msg=str(e), changed=False) + + # user wants to delete the image. 
+ if state in ['absent', 'deleted'] and image: + try: + gce.ex_delete_image(image) + changed = True + except GoogleBaseError, e: + module.fail_json(msg=str(e), changed=False) + + module.exit_json(changed=changed, name=name) + sys.exit(0) + +main() From 5ab2dcf76acf989ff886324fd329c0635a2e7e75 Mon Sep 17 00:00:00 2001 From: Peter Tan Date: Wed, 28 Jan 2015 11:19:06 -0800 Subject: [PATCH 061/224] Address review comments from @sivel and @erjohnso --- cloud/google/gce_img.py | 141 +++++++++++++++++++++++----------------- 1 file changed, 82 insertions(+), 59 deletions(-) diff --git a/cloud/google/gce_img.py b/cloud/google/gce_img.py index 460a55b41ad..22f9237ec90 100644 --- a/cloud/google/gce_img.py +++ b/cloud/google/gce_img.py @@ -18,26 +18,10 @@ """An Ansible module to utilize GCE image resources.""" -import sys - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.gce import * - -try: - from libcloud.compute.types import Provider - from libcloud.compute.providers import get_driver - from libcloud.common.google import GoogleBaseError - from libcloud.common.google import ResourceNotFoundError - _ = Provider.GCE -except ImportError: - print('failed=True ' - "msg='libcloud with GCE support is required for this module.'") - sys.exit(1) - DOCUMENTATION = ''' --- module: gce_img +version_added: "1.9" short_description: utilize GCE image resources description: - This module can create and delete GCE private images from gzipped @@ -46,7 +30,7 @@ description: options: name: description: - - the name of the image to create + - the name of the image to create or delete required: true default: null aliases: [] @@ -61,7 +45,7 @@ options: - desired state of the image required: false default: "present" - choices: ["active", "present", "absent", "deleted"] + choices: ["present", "absent"] aliases: [] zone: description: @@ -70,21 +54,18 @@ options: default: "us-central1-a" aliases: [] service_account_email: - version_added: "1.6" 
description: - service account email required: false default: null aliases: [] pem_file: - version_added: "1.6" description: - path to the pem file associated with the service account email required: false default: null aliases: [] project_id: - version_added: "1.6" description: - your GCE project ID required: false @@ -103,20 +84,81 @@ EXAMPLES = ''' zone: us-central1-a state: present -# Delete an image named test-image in zone us-central1-a. +# Create an image named test-image from a tarball in Google Cloud Storage. - gce_img: name: test-image - zone: us-central1-a - state: deleted + source: https://storage.googleapis.com/bucket/path/to/image.tgz + +# Alternatively use the gs scheme +- gce_img: + name: test-image + source: gs://bucket/path/to/image.tgz + +# Delete an image named test-image. +- gce_img: + name: test-image + state: absent ''' +import sys + +try: + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + from libcloud.common.google import GoogleBaseError + from libcloud.common.google import ResourceNotFoundError + _ = Provider.GCE + has_libcloud = True +except ImportError: + has_libcloud = False + + +GCS_URI = 'https://storage.googleapis.com/' + + +def create_image(gce, name, module): + """Create an image with the specified name.""" + source = module.params.get('source') + zone = module.params.get('zone') + + if not source: + module.fail_json(msg='Must supply a source', changed=False) + + if source.startswith(GCS_URI): + # source is a Google Cloud Storage URI + volume = source + elif source.startswith('gs://'): + # libcloud only accepts https URI. 
+ volume = source.replace('gs://', GCS_URI) + else: + try: + volume = gce.ex_get_volume(source, zone) + except ResourceNotFoundError: + module.fail_json(msg='Disk %s not found in zone %s' % (source, zone), + changed=False) + except GoogleBaseError, e: + module.fail_json(msg=str(e), changed=False) + + try: + gce.ex_create_image(name, volume) + except GoogleBaseError, e: + module.fail_json(msg=str(e), changed=False) + + +def delete_image(gce, image, module): + """Delete a specific image resource.""" + try: + gce.ex_delete_image(image) + except GoogleBaseError, e: + module.fail_json(msg=str(e), changed=False) + def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), source=dict(), - state=dict(default='present'), + state=dict(default='present', choices=['present', 'absent']), zone=dict(default='us-central1-a'), service_account_email=dict(), pem_file=dict(), @@ -124,51 +166,32 @@ def main(): ) ) + if not has_libcloud: + module.fail_json(msg='libcloud with GCE support is required.') + gce = gce_connect(module) name = module.params.get('name') - source = module.params.get('source') state = module.params.get('state') - zone = module.params.get('zone') changed = False - try: - image = gce.ex_get_image(name) - except GoogleBaseError, e: - module.fail_json(msg=str(e), changed=False) + image = gce.ex_get_image(name) # user wants to create an image. 
- if state in ['active', 'present'] and not image: - if not source: - module.fail_json(msg='Must supply a source', changed=False) - - if source.startswith('https://storage.googleapis.com'): - # source is a Google Cloud Storage URI - volume = source - else: - try: - volume = gce.ex_get_volume(source, zone) - except ResourceNotFoundError: - module.fail_json(msg='Disk %s not found in zone %s' % (source, zone), - changed=False) - except GoogleBaseError, e: - module.fail_json(msg=str(e), changed=False) - - try: - image = gce.ex_create_image(name, volume) - changed = True - except GoogleBaseError, e: - module.fail_json(msg=str(e), changed=False) + if state == 'present' and not image: + create_image(gce, name, module) + changed = True # user wants to delete the image. - if state in ['absent', 'deleted'] and image: - try: - gce.ex_delete_image(image) - changed = True - except GoogleBaseError, e: - module.fail_json(msg=str(e), changed=False) + if state == 'absent' and image: + delete_image(gce, image, module) + changed = True module.exit_json(changed=changed, name=name) sys.exit(0) +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.gce import * + main() From 23495a16f4f16982ef30f2994c9e405c9276bc78 Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Thu, 29 Jan 2015 10:32:09 +0100 Subject: [PATCH 062/224] fixed tab/space mix --- packaging/os/zypper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index c848e86fcc6..11f0380e81a 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -141,7 +141,7 @@ def get_package_state(m, packages): installed_state[package] = False for package in packages: - if package not in installed_state: + if package not in installed_state: print package + ' was not returned by rpm \n' return None From ceaaa36f16a4b603cf854184d135a9535c793ef0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 29 Jan 2015 18:21:21 -0800 
Subject: [PATCH 063/224] Reverse the default value of the bzr module force flags --- source_control/bzr.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/source_control/bzr.py b/source_control/bzr.py index 996150a39af..0d25a026f7a 100644 --- a/source_control/bzr.py +++ b/source_control/bzr.py @@ -45,11 +45,12 @@ options: bzr revno or revid. force: required: false - default: "yes" + default: "no" choices: [ 'yes', 'no' ] description: - If C(yes), any modified files in the working - tree will be discarded. + tree will be discarded. Before 1.9 the default + value was "yes". executable: required: false default: null @@ -145,7 +146,7 @@ def main(): dest=dict(required=True), name=dict(required=True, aliases=['parent']), version=dict(default='head'), - force=dict(default='yes', type='bool'), + force=dict(default='no', type='bool'), executable=dict(default=None), ) ) From ec599b2ecc5d89a7c9ad1f2fb1af594dad234b3a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 29 Jan 2015 18:50:08 -0800 Subject: [PATCH 064/224] Make documentation of urpmi module force parameter clearer --- packaging/os/urpmi.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/os/urpmi.py b/packaging/os/urpmi.py index a42ee7b87fc..320d17bfc00 100644 --- a/packaging/os/urpmi.py +++ b/packaging/os/urpmi.py @@ -52,7 +52,8 @@ options: choices: [ "yes", "no" ] force: description: - - Corresponds to the C(--force) option for I(urpmi). + - Assume "yes" is the answer to any question urpmi has to ask. + Corresponds to the C(--force) option for I(urpmi). required: false default: yes choices: [ "yes", "no" ] From 9b64cf6797362796080ded0d89b7eb3dc117d061 Mon Sep 17 00:00:00 2001 From: Peter Tan Date: Fri, 30 Jan 2015 00:47:47 -0800 Subject: [PATCH 065/224] Do not check for the image before calling gce.ex_create_image() or gce.ex_delete_image(), instead catching the ResourceExistsError or ResourceNotFoundError respectively. 
--- cloud/google/gce_img.py | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/cloud/google/gce_img.py b/cloud/google/gce_img.py index 22f9237ec90..3b2351b3752 100644 --- a/cloud/google/gce_img.py +++ b/cloud/google/gce_img.py @@ -34,6 +34,12 @@ options: required: true default: null aliases: [] + description: + description: + - an optional description + required: false + default: null + aliases: [] source: description: - the source disk or the Google Cloud Storage URI to create the image from @@ -106,6 +112,7 @@ try: from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver from libcloud.common.google import GoogleBaseError + from libcloud.common.google import ResourceExistsError from libcloud.common.google import ResourceNotFoundError _ = Provider.GCE has_libcloud = True @@ -120,6 +127,7 @@ def create_image(gce, name, module): """Create an image with the specified name.""" source = module.params.get('source') zone = module.params.get('zone') + desc = module.params.get('description') if not source: module.fail_json(msg='Must supply a source', changed=False) @@ -140,15 +148,21 @@ def create_image(gce, name, module): module.fail_json(msg=str(e), changed=False) try: - gce.ex_create_image(name, volume) + gce.ex_create_image(name, volume, desc, False) + return True + except ResourceExistsError: + return False except GoogleBaseError, e: module.fail_json(msg=str(e), changed=False) -def delete_image(gce, image, module): - """Delete a specific image resource.""" +def delete_image(gce, name, module): + """Delete a specific image resource by name.""" try: - gce.ex_delete_image(image) + gce.ex_delete_image(name) + return True + except ResourceNotFoundError: + return False except GoogleBaseError, e: module.fail_json(msg=str(e), changed=False) @@ -157,6 +171,7 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), + description=dict(), source=dict(), 
state=dict(default='present', choices=['present', 'absent']), zone=dict(default='us-central1-a'), @@ -175,17 +190,13 @@ def main(): state = module.params.get('state') changed = False - image = gce.ex_get_image(name) - # user wants to create an image. - if state == 'present' and not image: - create_image(gce, name, module) - changed = True + if state == 'present': + changed = create_image(gce, name, module) # user wants to delete the image. - if state == 'absent' and image: - delete_image(gce, image, module) - changed = True + if state == 'absent': + changed = delete_image(gce, name, module) module.exit_json(changed=changed, name=name) sys.exit(0) From 41d50290121d38dd2c8aca596fd4cc93681b51b9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 30 Jan 2015 07:34:46 -0800 Subject: [PATCH 066/224] Move dnf and bower plugins to proper subdirs --- packaging/{ => language}/bower.py | 0 packaging/{ => os}/dnf.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename packaging/{ => language}/bower.py (100%) rename packaging/{ => os}/dnf.py (100%) diff --git a/packaging/bower.py b/packaging/language/bower.py similarity index 100% rename from packaging/bower.py rename to packaging/language/bower.py diff --git a/packaging/dnf.py b/packaging/os/dnf.py similarity index 100% rename from packaging/dnf.py rename to packaging/os/dnf.py From df5e1946aae13f3aed728ce214b25c1e395ab82c Mon Sep 17 00:00:00 2001 From: Dagobert Michelsen Date: Sat, 31 Jan 2015 22:12:40 +0100 Subject: [PATCH 067/224] Correctly report "changed: false" when trying to install a package not in the catalog. 
This fixes #230 --- packaging/os/pkgutil.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/packaging/os/pkgutil.py b/packaging/os/pkgutil.py index 78a7db72bf5..fe0e82a5ab7 100644 --- a/packaging/os/pkgutil.py +++ b/packaging/os/pkgutil.py @@ -162,10 +162,15 @@ def main(): (rc, out, err) = package_uninstall(module, name) out = out[:75] - if rc is None: + if rc == 0: + result['changed'] = True + else: result['changed'] = False + + if rc is not None and rc != 0: + result['failed'] = True else: - result['changed'] = True + result['failed'] = False if out: result['stdout'] = out From e1008059ecf9ac94902501b5942f5fd07278286b Mon Sep 17 00:00:00 2001 From: Julien Pepy Date: Mon, 18 Aug 2014 09:28:32 +0200 Subject: [PATCH 068/224] Clean options building in Composer module --- packaging/language/composer.py | 38 +++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/packaging/language/composer.py b/packaging/language/composer.py index f788f53dd5c..86863909be8 100644 --- a/packaging/language/composer.py +++ b/packaging/language/composer.py @@ -128,30 +128,34 @@ def main(): supports_check_mode=True ) - module.params["working_dir"] = os.path.abspath(module.params["working_dir"]) + options = [] - options = set([]) # Default options - options.add("--no-ansi") - options.add("--no-progress") - options.add("--no-interaction") + options.append('--no-ansi') + options.append('--no-progress') + options.append('--no-interaction') - if module.check_mode: - options.add("--dry-run") - del module.params['CHECKMODE'] + options.extend(['--working-dir', os.path.abspath(module.params['working_dir'])]) - # Get composer command with fallback to default + # Get composer command with fallback to default command = module.params['command'] - del module.params['command']; # Prepare options - for i in module.params: - opt = "--%s" % i.replace("_","-") - p = module.params[i] - if isinstance(p, (bool)) and p: - options.add(opt) - elif 
isinstance(p, (str)): - options.add("%s=%s" % (opt, p)) + if module.params['prefer_source']: + options.append('--prefer-source') + if module.params['prefer_dist']: + options.append('--prefer-dist') + if module.params['no_dev']: + options.append('--no-dev') + if module.params['no_scripts']: + options.append('--no-scripts') + if module.params['no_plugins']: + options.append('--no-plugins') + if module.params['optimize_autoloader']: + options.append('--optimize-autoloader') + + if module.check_mode: + options.append('--dry-run') rc, out, err = composer_install(module, command, options) From 5a6de937cb053d8366e06c01ec59b37c22d0629c Mon Sep 17 00:00:00 2001 From: Andrew Udvare Date: Tue, 3 Feb 2015 05:16:28 -0800 Subject: [PATCH 069/224] Add support for emerge's --getbinpkg and --usepkgoonly --- packaging/os/portage.py | 43 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index d38062e47e5..ab96cb22e60 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -132,8 +132,22 @@ options: default: null choices: [ "yes", "web" ] + getbinpkg: + description: + - Prefer packages specified at PORTAGE_BINHOST in make.conf + required: false + default: null + choices: [ "yes" ] + + usepkgonly: + description: + - Merge only binaries (no compiling). This sets getbinpkg=yes. 
+ required: false + deafult: null + choices: [ "yes" ] + requirements: [ gentoolkit ] -author: Yap Sok Ann +author: Yap Sok Ann, Andrew Udvare notes: [] ''' @@ -147,6 +161,12 @@ EXAMPLES = ''' # Update package foo to the "best" version - portage: package=foo update=yes +# Install package foo using PORTAGE_BINHOST setup +- portage: package=foo getbinpkg=yes + +# Re-install world from binary packages only and do not allow any compiling +- portage: package=@world usepkgonly=yes + # Sync repositories and update world - portage: package=@world update=yes deep=yes sync=yes @@ -160,6 +180,7 @@ EXAMPLES = ''' import os import pipes +import re def query_package(module, package, action): @@ -244,11 +265,17 @@ def emerge_packages(module, packages): 'onlydeps': '--onlydeps', 'quiet': '--quiet', 'verbose': '--verbose', + 'getbinpkg': '--getbinpkg', + 'usepkgonly': '--usepkgonly', } for flag, arg in emerge_flags.iteritems(): if p[flag]: args.append(arg) + # usepkgonly implies getbinpkg + if p['usepkgonly'] and not p['getbinpkg']: + args.append('--getbinpkg') + cmd, (rc, out, err) = run_emerge(module, packages, *args) if rc != 0: module.fail_json( @@ -256,9 +283,19 @@ def emerge_packages(module, packages): msg='Packages not installed.', ) + # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite + # this error + if (p['usepkgonly'] or p['getbinpkg']) \ + and 'Permission denied (publickey).' 
in err: + module.fail_json( + cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Please check your PORTAGE_BINHOST configuration in make.conf ' + 'and your SSH authorized_keys file', + ) + changed = True for line in out.splitlines(): - if line.startswith('>>> Emerging (1 of'): + if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line): break else: changed = False @@ -367,6 +404,8 @@ def main(): quiet=dict(default=None, choices=['yes']), verbose=dict(default=None, choices=['yes']), sync=dict(default=None, choices=['yes', 'web']), + getbinpkg=dict(default=None, choices=['yes']), + usepkgonly=dict(default=None, choices=['yes']), ), required_one_of=[['package', 'sync', 'depclean']], mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']], From 8ecb0239390891eed4ebda10523037328bc9ba97 Mon Sep 17 00:00:00 2001 From: Matthew Pherigo Date: Mon, 2 Feb 2015 23:10:23 -0600 Subject: [PATCH 070/224] pkgng: add 'batch' parameter Some packages attempt to prompt the user for certain settings during installation. Thus, this parameter sets the environment variable $BATCH to 'yes', which forces package installation scripts to accept default values for these interactive prompts. This should work for all prompts that have a default value and aren't implemented through a custom script (as this variable is built into the ports/package system). FIXME: Package install should fail if it prompts and batch isn't set; currently, the install hangs indefinitely. TODO: Allow user to specify the answers to certain prompts. I (github.com/mwpher) have NOT tested this with any packages besides bsdstats. It's a small improvement, but not a complete answer to all the complexities of package installation. 
--- packaging/os/pkgng.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/packaging/os/pkgng.py b/packaging/os/pkgng.py index a1f443fd4e1..b0ebe9d547a 100644 --- a/packaging/os/pkgng.py +++ b/packaging/os/pkgng.py @@ -63,6 +63,13 @@ options: for newer pkgng versions, specify a the name of a repository configured in /usr/local/etc/pkg/repos required: false + batch: + description: + - for packages with interactive prompts during installation, + this makes pkgng automatically accept all default options for + the installation of the package. + default: yes + required: false author: bleader notes: - When using pkgsite, be careful that already in cache packages won't be downloaded again. @@ -136,7 +143,7 @@ def remove_packages(module, pkgng_path, packages): return (False, "package(s) already absent") -def install_packages(module, pkgng_path, packages, cached, pkgsite): +def install_packages(module, pkgng_path, packages, cached, pkgsite, batch): install_c = 0 @@ -149,6 +156,11 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite): else: pkgsite = "-r %s" % (pkgsite) + if batch == True: + batch_var = 'env BATCH=yes' + else: + batch_var = '' + if not module.check_mode and not cached: if old_pkgng: rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path)) @@ -163,9 +175,9 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite): if not module.check_mode: if old_pkgng: - rc, out, err = module.run_command("%s %s install -g -U -y %s" % (pkgsite, pkgng_path, package)) + rc, out, err = module.run_command("%s %s %s install -g -U -y %s" % (batch_var, pkgsite, pkgng_path, package)) else: - rc, out, err = module.run_command("%s install %s -g -U -y %s" % (pkgng_path, pkgsite, package)) + rc, out, err = module.run_command("%s %s install %s -g -U -y %s" % (batch_var, pkgng_path, pkgsite, package)) if not module.check_mode and not query_package(module, pkgng_path, package): 
module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err) @@ -264,7 +276,8 @@ def main(): name = dict(aliases=["pkg"], required=True), cached = dict(default=False, type='bool'), annotation = dict(default="", required=False), - pkgsite = dict(default="", required=False)), + pkgsite = dict(default="", required=False), + batch = dict(default=False, required=False, type='bool')), supports_check_mode = True) pkgng_path = module.get_bin_path('pkg', True) @@ -277,7 +290,7 @@ def main(): msgs = [] if p["state"] == "present": - _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"]) + _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], p["batch"]) changed = changed or _changed msgs.append(_msg) From e909beb6537c318f7227901bd5cdc3a4a17b1a08 Mon Sep 17 00:00:00 2001 From: Matthew Pherigo Date: Tue, 3 Feb 2015 15:43:24 -0600 Subject: [PATCH 071/224] Make $BATCH=yes the default, remove module option --- packaging/os/pkgng.py | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/packaging/os/pkgng.py b/packaging/os/pkgng.py index b0ebe9d547a..1aa8e0c737f 100644 --- a/packaging/os/pkgng.py +++ b/packaging/os/pkgng.py @@ -63,13 +63,6 @@ options: for newer pkgng versions, specify a the name of a repository configured in /usr/local/etc/pkg/repos required: false - batch: - description: - - for packages with interactive prompts during installation, - this makes pkgng automatically accept all default options for - the installation of the package. - default: yes - required: false author: bleader notes: - When using pkgsite, be careful that already in cache packages won't be downloaded again. 
@@ -143,7 +136,7 @@ def remove_packages(module, pkgng_path, packages): return (False, "package(s) already absent") -def install_packages(module, pkgng_path, packages, cached, pkgsite, batch): +def install_packages(module, pkgng_path, packages, cached, pkgsite): install_c = 0 @@ -156,10 +149,8 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite, batch): else: pkgsite = "-r %s" % (pkgsite) - if batch == True: - batch_var = 'env BATCH=yes' - else: - batch_var = '' + batch_var = 'env BATCH=yes' # This environment variable skips mid-install prompts, + # setting them to their default values. if not module.check_mode and not cached: if old_pkgng: @@ -276,8 +267,7 @@ def main(): name = dict(aliases=["pkg"], required=True), cached = dict(default=False, type='bool'), annotation = dict(default="", required=False), - pkgsite = dict(default="", required=False), - batch = dict(default=False, required=False, type='bool')), + pkgsite = dict(default="", required=False)), supports_check_mode = True) pkgng_path = module.get_bin_path('pkg', True) @@ -290,7 +280,7 @@ def main(): msgs = [] if p["state"] == "present": - _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], p["batch"]) + _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"]) changed = changed or _changed msgs.append(_msg) From 9722203ee0d5b0d286e1dfb3e949bee77421373b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 3 Feb 2015 22:29:56 -0500 Subject: [PATCH 072/224] corrected version added --- packaging/language/bower.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/bower.py b/packaging/language/bower.py index e948f687bde..3fccf51056b 100644 --- a/packaging/language/bower.py +++ b/packaging/language/bower.py @@ -24,7 +24,7 @@ module: bower short_description: Manage bower packages with bower description: - Manage bower packages with bower -version_added: 1.7 +version_added: 1.9 author: Michael 
Warkentin options: name: From dd681321b32b3c8ad0befcadc1dad8253b94b263 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 3 Feb 2015 22:31:46 -0500 Subject: [PATCH 073/224] corrected version added --- packaging/os/dnf.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/packaging/os/dnf.py b/packaging/os/dnf.py index 2ce8cb6ab2a..222fe4fa222 100644 --- a/packaging/os/dnf.py +++ b/packaging/os/dnf.py @@ -34,7 +34,7 @@ except: DOCUMENTATION = ''' --- module: dnf -version_added: historical +version_added: 1.9 short_description: Manages packages with the I(dnf) package manager description: - Installs, upgrade, removes, and lists packages and groups with the I(dnf) package manager. @@ -43,21 +43,18 @@ options: description: - "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: dnf -y update. You can also pass a url or a local path to a rpm file." required: true - version_added: "1.8" default: null aliases: [] list: description: - Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples. required: false - version_added: "1.8" default: null state: description: - Whether to install (C(present), C(latest)), or remove (C(absent)) a package. required: false choices: [ "present", "latest", "absent" ] - version_added: "1.8" default: "present" enablerepo: description: @@ -65,17 +62,15 @@ options: These repos will not persist beyond the transaction. When specifying multiple repos, separate them with a ",". required: false - version_added: "1.8" default: null aliases: [] - + disablerepo: description: - I(Repoid) of repositories to disable for the install/update operation. These repos will not persist beyond the transaction. When specifying multiple repos, separate them with a ",". 
required: false - version_added: "1.8" default: null aliases: [] @@ -83,7 +78,6 @@ options: description: - The remote dnf configuration file to use for the transaction. required: false - version_added: "1.8" default: null aliases: [] @@ -92,7 +86,6 @@ options: - Whether to disable the GPG checking of signatures of packages being installed. Has an effect only if state is I(present) or I(latest). required: false - version_added: "1.8" default: "no" choices: ["yes", "no"] aliases: [] From 2ec916ad383a7640e4ce29706abe1e94287f5695 Mon Sep 17 00:00:00 2001 From: James Barwell Date: Thu, 5 Feb 2015 12:11:16 +0000 Subject: [PATCH 074/224] Fix argument parsing to module constructor - Change to remove kwargs in a97d1016dc77186de8ad05704b6b4c141c005409 did not remove arguments passed in to the constructor. --- network/haproxy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/haproxy.py b/network/haproxy.py index 51b28d623a0..38757599df5 100644 --- a/network/haproxy.py +++ b/network/haproxy.py @@ -243,7 +243,7 @@ def main(): if not socket: module.fail_json(msg="unable to locate haproxy socket") - ansible_haproxy = HAProxy(module, **module.params) + ansible_haproxy = HAProxy(module) ansible_haproxy.act() # import module snippets From df0130935198b03bc340b4e193b1485a55a4601a Mon Sep 17 00:00:00 2001 From: Jakub Jirutka Date: Mon, 9 Feb 2015 01:00:06 +0100 Subject: [PATCH 075/224] Add new module "patch" --- files/__init__.py | 0 files/patch.py | 150 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 150 insertions(+) create mode 100644 files/__init__.py create mode 100644 files/patch.py diff --git a/files/__init__.py b/files/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/files/patch.py b/files/patch.py new file mode 100644 index 00000000000..e2e5467f36e --- /dev/null +++ b/files/patch.py @@ -0,0 +1,150 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Luis Alberto Perez Lazaro +# (c) 2015, Jakub 
Jirutka +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: patch +author: Luis Alberto Perez Lazaro, Jakub Jirutka +version_added: 1.8 +short_description: Apply patch files using the GNU patch tool. +options: + basedir: + description: + - Path of a base directory in which the patch file will be applied. + May be ommitted when C(dest) option is specified, otherwise required. + required: false + dest: + description: + - Path of the file on the remote machine to be patched. + - The names of the files to be patched are usually taken from the patch + file, but if there's just one file to be patched it can specified with + this option. + required: false + aliases: [ "originalfile" ] + src: + description: + - Path of the patch file on the remote machine as accepted by the GNU + patch tool. + required: true + aliases: [ "patchfile" ] + strip: + description: + - Number that indicates the smallest prefix containing leading slashes + that will be stripped from each file name found in the patch file. + For more information see the strip parameter of the GNU patch tool. + required: false + type: "int" + default: "0" +note: + - This module requires GNU I(patch) utility to be installed on the remote host. 
+''' + +EXAMPLES = ''' +- name: apply patch to one file + patch: > + src=/tmp/index.html.patch + dest=/var/www/index.html + +- name: apply patch to multiple files under basedir + patch: > + src=/tmp/customize.patch + basedir=/var/www + strip=1 +''' + +import os +from os import path, R_OK, W_OK + + +class PatchError(Exception): + pass + + +def is_already_applied(patch_func, patch_file, basedir, dest_file=None, strip=0): + opts = ['--quiet', '--reverse', '--forward', '--dry-run', + "--strip=%s" % strip, "--directory='%s'" % basedir, + "--input='%s'" % patch_file] + if dest_file: + opts.append("'%s'" % dest_file) + + (rc, _, _) = patch_func(opts) + return rc == 0 + + +def apply_patch(patch_func, patch_file, basedir, dest_file=None, strip=0, dry_run=False): + opts = ['--quiet', '--forward', '--batch', '--reject-file=-', + "--strip=%s" % strip, "--directory='%s'" % basedir, + "--input='%s'" % patch_file] + if dry_run: + opts.append('--dry-run') + if dest_file: + opts.append("'%s'" % dest_file) + + (rc, out, err) = patch_func(opts) + if rc != 0: + msg = out if not err else err + raise PatchError(msg) + + +def main(): + module = AnsibleModule( + argument_spec={ + 'src': {'required': True, 'aliases': ['patchfile']}, + 'dest': {'aliases': ['originalfile']}, + 'basedir': {}, + 'strip': {'default': 0, 'type': 'int'} + }, + required_one_of=[['dest', 'basedir']], + supports_check_mode=True + ) + + # Create type object as namespace for module params + p = type('Params', (), module.params) + + if not os.access(p.src, R_OK): + module.fail_json(msg="src %s doesn't exist or not readable" % (p.src)) + + if p.dest and not os.access(p.dest, W_OK): + module.fail_json(msg="dest %s doesn't exist or not writable" % (d.dest)) + + if p.basedir and not path.exists(p.basedir): + module.fail_json(msg="basedir %s doesn't exist" % (p.basedir)) + + if not p.basedir: + p.basedir = path.dirname(p.dest) + + patch_bin = module.get_bin_path('patch') + patch_func = lambda opts: module.run_command("%s 
%s" % (patch_bin, ' '.join(opts))) + + changed = False + if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip): + try: + apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip, + dry_run=module.check_mode) + changed = True + except PatchError, e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=changed) + +# import module snippets +from ansible.module_utils.basic import * +main() From 9ea179697d4b88daf25634488a8cbaedb80d8bc0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 9 Feb 2015 16:27:03 -0500 Subject: [PATCH 076/224] added version to patch and remote_src to allow for controlling if src is on master or target --- files/patch.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/files/patch.py b/files/patch.py index e2e5467f36e..46fefe86dd2 100644 --- a/files/patch.py +++ b/files/patch.py @@ -23,7 +23,9 @@ DOCUMENTATION = ''' --- module: patch author: Luis Alberto Perez Lazaro, Jakub Jirutka -version_added: 1.8 +version_added: 1.9 +description: + - Apply patch files using the GNU patch tool. short_description: Apply patch files using the GNU patch tool. options: basedir: @@ -41,10 +43,16 @@ options: aliases: [ "originalfile" ] src: description: - - Path of the patch file on the remote machine as accepted by the GNU - patch tool. + - Path of the patch file as accepted by the GNU patch tool. required: true aliases: [ "patchfile" ] + remote_src: + description: + - If False, it will search for src at originating/master machine, if True it will + go to the remote/target machine for the src. Default is False. 
+ choices: [ "True", "False" ] + required: false + default: "False" strip: description: - Number that indicates the smallest prefix containing leading slashes @@ -110,7 +118,8 @@ def main(): 'src': {'required': True, 'aliases': ['patchfile']}, 'dest': {'aliases': ['originalfile']}, 'basedir': {}, - 'strip': {'default': 0, 'type': 'int'} + 'strip': {'default': 0, 'type': 'int'}, + 'remote_src': {'default': False, 'type': 'bool'}, }, required_one_of=[['dest', 'basedir']], supports_check_mode=True From 3f78475cae9bb2d288d1d9150e106070b6fbcd2e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 11 Feb 2015 12:35:43 -0500 Subject: [PATCH 077/224] daemontools module --- system/svc.py | 265 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 265 insertions(+) create mode 100644 system/svc.py diff --git a/system/svc.py b/system/svc.py new file mode 100644 index 00000000000..841d6e78c90 --- /dev/null +++ b/system/svc.py @@ -0,0 +1,265 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: svc +author: Brian Coca +version_added: +short_description: Manage daemontools services. +description: + - Controls daemontools services on remote hosts using the svc utility. +options: + name: + required: true + description: + - Name of the service to manage. + state: + required: false + choices: [ started, stopped, restarted, reloaded, once ] + description: + - C(Started)/C(stopped) are idempotent actions that will not run + commands unless necessary. C(restarted) will always bounce the + svc (svc -t). C(reloaded) will send a sigusr1 (svc -u). + C(once) will run a normally downed svc once (svc -o), not really + an idempotent operation. + downed: + required: false + choices: [ "yes", "no" ] + default: no + description: + - Should a 'down' file exist or not, if it exists it disables auto startup. + defaults to no. Downed does not imply stopped. 
+ enabled: + required: false + choices: [ "yes", "no" ] + description: + - Wheater the service is enabled or not, if disabled it also implies stopped. + Make note that a service can be enabled and downed (no auto restart). + service_dir: + required: false + default: /service + description: + - directory svscan watches for services + service_src: + required: false + description: + - directory where services are defined, the source of symlinks to service_dir. +''' + +EXAMPLES = ''' +# Example action to start svc dnscache, if not running + - svc: name=dnscache state=started + +# Example action to stop svc dnscache, if running + - svc: name=dnscache state=stopped + +# Example action to restart svc dnscache, in all cases + - svc : name=dnscache state=restarted + +# Example action to reload svc dnscache, in all cases + - svc: name=dnscache state=reloaded + +# Example using alt svc directory location + - svc: name=dnscache state=reloaded service_dir=/var/service +''' + +import platform +import shlex + +def _load_dist_subclass(cls, *args, **kwargs): + ''' + Used for derivative implementations + ''' + subclass = None + + distro = kwargs['module'].params['distro'] + + # get the most specific superclass for this platform + if distro is not None: + for sc in cls.__subclasses__(): + if sc.distro is not None and sc.distro == distro: + subclass = sc + if subclass is None: + subclass = cls + + return super(cls, subclass).__new__(subclass) + +class Svc(object): + """ + Main class that handles daemontools, can be subclassed and overriden in case + we want to use a 'derivative' like encore, s6, etc + """ + + + #def __new__(cls, *args, **kwargs): + # return _load_dist_subclass(cls, args, kwargs) + + + + def __init__(self, module): + self.extra_paths = [ '/command', '/usr/local/bin' ] + self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state'] + + self.module = module + + self.name = module.params['name'] + self.service_dir = 
module.params['service_dir'] + self.service_src = module.params['service_src'] + self.enabled = None + self.downed = None + self.full_state = None + self.state = None + self.pid = None + self.duration = None + + self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths) + self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths) + self.svc_full = '/'.join([ self.service_dir, self.name ]) + self.src_full = '/'.join([ self.service_src, self.name ]) + + self.enabled = os.path.lexists(self.svc_full) + if self.enabled: + self.downed = os.path.lexists('%s/down' % self.svc_full) + self.get_status() + else: + self.downed = os.path.lexists('%s/down' % self.src_full) + self.state = 'stopped' + + + def enable(self): + if os.path.exists(self.src_full): + try: + os.symlink(self.src_full, self.svc_full) + except OSError, e: + self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e)) + else: + self.module.fail_json(msg="Could not find source for service to enable (%s)." 
% self.src_full) + + def disable(self): + try: + os.unlink(self.svc_full) + except OSError, e: + self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e)) + self.execute_command([self.svc_cmd,'-dx',self.src_full]) + + src_log = '%s/log' % self.src_full + if os.path.exists(src_log): + self.execute_command([self.svc_cmd,'-dx',src_log]) + + def get_status(self): + (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full]) + + if err is not None and err: + self.full_state = self.state = err + else: + self.full_state = out + + m = re.search('\(pid (\d+)\)', out) + if m: + self.pid = m.group(1) + + m = re.search('(\d+) seconds', out) + if m: + self.duration = m.group(1) + + if re.search(' up ', out): + self.state = 'start' + elif re.search(' down ', out): + self.state = 'stopp' + else: + self.state = 'unknown' + return + + if re.search(' want ', out): + self.state += 'ing' + else: + self.state += 'ed' + + def start(self): + return self.execute_command([self.svc_cmd, '-u', self.svc_full]) + + def stopp(self): + return self.stop() + + def stop(self): + return self.execute_command([self.svc_cmd, '-d', self.svc_full]) + + def once(self): + return self.execute_command([self.svc_cmd, '-o', self.svc_full]) + + def reload(self): + return self.execute_command([self.svc_cmd, '-1', self.svc_full]) + + def execute_command(self, cmd): + try: + (rc, out, err) = self.module.run_command(' '.join(cmd)) + except Exception, e: + self.module.fail_json(msg="failed to execute: %s" % str(e)) + return (rc, out, err) + + + def report(self): + self.get_status() + return {k: self.__dict__[k] for k in self.report_vars} + +# =========================================== +# Main control flow + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + state = dict(choices=['started', 'stopped', 'restarted', 'reloaded', 'once']), + enabled = dict(required=False, type='bool', choices=BOOLEANS), + downed = dict(required=False, 
type='bool', choices=BOOLEANS), + dist = dict(required=False, default='daemontools'), + service_dir = dict(required=False, default='/service'), + service_src = dict(required=False, default='/etc/service'), + ), + supports_check_mode=True, + ) + + state = module.params['state'] + enabled = module.params['enabled'] + downed = module.params['downed'] + + svc = Svc(module) + changed = False + orig_state = svc.report() + + if enabled is not None and enabled != svc.enabled: + changed = True + if not module.check_mode: + try: + if enabled: + svc.enable() + else: + svc.disable() + except (OSError, IOError) as e: + module.fail_json(msg="Could change service link: %s" % str(e)) + + if state is not None and state != svc.state: + changed = True + if not module.check_mode: + getattr(svc,state[:-2])() + + if downed is not None and downed != svc.downed: + changed = True + if not module.check_mode: + d_file = "%s/down" % svc.svc_full + try: + if downed: + open(d_file, "a").close() + else: + os.unlink(d_file) + except (OSError, IOError) as e: + module.fail_json(msg="Could change downed file: %s " % (str(e))) + + module.exit_json(changed=changed, svc=svc.report()) + + +# this is magic, not normal python include +from ansible.module_utils.basic import * + +main() From 92c30c7c41ebba9f4f972547f675b36a9c7d95ae Mon Sep 17 00:00:00 2001 From: Kelley Reynolds Date: Wed, 11 Feb 2015 17:16:41 -0500 Subject: [PATCH 078/224] Add listsnapshots property to zfs --- system/zfs.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/system/zfs.py b/system/zfs.py index 93248897051..f2a38dddfd4 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -96,6 +96,11 @@ options: - The jailed property. required: False choices: ['on','off'] + listsnapshots: + description: + - the listsnapshots property. + required: False + choices: ['on','off'] logbias: description: - The logbias property. 
@@ -351,6 +356,7 @@ def main(): # Not supported #'groupquota': {'required': False}, 'jailed': {'required': False, 'choices':['on', 'off']}, + 'listsnapshots': {'required': False, 'choices':['on', 'off']}, 'logbias': {'required': False, 'choices':['latency', 'throughput']}, 'mountpoint': {'required': False}, 'nbmand': {'required': False, 'choices':['on', 'off']}, From c7e3aee0f018c019860cf6f17ffa9be9b12b7155 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 11 Feb 2015 17:21:35 -0500 Subject: [PATCH 079/224] now properly checks that link is suplied before trying to use it should fix #46 --- system/alternatives.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/system/alternatives.py b/system/alternatives.py index 575cb572867..871a494e87d 100755 --- a/system/alternatives.py +++ b/system/alternatives.py @@ -136,10 +136,13 @@ def main(): # install the requested path if necessary # (unsupported on the RedHat version) if path not in all_alternatives and os_family == "Debian": - module.run_command( - [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)], - check_rc=True - ) + if link: + module.run_command( + [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)], + check_rc=True + ) + else: + module.fail_json("Needed to install the alternative, but unable to do so, as we are missking the link") # select the requested path module.run_command( From dbccdef198d6023210b8aa29b98eb86846a53ec5 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 11 Feb 2015 17:32:12 -0500 Subject: [PATCH 080/224] minor fixes to logentries --- monitoring/logentries.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/monitoring/logentries.py b/monitoring/logentries.py index bdec2fc67b6..593664412a1 100644 --- a/monitoring/logentries.py +++ b/monitoring/logentries.py @@ -39,7 +39,7 @@ options: description: - name of the log required: false - type: + logtype: description: - type of 
the log required: false @@ -75,10 +75,10 @@ def follow_log(module, le_path, logs, name=None, logtype=None): module.exit_json(changed=True) cmd = [le_path, 'follow', log] - if name != None: - cmd.append('--name ' + str(name)) - if logtype != None: - cmd.append('--type ' + str(logtype)) + if name: + cmd.extend(['--name',name]) + if logtype: + cmd.append(['--type',logtype]) rc, out, err = module.run_command(' '.join(cmd)) if not query_log_status(module, le_path, log): @@ -121,8 +121,8 @@ def main(): argument_spec = dict( path = dict(required=True), state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]), - name = dict(required=False, default=None), - type = dict(required=False, default=None) + name = dict(required=False, default=None, type='str'), + logtype = dict(required=False, default=None, type='str', aliases['type']) ), supports_check_mode=True ) @@ -136,7 +136,7 @@ def main(): logs = filter(None, logs) if p["state"] in ["present", "followed"]: - follow_log(module, le_path, logs, name=p['name'], logtype=p['type']) + follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype']) elif p["state"] in ["absent", "unfollowed"]: unfollow_log(module, le_path, logs) From 9219797bb58ba7777d02a800fb642aa8519cc0ab Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Feb 2015 10:17:56 -0500 Subject: [PATCH 081/224] Revert "Add listsnapshots property to zfs" --- system/zfs.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/system/zfs.py b/system/zfs.py index f2a38dddfd4..93248897051 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -96,11 +96,6 @@ options: - The jailed property. required: False choices: ['on','off'] - listsnapshots: - description: - - the listsnapshots property. - required: False - choices: ['on','off'] logbias: description: - The logbias property. 
@@ -356,7 +351,6 @@ def main(): # Not supported #'groupquota': {'required': False}, 'jailed': {'required': False, 'choices':['on', 'off']}, - 'listsnapshots': {'required': False, 'choices':['on', 'off']}, 'logbias': {'required': False, 'choices':['latency', 'throughput']}, 'mountpoint': {'required': False}, 'nbmand': {'required': False, 'choices':['on', 'off']}, From c1e7abd8fda9deb17014734a5de9bb757028d9cd Mon Sep 17 00:00:00 2001 From: Marcus Date: Mon, 13 Oct 2014 15:05:39 +0200 Subject: [PATCH 082/224] Add support for mirror_only parameter to cpanm module --- packaging/language/cpanm.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/packaging/language/cpanm.py b/packaging/language/cpanm.py index 122fff559b6..85b49f4a53d 100644 --- a/packaging/language/cpanm.py +++ b/packaging/language/cpanm.py @@ -53,6 +53,11 @@ options: - Specifies the base URL for the CPAN mirror to use required: false default: false + mirror_only: + description: + - Use the mirror's index file instead of the CPAN Meta DB + required: false + default: false examples: - code: "cpanm: name=Dancer" description: Install I(Dancer) perl package. @@ -82,7 +87,7 @@ def _is_package_installed(module, name, locallib, cpanm): else: return False -def _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm): +def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, cpanm): # this code should use "%s" like everything else and just return early but not fixing all of it now. 
# don't copy stuff like this if from_path: @@ -99,6 +104,9 @@ def _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm): if mirror is not None: cmd = "{cmd} --mirror {mirror}".format(cmd=cmd, mirror=mirror) + if mirror_only is True: + cmd = "{cmd} --mirror-only".format(cmd=cmd) + return cmd @@ -109,6 +117,7 @@ def main(): notest=dict(default=False, type='bool'), locallib=dict(default=None, required=False), mirror=dict(default=None, required=False) + mirror_only=dict(default=False, type='bool'), ) module = AnsibleModule( @@ -116,12 +125,13 @@ def main(): required_one_of=[['name', 'from_path']], ) - cpanm = module.get_bin_path('cpanm', True) - name = module.params['name'] - from_path = module.params['from_path'] - notest = module.boolean(module.params.get('notest', False)) - locallib = module.params['locallib'] - mirror = module.params['mirror'] + cpanm = module.get_bin_path('cpanm', True) + name = module.params['name'] + from_path = module.params['from_path'] + notest = module.boolean(module.params.get('notest', False)) + locallib = module.params['locallib'] + mirror = module.params['mirror'] + mirror_only = module.params['mirror_only'] changed = False @@ -129,7 +139,7 @@ def main(): if not installed: out_cpanm = err_cpanm = '' - cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm) + cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, cpanm) rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False) From e4f57eae4463ad6820229d340a5637c39e04ac22 Mon Sep 17 00:00:00 2001 From: Kai de Haan Date: Thu, 12 Feb 2015 20:46:20 +0100 Subject: [PATCH 083/224] svc: Ensure python 2.5 compatibility * eg. 
debian lenny --- system/svc.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) mode change 100644 => 100755 system/svc.py diff --git a/system/svc.py b/system/svc.py old mode 100644 new mode 100755 index 841d6e78c90..05ef51afd99 --- a/system/svc.py +++ b/system/svc.py @@ -198,10 +198,12 @@ class Svc(object): self.module.fail_json(msg="failed to execute: %s" % str(e)) return (rc, out, err) - def report(self): self.get_status() - return {k: self.__dict__[k] for k in self.report_vars} + states = {} + for k in self.report_vars: + states[k] = self.__dict__[k] + return states # =========================================== # Main control flow @@ -236,7 +238,7 @@ def main(): svc.enable() else: svc.disable() - except (OSError, IOError) as e: + except (OSError, IOError), e: module.fail_json(msg="Could change service link: %s" % str(e)) if state is not None and state != svc.state: @@ -253,8 +255,8 @@ def main(): open(d_file, "a").close() else: os.unlink(d_file) - except (OSError, IOError) as e: - module.fail_json(msg="Could change downed file: %s " % (str(e))) + except (OSError, IOError), e: + module.fail_json(msg="Could change downed file: %s " % (str(e))) module.exit_json(changed=changed, svc=svc.report()) From 312d59061c7cd019f65ac45c2100ccb5c17040d7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Feb 2015 15:55:06 -0500 Subject: [PATCH 084/224] fixed logentries --- monitoring/logentries.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/logentries.py b/monitoring/logentries.py index 593664412a1..a19885ea702 100644 --- a/monitoring/logentries.py +++ b/monitoring/logentries.py @@ -78,7 +78,7 @@ def follow_log(module, le_path, logs, name=None, logtype=None): if name: cmd.extend(['--name',name]) if logtype: - cmd.append(['--type',logtype]) + cmd.extend(['--type',logtype]) rc, out, err = module.run_command(' '.join(cmd)) if not query_log_status(module, le_path, log): @@ -122,7 +122,7 @@ def main(): path = 
dict(required=True), state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]), name = dict(required=False, default=None, type='str'), - logtype = dict(required=False, default=None, type='str', aliases['type']) + logtype = dict(required=False, default=None, type='str', aliases=['type']) ), supports_check_mode=True ) From 8eabcdcbc6a90c8d466d8c95518ef6e7a8559858 Mon Sep 17 00:00:00 2001 From: SkaveRat Date: Thu, 12 Feb 2015 20:08:18 +0100 Subject: [PATCH 085/224] Fix missing restart method Script was missing the restart command --- system/svc.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/system/svc.py b/system/svc.py index 05ef51afd99..663126163e9 100755 --- a/system/svc.py +++ b/system/svc.py @@ -191,6 +191,9 @@ class Svc(object): def reload(self): return self.execute_command([self.svc_cmd, '-1', self.svc_full]) + def restart(self): + return self.execute_command([self.svc_cmd, '-t', self.svc_full]) + def execute_command(self, cmd): try: (rc, out, err) = self.module.run_command(' '.join(cmd)) From a2138c4eea59bc0c00b7b48f6d2751bbafd10ff0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Feb 2015 17:02:06 -0500 Subject: [PATCH 086/224] fixed missing , --- packaging/language/cpanm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/cpanm.py b/packaging/language/cpanm.py index 85b49f4a53d..ec344b7aa9b 100644 --- a/packaging/language/cpanm.py +++ b/packaging/language/cpanm.py @@ -116,7 +116,7 @@ def main(): from_path=dict(default=None, required=False), notest=dict(default=False, type='bool'), locallib=dict(default=None, required=False), - mirror=dict(default=None, required=False) + mirror=dict(default=None, required=False), mirror_only=dict(default=False, type='bool'), ) From 2ffd7235e5b5977091993b8ebb5b80464b09fee2 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 13 Feb 2015 08:20:12 -0500 Subject: [PATCH 087/224] added version added --- packaging/os/pkg5.py | 1 + 
packaging/os/pkg5_publisher.py | 1 + 2 files changed, 2 insertions(+) diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index 83c08af0c3b..eea860e7be2 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -21,6 +21,7 @@ DOCUMENTATION = ''' module: pkg5 author: Peter Oliver short_description: Manages packages with the Solaris 11 Image Packaging System +version_added: 1.9 description: - IPS packages are the native packages in Solaris 11 and higher. notes: diff --git a/packaging/os/pkg5_publisher.py b/packaging/os/pkg5_publisher.py index 2993c1107cc..63c62059203 100644 --- a/packaging/os/pkg5_publisher.py +++ b/packaging/os/pkg5_publisher.py @@ -21,6 +21,7 @@ DOCUMENTATION = ''' module: pkg5_publisher author: Peter Oliver short_description: Manages Solaris 11 Image Packaging System publishers +version_added: 1.9 description: - IPS packages are the native packages in Solaris 11 and higher. - This modules will configure which publishers a client will download IPS From edadf4668659389f8af703d7e5ff50b068821956 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 16 Feb 2015 09:59:24 -0500 Subject: [PATCH 088/224] changed to make name primary over user, as examples and keep consistent --- database/misc/mongodb_user.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index 9af22116245..3a3cf4dfff1 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -57,11 +57,12 @@ options: description: - The name of the database to add/remove the user from required: true - user: + name: description: - The name of the user to add or remove required: true default: null + aliases: [ 'user' ] password: description: - The password to use for the user @@ -178,7 +179,7 @@ def main(): login_port=dict(default='27017'), replica_set=dict(default=None), database=dict(required=True, aliases=['db']), - user=dict(required=True, aliases=['name']), + name=dict(required=True, 
aliases=['user']), password=dict(aliases=['pass']), ssl=dict(default=False), roles=dict(default=None, type='list'), @@ -195,7 +196,7 @@ def main(): login_port = module.params['login_port'] replica_set = module.params['replica_set'] db_name = module.params['database'] - user = module.params['user'] + user = module.params['name'] password = module.params['password'] ssl = module.params['ssl'] roles = module.params['roles'] From 4109bfda497108de66341a3825a20a815e95e99e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 17 Feb 2015 13:37:41 -0500 Subject: [PATCH 089/224] removed unused import json which breaks 2.4/simplejson compat --- network/citrix/netscaler.py | 1 - 1 file changed, 1 deletion(-) diff --git a/network/citrix/netscaler.py b/network/citrix/netscaler.py index de3c8fc2421..b2f87aa0d08 100644 --- a/network/citrix/netscaler.py +++ b/network/citrix/netscaler.py @@ -97,7 +97,6 @@ ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=api ''' -import json import base64 import socket From 57c723fcd9a84b2b72e2a32aa31e7543701cd601 Mon Sep 17 00:00:00 2001 From: Dagobert Michelsen Date: Wed, 18 Feb 2015 10:33:21 +0100 Subject: [PATCH 090/224] Adjust setting to better make logic clearer --- packaging/os/pkgutil.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/packaging/os/pkgutil.py b/packaging/os/pkgutil.py index fe0e82a5ab7..0204bbae987 100644 --- a/packaging/os/pkgutil.py +++ b/packaging/os/pkgutil.py @@ -162,15 +162,14 @@ def main(): (rc, out, err) = package_uninstall(module, name) out = out[:75] - if rc == 0: + if rc is None: + # pkgutil was not executed because the package was already present/absent + result['changed'] = False + elif rc == 0 result['changed'] = True - else: + else result['changed'] = False - - if rc is not None and rc != 0: result['failed'] = True - else: - result['failed'] = False if out: result['stdout'] = out From a2d440cceab66b0635d008c5b4b15952cb5abab0 Mon Sep 17 00:00:00 2001 From: 
Jeppe Toustrup Date: Wed, 18 Feb 2015 16:43:56 +0100 Subject: [PATCH 091/224] NPM: Expand the path before accessing it --- packaging/language/npm.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/language/npm.py b/packaging/language/npm.py index 1dd2e998492..8407589116a 100644 --- a/packaging/language/npm.py +++ b/packaging/language/npm.py @@ -149,6 +149,7 @@ class Npm(object): #If path is specified, cd into that path and run the command. cwd = None if self.path: + self.path = os.path.abspath(os.path.expanduser(self.path)) if not os.path.exists(self.path): os.makedirs(self.path) if not os.path.isdir(self.path): From 4dfbafb339d9456d595efcdfd12219b6df0d8170 Mon Sep 17 00:00:00 2001 From: "chris.schmidt" Date: Wed, 18 Feb 2015 09:22:57 -0700 Subject: [PATCH 092/224] Removed version check from main as it is checked in the download function. Having the check here was breaking "latest version" functionality. --- packaging/maven_artifact.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packaging/maven_artifact.py b/packaging/maven_artifact.py index bf4ca59f92c..75837050892 100755 --- a/packaging/maven_artifact.py +++ b/packaging/maven_artifact.py @@ -349,8 +349,7 @@ def main(): prev_state = "absent" if os.path.isdir(dest): - dest = dest + "/" + artifact_id + "-" + version + ".jar" - + dest = dest + "/" + artifact_id + "-" + version + "." 
+ extension if os.path.lexists(dest): prev_state = "present" else: From 8ba219ed78d09c2a1ce8c9d4e519e1ebe799fc2d Mon Sep 17 00:00:00 2001 From: "chris.schmidt" Date: Wed, 18 Feb 2015 09:24:21 -0700 Subject: [PATCH 093/224] Added check for "latest" in version field --- packaging/maven_artifact.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/maven_artifact.py b/packaging/maven_artifact.py index 75837050892..699d97a54c2 100755 --- a/packaging/maven_artifact.py +++ b/packaging/maven_artifact.py @@ -250,7 +250,7 @@ class MavenDownloader: def download(self, artifact, filename=None): filename = artifact.get_filename(filename) - if not artifact.version: + if not artifact.version or artifact.version == "latest": artifact = Artifact(artifact.group_id, artifact.artifact_id, self._find_latest_version_available(artifact), artifact.classifier, artifact.extension) From a7e1f86b8b9b45b2d54fbb9a623629c157af674b Mon Sep 17 00:00:00 2001 From: Matt Hite Date: Sat, 21 Feb 2015 11:29:35 -0800 Subject: [PATCH 094/224] Another potential fix for issue 109; now catches WebFault --- network/f5/bigip_facts.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index 99a1e31de68..2c333e58fba 100755 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -105,7 +105,7 @@ EXAMPLES = ''' try: import bigsuds - from suds import MethodNotFound + from suds import MethodNotFound, WebFault except ImportError: bigsuds_found = False else: @@ -1364,7 +1364,7 @@ def generate_dict(api_obj, fields): for field in fields: try: api_response = getattr(api_obj, "get_" + field)() - except MethodNotFound: + except (MethodNotFound, WebFault): pass else: lists.append(api_response) @@ -1380,7 +1380,7 @@ def generate_simple_dict(api_obj, fields): for field in fields: try: api_response = getattr(api_obj, "get_" + field)() - except MethodNotFound: + except (MethodNotFound, WebFault): pass else: 
result_dict[field] = api_response From bea9d260397189497c5c3e7a5a800a3e881e539a Mon Sep 17 00:00:00 2001 From: Konstantin Date: Wed, 25 Feb 2015 03:36:06 +0300 Subject: [PATCH 095/224] pkgutil.py syntax fix --- packaging/os/pkgutil.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/os/pkgutil.py b/packaging/os/pkgutil.py index 0204bbae987..635617b4efe 100644 --- a/packaging/os/pkgutil.py +++ b/packaging/os/pkgutil.py @@ -165,9 +165,9 @@ def main(): if rc is None: # pkgutil was not executed because the package was already present/absent result['changed'] = False - elif rc == 0 + elif rc == 0: result['changed'] = True - else + else: result['changed'] = False result['failed'] = True From 8a5297c535eecbc2fd9846404a84fa779496e98d Mon Sep 17 00:00:00 2001 From: Benjamin Kluck Date: Wed, 25 Feb 2015 18:24:18 +0100 Subject: [PATCH 096/224] svc: Add kill command (svc -k) --- system/svc.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/system/svc.py b/system/svc.py index 663126163e9..04749cfc134 100755 --- a/system/svc.py +++ b/system/svc.py @@ -20,7 +20,8 @@ options: description: - C(Started)/C(stopped) are idempotent actions that will not run commands unless necessary. C(restarted) will always bounce the - svc (svc -t). C(reloaded) will send a sigusr1 (svc -u). + svc (svc -t) and C(killed) will always bounce the svc (svc -k). + C(reloaded) will send a sigusr1 (svc -u). C(once) will run a normally downed svc once (svc -o), not really an idempotent operation. 
downed: @@ -54,6 +55,9 @@ EXAMPLES = ''' # Example action to stop svc dnscache, if running - svc: name=dnscache state=stopped +# Example action to kill svc dnscache, in all cases + - svc : name=dnscache state=killed + # Example action to restart svc dnscache, in all cases - svc : name=dnscache state=restarted @@ -194,6 +198,9 @@ class Svc(object): def restart(self): return self.execute_command([self.svc_cmd, '-t', self.svc_full]) + def kill(self): + return self.execute_command([self.svc_cmd, '-k', self.svc_full]) + def execute_command(self, cmd): try: (rc, out, err) = self.module.run_command(' '.join(cmd)) @@ -215,7 +222,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - state = dict(choices=['started', 'stopped', 'restarted', 'reloaded', 'once']), + state = dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']), enabled = dict(required=False, type='bool', choices=BOOLEANS), downed = dict(required=False, type='bool', choices=BOOLEANS), dist = dict(required=False, default='daemontools'), From 528e1a644c86578c3376a6c00e9a72def939ac0f Mon Sep 17 00:00:00 2001 From: Maciej Delmanowski Date: Fri, 27 Feb 2015 13:00:49 +0100 Subject: [PATCH 097/224] Rename 'lxc-container' module to 'lxc_container' This is done to keep the convention of naming modules with underscores instead of hypens. 
--- cloud/lxc/{lxc-container.py => lxc_container.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename cloud/lxc/{lxc-container.py => lxc_container.py} (100%) diff --git a/cloud/lxc/lxc-container.py b/cloud/lxc/lxc_container.py similarity index 100% rename from cloud/lxc/lxc-container.py rename to cloud/lxc/lxc_container.py From bb886a595d50b29d7be87f271450988d4dd1344d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 27 Feb 2015 09:19:51 -0500 Subject: [PATCH 098/224] updated lxc-containter docs to reflect new name --- cloud/lxc/lxc_container.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 6df27867267..1ae67bf23c6 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -21,7 +21,7 @@ DOCUMENTATION = """ --- -module: lxc-container +module: lxc_container short_description: Manage LXC Containers version_added: 1.8.0 description: @@ -174,7 +174,7 @@ notes: EXAMPLES = """ - name: Create a started container - lxc-container: + lxc_container: name: test-container-started container_log: true template: ubuntu @@ -182,7 +182,7 @@ EXAMPLES = """ template_options: --release trusty - name: Create a stopped container - lxc-container: + lxc_container: name: test-container-stopped container_log: true template: ubuntu @@ -190,7 +190,7 @@ EXAMPLES = """ template_options: --release trusty - name: Create a frozen container - lxc-container: + lxc_container: name: test-container-frozen container_log: true template: ubuntu @@ -201,7 +201,7 @@ EXAMPLES = """ # Create filesystem container, configure it, and archive it, and start it. - name: Create filesystem container - lxc-container: + lxc_container: name: test-container-config container_log: true template: ubuntu @@ -217,7 +217,7 @@ EXAMPLES = """ # configuration to it, create an archive of it, and finally leave the container # in a frozen state. 
The container archive will be compressed using bzip2 - name: Create an lvm container - lxc-container: + lxc_container: name: test-container-lvm container_log: true template: ubuntu @@ -242,7 +242,7 @@ EXAMPLES = """ debug: var=lvm_container_info - name: Get information on a given container. - lxc-container: + lxc_container: name: test-container-config register: config_container_info @@ -250,33 +250,33 @@ EXAMPLES = """ debug: var=config_container_info - name: Run a command in a container and ensure its in a "stopped" state. - lxc-container: + lxc_container: name: test-container-started state: stopped container_command: | echo 'hello world.' | tee /opt/stopped - name: Run a command in a container and ensure its it in a "frozen" state. - lxc-container: + lxc_container: name: test-container-stopped state: frozen container_command: | echo 'hello world.' | tee /opt/frozen - name: Start a container. - lxc-container: + lxc_container: name: test-container-stopped state: started - name: Run a command in a container and then restart it. - lxc-container: + lxc_container: name: test-container-started state: restarted container_command: | echo 'hello world.' | tee /opt/restarted - name: Run a complex command within a "running" container. - lxc-container: + lxc_container: name: test-container-started container_command: | apt-get update @@ -289,14 +289,14 @@ EXAMPLES = """ # Create an archive of an existing container, save the archive to a defined # path and then destroy it. - name: Archive container - lxc-container: + lxc_container: name: test-container-started state: absent archive: true archive_path: /opt/archives - name: Destroy a container. 
- lxc-container: + lxc_container: name: "{{ item }}" state: absent with_items: From 8c94dd8b474e116feb5cea928282f6a7577d46ee Mon Sep 17 00:00:00 2001 From: Chad Norgan Date: Fri, 27 Feb 2015 11:19:44 -0600 Subject: [PATCH 099/224] Ignore lines that do not have the key in them Some switches return multi-line output, which breaks the split function, the change seeks to only preform the split on a line that has the key. --- network/lldp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/lldp.py b/network/lldp.py index 6b8836852f6..d30fa5d9a60 100755 --- a/network/lldp.py +++ b/network/lldp.py @@ -54,7 +54,7 @@ def gather_lldp(): lldp_entries = output.split("\n") for entry in lldp_entries: - if entry: + if entry.startswith('lldp'): path, value = entry.strip().split("=", 1) path = path.split(".") path_components, final = path[:-1], path[-1] From 7ef867c9cf98ecc08f7310f35d407b5b58a3f519 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Luiz=20dos=20Santos?= Date: Sat, 28 Feb 2015 00:07:47 -0300 Subject: [PATCH 100/224] Variable name typo --- files/patch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/patch.py b/files/patch.py index 46fefe86dd2..cd4b3130079 100644 --- a/files/patch.py +++ b/files/patch.py @@ -132,7 +132,7 @@ def main(): module.fail_json(msg="src %s doesn't exist or not readable" % (p.src)) if p.dest and not os.access(p.dest, W_OK): - module.fail_json(msg="dest %s doesn't exist or not writable" % (d.dest)) + module.fail_json(msg="dest %s doesn't exist or not writable" % (p.dest)) if p.basedir and not path.exists(p.basedir): module.fail_json(msg="basedir %s doesn't exist" % (p.basedir)) From 0d7647d904580351a87824287fc88c25bf817a10 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Sat, 28 Feb 2015 15:23:23 +0000 Subject: [PATCH 101/224] remove debug imports from acl module --- clustering/consul_acl | 3 --- 1 file changed, 3 deletions(-) diff --git a/clustering/consul_acl b/clustering/consul_acl index 
ae3efe5787f..fc997400ae9 100644 --- a/clustering/consul_acl +++ b/clustering/consul_acl @@ -92,8 +92,6 @@ except ImportError: " see https://pypi.python.org/pypi/pyhcl'" sys.exit(1) -import epdb - def execute(module): @@ -216,7 +214,6 @@ class Rules: return len(self.rules) > 0 def to_json(self): - # import epdb; epdb.serve() rules = {} for key, rule in self.rules.iteritems(): rules[key] = {'policy': rule.policy} From 4b574ba29445a29f31adc4396ad8ff710ccd78a5 Mon Sep 17 00:00:00 2001 From: Daniel Schep Date: Mon, 2 Mar 2015 15:06:36 -0800 Subject: [PATCH 102/224] Import @dschep's module for installing postgres extensions. Reviewed by @bcoca as a PR against the core repo here: https://github.com/ansible/ansible-modules-core/pull/21 --- database/postgresql/__init__.py | 0 database/postgresql/postgresql_ext.py | 188 ++++++++++++++++++++++++++ 2 files changed, 188 insertions(+) create mode 100644 database/postgresql/__init__.py create mode 100644 database/postgresql/postgresql_ext.py diff --git a/database/postgresql/__init__.py b/database/postgresql/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/database/postgresql/postgresql_ext.py b/database/postgresql/postgresql_ext.py new file mode 100644 index 00000000000..8dd32106d9a --- /dev/null +++ b/database/postgresql/postgresql_ext.py @@ -0,0 +1,188 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: postgresql_ext +short_description: Add or remove PostgreSQL extensions from a database. +description: + - Add or remove PostgreSQL extensions from a database. +version_added: "0.1" +options: + name: + description: + - name of the extension to add or remove + required: true + default: null + db: + description: + - name of the database to add or remove the extension to/from + required: true + default: null + login_user: + description: + - The username used to authenticate with + required: false + default: null + login_password: + description: + - The password used to authenticate with + required: false + default: null + login_host: + description: + - Host running the database + required: false + default: localhost + port: + description: + - Database port to connect to. + required: false + default: 5432 + state: + description: + - The database extension state + required: false + default: present + choices: [ "present", "absent" ] +notes: + - The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host. + - This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on + the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module. 
+requirements: [ psycopg2 ] +author: Daniel Schep +''' + +EXAMPLES = ''' +# Adds postgis to the database "acme" +- postgresql_ext: name=postgis db=acme +''' + +try: + import psycopg2 + import psycopg2.extras +except ImportError: + postgresqldb_found = False +else: + postgresqldb_found = True + +class NotSupportedError(Exception): + pass + + +# =========================================== +# PostgreSQL module specific support methods. +# + +def ext_exists(cursor, ext): + query = "SELECT * FROM pg_extension WHERE extname=%(ext)s" + cursor.execute(query, {'ext': ext}) + return cursor.rowcount == 1 + +def ext_delete(cursor, ext): + if ext_exists(cursor, ext): + query = "DROP EXTENSION \"%s\"" % ext + cursor.execute(query) + return True + else: + return False + +def ext_create(cursor, ext): + if not ext_exists(cursor, ext): + query = 'CREATE EXTENSION "%s"' % ext + cursor.execute(query) + return True + else: + return False + +# =========================================== +# Module execution. +# + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default="postgres"), + login_password=dict(default=""), + login_host=dict(default=""), + port=dict(default="5432"), + db=dict(required=True), + ext=dict(required=True, aliases=['name']), + state=dict(default="present", choices=["absent", "present"]), + ), + supports_check_mode = True + ) + + if not postgresqldb_found: + module.fail_json(msg="the python psycopg2 module is required") + + db = module.params["db"] + ext = module.params["ext"] + port = module.params["port"] + state = module.params["state"] + changed = False + + # To use defaults values, keyword arguments must be absent, so + # check which values are empty and don't include in the **kw + # dictionary + params_map = { + "login_host":"host", + "login_user":"user", + "login_password":"password", + "port":"port" + } + kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() + if k in params_map and v != '' ) + try: + db_connection 
= psycopg2.connect(database=db, **kw) + # Enable autocommit so we can create databases + if psycopg2.__version__ >= '2.4.2': + db_connection.autocommit = True + else: + db_connection.set_isolation_level(psycopg2 + .extensions + .ISOLATION_LEVEL_AUTOCOMMIT) + cursor = db_connection.cursor( + cursor_factory=psycopg2.extras.DictCursor) + except Exception, e: + module.fail_json(msg="unable to connect to database: %s" % e) + + try: + if module.check_mode: + if state == "absent": + changed = not db_exists(cursor, ext) + elif state == "present": + changed = db_exists(cursor, ext) + module.exit_json(changed=changed,ext=ext) + + if state == "absent": + changed = ext_delete(cursor, ext) + + elif state == "present": + changed = ext_create(cursor, ext) + except NotSupportedError, e: + module.fail_json(msg=str(e)) + except Exception, e: + module.fail_json(msg="Database query failed: %s" % e) + + module.exit_json(changed=changed, db=db) + +# import module snippets +from ansible.module_utils.basic import * +main() + From c0d8464246ae6ed2835c80ff60b46e19ff5cadbc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 2 Mar 2015 15:08:55 -0800 Subject: [PATCH 103/224] Move other postgres module into the postgresql hierarchy --- database/{ => postgresql}/postgresql_lang.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename database/{ => postgresql}/postgresql_lang.py (100%) diff --git a/database/postgresql_lang.py b/database/postgresql/postgresql_lang.py similarity index 100% rename from database/postgresql_lang.py rename to database/postgresql/postgresql_lang.py From 945da71ce447630dd2ebec81e999f0747bee5745 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 2 Mar 2015 15:11:25 -0800 Subject: [PATCH 104/224] Fix postgresql_ext documentation --- database/postgresql/postgresql_ext.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/postgresql/postgresql_ext.py b/database/postgresql/postgresql_ext.py index 8dd32106d9a..d70107a4cf9 100644 --- 
a/database/postgresql/postgresql_ext.py +++ b/database/postgresql/postgresql_ext.py @@ -54,7 +54,7 @@ options: - Database port to connect to. required: false default: 5432 - state: + state: description: - The database extension state required: false From a8584ade957004ad43a5c18e172f5a843569a739 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Tue, 3 Mar 2015 14:18:56 +0000 Subject: [PATCH 105/224] fix logic that tests for change in an existing registered service --- clustering/consul | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clustering/consul b/clustering/consul index fa1e06c3678..8aa2ce1fe4c 100644 --- a/clustering/consul +++ b/clustering/consul @@ -224,7 +224,7 @@ def add_service(module, service): # there is no way to retreive the details of checks so if a check is present # in the service it must be reregistered - if service.has_checks() or not(existing or existing == service): + if service.has_checks() or not existing or not existing == service: service.register(consul_api) # check that it registered correctly From 0c6d426c40932ec55a70ee96ec24f5131f46e2af Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Tue, 3 Mar 2015 20:03:46 +0000 Subject: [PATCH 106/224] require a valid duration suffix for interval and ttl values --- clustering/consul | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/clustering/consul b/clustering/consul index 8aa2ce1fe4c..24df908c45c 100644 --- a/clustering/consul +++ b/clustering/consul @@ -375,21 +375,21 @@ class ConsulCheck(): if check_id: self.check_id = check_id self.script = script - self.interval = str(interval) - - if not self.interval.endswith('m') or self.interval.endswith('s'): - self.interval += 'm' - - self.ttl = ttl + self.interval = self.validate_duration('interval', interval) + self.ttl = self.validate_duration('ttl', ttl) self.notes = notes self.node = node self.host = host - if interval and interval <= 0: - raise Error('check interval must be positive') + 
- if ttl and ttl <= 0: - raise Error('check ttl value must be positive') + def validate_duration(self, name, duration): + if duration: + duration_units = ['ns', 'us', 'ms', 's', 'm', 'h'] + if not any((duration.endswith(suffix) for suffix in duration_units)): + raise Exception('Invalid %s %s you must specify units (%s)' % + (name, duration, ', '.join(duration_units))) + return duration def register(self, consul_api): consul_api.agent.check.register(self.name, check_id=self.check_id, @@ -434,7 +434,8 @@ def main(): check_id=dict(required=False), check_name=dict(required=False), host=dict(default='localhost'), - interval=dict(required=False, default='1m'), + interval=dict(required=False, type='str'), + ttl=dict(required=False, type='str'), check_node=dict(required=False), check_host=dict(required=False), notes=dict(required=False), From f3c008ed5de70af534735ef7839464ee2384408a Mon Sep 17 00:00:00 2001 From: Simon JAILLET Date: Tue, 3 Mar 2015 20:47:13 +0100 Subject: [PATCH 107/224] composer: Fix `changed` status Fix `changed` status that always returns False with composer. This [previous PR](https://github.com/ansible/ansible-modules-extras/pull/61) had fixed the issue but because of a [Composer recent change](https://github.com/composer/composer/commit/cb336a5416595efa321c024735e6452c9c7df106) stderr is now used for reporting information meant for humans while stdout is more for the output of the command. This PR would definilty solve this issue. 
--- packaging/language/composer.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/language/composer.py b/packaging/language/composer.py index f788f53dd5c..23d9dc5031e 100644 --- a/packaging/language/composer.py +++ b/packaging/language/composer.py @@ -159,7 +159,8 @@ def main(): output = parse_out(err) module.fail_json(msg=output) else: - output = parse_out(out) + # Composer version > 1.0.0-alpha9 now use stderr for standard notification messages + output = parse_out(out + err) module.exit_json(changed=has_changed(output), msg=output) # import module snippets From 57be6d7ac4a90222c35d2b818b178d024e010c18 Mon Sep 17 00:00:00 2001 From: Ben Copeland Date: Thu, 5 Mar 2015 13:27:49 +0000 Subject: [PATCH 108/224] Catch the error for non-ssl SMTP --- notification/mail.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/notification/mail.py b/notification/mail.py index ccf53029741..aa3345b4f98 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -189,9 +189,13 @@ def main(): body = subject try: - smtp = smtplib.SMTP_SSL(host, port=int(port)) - except (smtplib.SMTPException, ssl.SSLError): - smtp = smtplib.SMTP(host, port=int(port)) + try: + smtp = smtplib.SMTP_SSL(host, port=int(port)) + except (smtplib.SMTPException, ssl.SSLError): + smtp = smtplib.SMTP(host, port=int(port)) + except Exception, e: + module.fail_json(rc=1, msg='Failed to send mail to server %s on port %s: %s' % (host, port, e)) + smtp.ehlo() if username and password: if smtp.has_extn('STARTTLS'): From a5e0fc331dc113497b02b1f6c7e509fbe6e5b734 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 6 Mar 2015 07:58:44 -0800 Subject: [PATCH 109/224] The old slack webhook API still works. It's just deprecated by slack. Analyze the given token and use the old webhook format if the token is old style and use the new format if the token is new style. Make domain optional with new-style tokens. 
Fixes #157 --- notification/slack.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/notification/slack.py b/notification/slack.py index 5577228978b..1ae748247f9 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -28,12 +28,20 @@ author: Ramon de la Fuente options: domain: description: - - Slack (sub)domain for your environment without protocol. - (i.e. C(future500.slack.com)) - required: true + - Slack (sub)domain for your environment without protocol. (i.e. + C(future500.slack.com)) In 1.8 and beyond, this is deprecated and may + be ignored. See token documentation for information. + required: false token: description: - - Slack integration token + - Slack integration token. This authenticates you to the slack service. + Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In + 1.8 and above, ansible adapts to the new slack API where tokens look + like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens + are in the new format then slack will ignore any value of domain. If + the token is in the old format the domain is required. Ansible has no + control of when slack will get rid of the old API. When slack does + that the old format will stop working. required: true msg: description: @@ -105,6 +113,7 @@ EXAMPLES = """ """ +OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s' def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse): @@ -127,7 +136,13 @@ def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoj return payload def do_notify_slack(module, domain, token, payload): - slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (token) + if token.count('/') >= 2: + # New style token + slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (token) + else: + if not domain: + module.fail_json(msg="Slack has updated its webhook API. 
You need to specify a token of the form XXXX/YYYY/ZZZZ in your playbook") + slack_incoming_webhook = OLD_SLACK_INCOMING_WEBHOOK % (domain, token) response, info = fetch_url(module, slack_incoming_webhook, data=payload) if info['status'] != 200: @@ -137,7 +152,7 @@ def do_notify_slack(module, domain, token, payload): def main(): module = AnsibleModule( argument_spec = dict( - domain = dict(type='str', required=True), + domain = dict(type='str', required=False, default=None), token = dict(type='str', required=True), msg = dict(type='str', required=True), channel = dict(type='str', default=None), From d821a39131aedf39fa00547b672745b74002b16d Mon Sep 17 00:00:00 2001 From: Matthew Vernon Date: Tue, 30 Sep 2014 14:08:26 +0100 Subject: [PATCH 110/224] known_hosts: manage ssh known_hosts files with ansible. The known_hosts module lets you add or remove a host from the known_hosts file. This is useful if you're going to want to use the git module over ssh, for example. If you have a very large number of host keys to manage, you will find the template module more useful. This was pull request 7840 from the old ansible repo, which was accepted-in-principle but not yet merged. The mailing list thread reading it is: https://groups.google.com/forum/#!topic/ansible-devel/_e7H_VT6UJE/discussion --- system/known_hosts.py | 234 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 234 insertions(+) create mode 100644 system/known_hosts.py diff --git a/system/known_hosts.py b/system/known_hosts.py new file mode 100644 index 00000000000..a7e19d2e5e2 --- /dev/null +++ b/system/known_hosts.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python + +""" +Ansible module to manage the ssh known_hosts file. +Copyright(c) 2014, Matthew Vernon + +This module is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
+ +This module is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this module. If not, see . +""" + +DOCUMENTATION = ''' +--- +module: known_hosts +short_description: Add or remove a host from the C(known_hosts) file +description: + - The M(known_hosts) module lets you add or remove a host from the C(known_hosts) file. + This is useful if you're going to want to use the M(git) module over ssh, for example. + If you have a very large number of host keys to manage, you will find the M(template) module more useful. +version_added: "1.6" +options: + host: + description: + - The host to add or remove (must match a host specified in key) + required: true + default: null + key: + description: + - The SSH public host key, as a string (optional if removing the host) + required: true + default: null + path: + description: + - The known_hosts file to edit + required: no + default: "(homedir)+/.ssh/known_hosts" + state: + description: + - I(present) to add the host, I(absent) to remove it. + choices: [ "present", "absent" ] + required: no + default: present +requirements: [ ] +author: Matthew Vernon +''' + +EXAMPLES = ''' +# Example using with_file to set the system known_hosts file +- name: tell the host about our servers it might want to ssh to + known_hosts: path='/etc/ssh/ssh_known_hosts' + host='foo.com.invalid' + key="{{ lookup('file', 'pubkeys/foo.com.invalid') }}" +''' + +# Makes sure public host keys are present or absent in the given known_hosts +# file. 
+# +# Arguments +# ========= +# host = hostname whose key should be added +# key = line(s) to add to known_hosts file +# path = the known_hosts file to edit (default: ~/.ssh/known_hosts) +# state = absent|present (default: present) + +import os,os.path,tempfile,errno + +def enforce_state(module, params): + """ + Add or remove key. + """ + + host = params["host"] + key = params.get("key",None) + port = params.get("port",None) + #expand the path parameter; otherwise module.add_path_info + #(called by exit_json) unhelpfully says the unexpanded path is absent. + params["path"]=os.path.expanduser(params.get("path")) + path = params.get("path") + state = params.get("state","present") + + #trailing newline in files gets lost, so re-add if necessary + if key is not None and key[-1]!='\n': + key+='\n' + + if key is None and state != "absent": + module.fail_json(msg="No key specified when adding a host") + + sanity_check(module,host,key) + + current,replace=search_for_host_key(module,host,key,path) + + #We will change state if current==True & state!="present" + #or current==False & state=="present" + #i.e (current) XOR (state=="present") + #Alternatively, if replace is true (i.e. key present, and we must change it) + if module.check_mode: + module.exit_json(changed = replace or ((state=="present") != current)) + + #Now do the work. 
+ + #First, remove an extant entry if required + if replace==True or (current==True and state=="absent"): + module.run_command(['ssh-keygen','-R',host,'-f',path], + check_rc=True) + params['changed'] = True + #Next, add a new (or replacing) entry + if replace==True or (current==False and state=="present"): + try: + inf=open(path,"r") + except IOError, e: + if e.errno == errno.ENOENT: + inf=None + else: + module.fail_json(msg="Failed to read %s: %s" % \ + (path,str(e))) + try: + outf=tempfile.NamedTemporaryFile(dir=os.path.dirname(path), + delete=False) + if inf is not None: + for line in inf: + outf.write(line) + inf.close() + outf.write(key) + outf.close() + module.atomic_move(outf.name,path) + except IOError,e: + module.fail_json(msg="Failed to write to file %s: %s" % \ + (path,str(e))) + params['changed'] = True + + return params + +def sanity_check(module,host,key): + '''Check supplied key is sensible + + host and key are parameters provided by the user; If the host + provided is inconsistent with the key supplied, then this function + quits, providing an error to the user. + ''' + #If no key supplied, we're doing a removal, and have nothing to check here. + if key is None: + return + #Rather than parsing the key ourselves, get ssh-keygen to do it + #(this is essential for hashed keys, but otherwise useful, as the + #key question is whether ssh-keygen thinks the key matches the host). + + #The approach is to write the key to a temporary file, + #and then attempt to look up the specified host in that file. 
+ try: + outf=tempfile.NamedTemporaryFile(delete=False) + outf.write(key) + outf.close() + except IOError,e: + module.fail_json(msg="Failed to write to temporary file %s: %s" % \ + (outf.name,str(e))) + rc,stdout,stderr=module.run_command(['ssh-keygen','-F',host, + '-f',outf.name], + check_rc=True) + os.remove(outf.name) + if stdout=='': #host not found + module.fail_json(msg="Host parameter does not match hashed host field in supplied key") + +def search_for_host_key(module,host,key,path): + '''search_for_host_key(module,host,key,path) -> (current,replace) + + Looks up host in the known_hosts file path; if it's there, looks to see + if one of those entries matches key. Returns: + current (Boolean): is host found in path? + replace (Boolean): is the key in path different to that supplied by user? + if current=False, then replace is always False. + ''' + replace=False + if os.path.exists(path)==False: + return False, False + rc,stdout,stderr=module.run_command(['ssh-keygen','-F',host,'-f',path], + check_rc=True) + if stdout=='': #host not found + return False, False + +#If user supplied no key, we don't want to try and replace anything with it + if key is None: + return True, False + + lines=stdout.split('\n') + k=key.strip() #trim trailing newline + #ssh-keygen returns only the host we ask about in the host field, + #even if the key entry has multiple hosts. Emulate this behaviour here, + #otherwise we get false negatives. + #Only necessary for unhashed entries. 
+ if k[0] !='|': + k=k.split() + #The optional "marker" field, used for @cert-authority or @revoked + if k[0][0] == '@': + k[1]=host + else: + k[0]=host + k=' '.join(k) + for l in lines: + if l=='': + continue + if l[0]=='#': #comment + continue + if k==l: #found a match + return True, False #current, not-replace + #No match found, return current and replace + return True, True + +def main(): + + module = AnsibleModule( + argument_spec = dict( + host = dict(required=True, type='str'), + key = dict(required=True, type='str'), + path = dict(default="~/.ssh/known_hosts", type='str'), + state = dict(default='present', choices=['absent','present']), + ), + supports_check_mode = True + ) + + results = enforce_state(module,module.params) + module.exit_json(**results) + +# import module snippets +from ansible.module_utils.basic import * +main() From 7a9e6aee90a079321961163f2a7408567fcb45fd Mon Sep 17 00:00:00 2001 From: Matthew Vernon Date: Fri, 6 Mar 2015 18:22:28 +0000 Subject: [PATCH 111/224] Changes from Brian Coca's review of this module These are all the code changes from Brian's review: * change #! line * rename "host" to "name" [keep as alias] * make documentation clearer * imports 1 per line * use get_bin_path to find ssh-keygen * key not actually required when removing host --- system/known_hosts.py | 45 +++++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/system/known_hosts.py b/system/known_hosts.py index a7e19d2e5e2..d4a6e9c35e0 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python """ Ansible module to manage the ssh known_hosts file. @@ -28,15 +28,16 @@ description: If you have a very large number of host keys to manage, you will find the M(template) module more useful. 
version_added: "1.6" options: - host: + name: + aliases: [ 'host' ] description: - The host to add or remove (must match a host specified in key) required: true default: null key: description: - - The SSH public host key, as a string (optional if removing the host) - required: true + - The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed) + required: false default: null path: description: @@ -66,12 +67,15 @@ EXAMPLES = ''' # # Arguments # ========= -# host = hostname whose key should be added +# name = hostname whose key should be added (alias: host) # key = line(s) to add to known_hosts file # path = the known_hosts file to edit (default: ~/.ssh/known_hosts) # state = absent|present (default: present) -import os,os.path,tempfile,errno +import os +import os.path +import tempfile +import errno def enforce_state(module, params): """ @@ -83,9 +87,10 @@ def enforce_state(module, params): port = params.get("port",None) #expand the path parameter; otherwise module.add_path_info #(called by exit_json) unhelpfully says the unexpanded path is absent. 
- params["path"]=os.path.expanduser(params.get("path")) - path = params.get("path") - state = params.get("state","present") + path = os.path.expanduser(params.get("path")) + state = params.get("state") + #Find the ssh-keygen binary + sshkeygen = module.get_bin_path("ssh-keygen",True) #trailing newline in files gets lost, so re-add if necessary if key is not None and key[-1]!='\n': @@ -94,9 +99,9 @@ def enforce_state(module, params): if key is None and state != "absent": module.fail_json(msg="No key specified when adding a host") - sanity_check(module,host,key) + sanity_check(module,host,key,sshkeygen) - current,replace=search_for_host_key(module,host,key,path) + current,replace=search_for_host_key(module,host,key,path,sshkeygen) #We will change state if current==True & state!="present" #or current==False & state=="present" @@ -109,7 +114,7 @@ def enforce_state(module, params): #First, remove an extant entry if required if replace==True or (current==True and state=="absent"): - module.run_command(['ssh-keygen','-R',host,'-f',path], + module.run_command([sshkeygen,'-R',host,'-f',path], check_rc=True) params['changed'] = True #Next, add a new (or replacing) entry @@ -139,12 +144,13 @@ def enforce_state(module, params): return params -def sanity_check(module,host,key): +def sanity_check(module,host,key,sshkeygen): '''Check supplied key is sensible host and key are parameters provided by the user; If the host provided is inconsistent with the key supplied, then this function quits, providing an error to the user. + sshkeygen is the path to ssh-keygen, found earlier with get_bin_path ''' #If no key supplied, we're doing a removal, and have nothing to check here. 
if key is None: @@ -162,26 +168,27 @@ def sanity_check(module,host,key): except IOError,e: module.fail_json(msg="Failed to write to temporary file %s: %s" % \ (outf.name,str(e))) - rc,stdout,stderr=module.run_command(['ssh-keygen','-F',host, + rc,stdout,stderr=module.run_command([sshkeygen,'-F',host, '-f',outf.name], check_rc=True) os.remove(outf.name) if stdout=='': #host not found module.fail_json(msg="Host parameter does not match hashed host field in supplied key") -def search_for_host_key(module,host,key,path): - '''search_for_host_key(module,host,key,path) -> (current,replace) +def search_for_host_key(module,host,key,path,sshkeygen): + '''search_for_host_key(module,host,key,path,sshkeygen) -> (current,replace) Looks up host in the known_hosts file path; if it's there, looks to see if one of those entries matches key. Returns: current (Boolean): is host found in path? replace (Boolean): is the key in path different to that supplied by user? if current=False, then replace is always False. 
+ sshkeygen is the path to ssh-keygen, found earlier with get_bin_path ''' replace=False if os.path.exists(path)==False: return False, False - rc,stdout,stderr=module.run_command(['ssh-keygen','-F',host,'-f',path], + rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,'-f',path], check_rc=True) if stdout=='': #host not found return False, False @@ -218,8 +225,8 @@ def main(): module = AnsibleModule( argument_spec = dict( - host = dict(required=True, type='str'), - key = dict(required=True, type='str'), + name = dict(required=True, type='str', aliases=['host']), + key = dict(required=False, type='str'), path = dict(default="~/.ssh/known_hosts", type='str'), state = dict(default='present', choices=['absent','present']), ), From 7c40201c3d3aef1ef20411dd505d499b19953bee Mon Sep 17 00:00:00 2001 From: Richard Gray Date: Sat, 7 Mar 2015 21:24:32 +1300 Subject: [PATCH 112/224] Set default for locale_gen state parameter Fix the state parameter for the locale_gen module to be non-mandatory with a default value of 'present', as indicated by the module documentation. --- system/locale_gen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/locale_gen.py b/system/locale_gen.py index 70b83be88aa..5d53951cf18 100644 --- a/system/locale_gen.py +++ b/system/locale_gen.py @@ -146,7 +146,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - state = dict(choices=['present','absent'], required=True), + state = dict(choices=['present','absent'], default='present'), ), supports_check_mode=True ) From b8b21d37f06c8ae7fc80e22774a7bbaf83b44b9d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 10 Mar 2015 10:35:45 -0400 Subject: [PATCH 113/224] added installation info .. 
that you don't need to --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 7959fffa7cf..9a0ddb6c898 100644 --- a/README.md +++ b/README.md @@ -19,3 +19,8 @@ License ======= As with Ansible, modules distributed with Ansible are GPLv3 licensed. User generated modules not part of this project can be of any license. + +Installation +============ + +There should be no need to install this repo separately as it should be included in any Ansible install using the official documented methods. From 3425828795324b11f4a60235963d24bf4fd9a037 Mon Sep 17 00:00:00 2001 From: Larry Gilbert Date: Fri, 6 Mar 2015 00:52:20 +0000 Subject: [PATCH 114/224] pkgin: support check mode, make other improvements * Add support for check mode * Use "pkgin search" to guarantee 0 or 1 result * Edit documentation for style, new feature, etc. * General refactoring * Lay some groundwork for future support of "state=latest" --- packaging/os/pkgin.py | 134 +++++++++++++++++++++++++++--------------- 1 file changed, 88 insertions(+), 46 deletions(-) mode change 100755 => 100644 packaging/os/pkgin.py diff --git a/packaging/os/pkgin.py b/packaging/os/pkgin.py old mode 100755 new mode 100644 index 866c9f76a4c..f4c203e56e0 --- a/packaging/os/pkgin.py +++ b/packaging/os/pkgin.py @@ -1,8 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2013, Shaun Zinck -# Written by Shaun Zinck +# Copyright (c) 2013 Shaun Zinck +# Copyright (c) 2015 Lawrence Leonard Gilbert +# +# Written by Shaun Zinck # Based on pacman module written by Afterburn # that was based on apt module written by Matthew Williams # @@ -23,27 +25,32 @@ DOCUMENTATION = ''' --- module: pkgin -short_description: Package manager for SmartOS +short_description: Package manager for SmartOS, NetBSD, et al. description: - - Manages SmartOS packages + - "The standard package manager for SmartOS, but also usable on NetBSD + or any OS that uses C(pkgsrc). 
(Home: U(http://pkgin.net/))" version_added: "1.0" +author: Shaun Zinck, Larry Gilbert +notes: + - "Known bug with pkgin < 0.8.0: if a package is removed and another + package depends on it, the other package will be silently removed as + well. New to Ansible 1.9: check-mode support." options: name: description: - - name of package to install/remove + - Name of package to install/remove; + - multiple names may be given, separated by commas required: true state: description: - - state of the package + - Intended state of the package choices: [ 'present', 'absent' ] required: false default: present -author: Shaun Zinck -notes: [] ''' EXAMPLES = ''' -# install package foo" +# install package foo - pkgin: name=foo state=present # remove package foo @@ -60,64 +67,97 @@ import os import sys import pipes -def query_package(module, pkgin_path, name, state="present"): +def query_package(module, pkgin_path, name): + """Search for the package by name. + + Possible return values: + * "present" - installed, no upgrade needed + * "outdated" - installed, but can be upgraded + * False - not installed or not found + """ + + # Use "pkgin search" to find the package. The regular expression will + # only match on the complete name. + rc, out, err = module.run_command("%s search \"^%s$\"" % (pkgin_path, name)) + + # rc will not be 0 unless the search was a success + if rc == 0: + + # Get first line + line = out.split('\n')[0] + + # Break up line at spaces. The first part will be the package with its + # version (e.g. 
'gcc47-libs-4.7.2nb4'), and the second will be the state + # of the package: + # '' - not installed + # '<' - installed but out of date + # '=' - installed and up to date + # '>' - installed but newer than the repository version + pkgname_with_version, raw_state = out.split(' ')[0:2] - if state == "present": + # Strip version + # (results in sth like 'gcc47-libs') + pkgname_without_version = '-'.join(pkgname_with_version.split('-')[:-1]) - rc, out, err = module.run_command("%s -y list | grep ^%s" % (pipes.quote(pkgin_path), pipes.quote(name)), use_unsafe_shell=True) + if name != pkgname_without_version: + return False + # no fall-through - if rc == 0: - # At least one package with a package name that starts with ``name`` - # is installed. For some cases this is not sufficient to determine - # wether the queried package is installed. - # - # E.g. for ``name='gcc47'``, ``gcc47`` not being installed, but - # ``gcc47-libs`` being installed, ``out`` would be: - # - # gcc47-libs-4.7.2nb4 The GNU Compiler Collection (GCC) support shared libraries. - # - # Multiline output is also possible, for example with the same query - # and bot ``gcc47`` and ``gcc47-libs`` being installed: - # - # gcc47-libs-4.7.2nb4 The GNU Compiler Collection (GCC) support shared libraries. - # gcc47-4.7.2nb3 The GNU Compiler Collection (GCC) - 4.7 Release Series + # The package was found; now return its state + if raw_state == '<': + return 'outdated' + elif raw_state == '=' or raw_state == '>': + return 'present' + else: + return False - # Loop over lines in ``out`` - for line in out.split('\n'): - # Strip description - # (results in sth. 
like 'gcc47-libs-4.7.2nb4') - pkgname_with_version = out.split(' ')[0] +def format_action_message(module, action, count): + vars = { "actioned": action, + "count": count } - # Strip version - # (results in sth like 'gcc47-libs') - pkgname_without_version = '-'.join(pkgname_with_version.split('-')[:-1]) + if module.check_mode: + message = "would have %(actioned)s %(count)d package" % vars + else: + message = "%(actioned)s %(count)d package" % vars - if name == pkgname_without_version: - return True + if count == 1: + return message + else: + return message + "s" - return False + +def format_pkgin_command(module, pkgin_path, command, package): + vars = { "pkgin": pkgin_path, + "command": command, + "package": package } + + if module.check_mode: + return "%(pkgin)s -n %(command)s %(package)s" % vars + else: + return "%(pkgin)s -y %(command)s %(package)s" % vars def remove_packages(module, pkgin_path, packages): remove_c = 0 + # Using a for loop incase of error, we can report the package that failed for package in packages: # Query the package first, to see if we even need to remove if not query_package(module, pkgin_path, package): continue - rc, out, err = module.run_command("%s -y remove %s" % (pkgin_path, package)) + rc, out, err = module.run_command( + format_pkgin_command(module, pkgin_path, "remove", package)) - if query_package(module, pkgin_path, package): + if not module.check_mode and query_package(module, pkgin_path, package): module.fail_json(msg="failed to remove %s: %s" % (package, out)) remove_c += 1 if remove_c > 0: - - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c)) module.exit_json(changed=False, msg="package(s) already absent") @@ -130,15 +170,16 @@ def install_packages(module, pkgin_path, packages): if query_package(module, pkgin_path, package): continue - rc, out, err = module.run_command("%s -y install %s" % (pkgin_path, package)) + rc, 
out, err = module.run_command( + format_pkgin_command(module, pkgin_path, "install", package)) - if not query_package(module, pkgin_path, package): + if not module.check_mode and not query_package(module, pkgin_path, package): module.fail_json(msg="failed to install %s: %s" % (package, out)) install_c += 1 if install_c > 0: - module.exit_json(changed=True, msg="present %s package(s)" % (install_c)) + module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c)) module.exit_json(changed=False, msg="package(s) already present") @@ -148,7 +189,8 @@ def main(): module = AnsibleModule( argument_spec = dict( state = dict(default="present", choices=["present","absent"]), - name = dict(aliases=["pkg"], required=True))) + name = dict(aliases=["pkg"], required=True)), + supports_check_mode = True) pkgin_path = module.get_bin_path('pkgin', True, ['/opt/local/bin']) From c6448660a3cd0a4078f9ab2cd0b6c5c1b83470d3 Mon Sep 17 00:00:00 2001 From: Dmitry Bashkatov Date: Wed, 11 Mar 2015 10:54:13 +0300 Subject: [PATCH 115/224] gluster_volume: fixes, new "force" option --- system/gluster_volume.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 00e2cdeba65..91a717aae6d 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -83,6 +83,11 @@ options: required: false description: - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list) + force: + required: false + description: + - If brick is being created in the root partition, module will fail. 
+ Set force to true to override this behaviour notes: - "Requires cli tools for GlusterFS on servers" - "Will add new bricks, but not remove them" @@ -91,7 +96,7 @@ author: Taneli Leppä EXAMPLES = """ - name: create gluster volume - gluster_volume: state=present name=test1 brick=/bricks/brick1/g1 rebalance=yes hosts:"{{ play_hosts }}" + gluster_volume: state=present name=test1 brick=/bricks/brick1/g1 rebalance=yes cluster:"{{ play_hosts }}" run_once: true - name: tune @@ -231,7 +236,7 @@ def main(): if myhostname != host: probe(host) - def create_volume(name, stripe, replica, transport, hosts, brick): + def create_volume(name, stripe, replica, transport, hosts, brick, force): args = [ 'volume', 'create' ] args.append(name) if stripe: @@ -244,6 +249,8 @@ def main(): args.append(transport) for host in hosts: args.append(('%s:%s' % (host, brick))) + if force: + args.append('force') run_gluster(args) def start_volume(name): @@ -281,10 +288,11 @@ def main(): transport=dict(required=False, default='tcp', choices=[ 'tcp', 'rdma', 'tcp,rdma' ]), brick=dict(required=False, default=None), start_on_create=dict(required=False, default=True, type='bool'), - rebalance=dict(required=False, default=False, taype='bool'), + rebalance=dict(required=False, default=False, type='bool'), options=dict(required=False, default=None, type='dict'), quota=dict(required=False), directory=dict(required=False, default=None), + force=dict(required=False, default=False, type='bool'), ) ) @@ -300,13 +308,14 @@ def main(): replicas = module.params['replicas'] transport = module.params['transport'] myhostname = module.params['host'] - start_volume = module.boolean(module.params['start_on_create']) + start_on_create = module.boolean(module.params['start_on_create']) rebalance = module.boolean(module.params['rebalance']) + force = module.boolean(module.params['force']) if not myhostname: myhostname = socket.gethostname() - options = module.params['options'] + options = module.params['options'] or {} 
quota = module.params['quota'] directory = module.params['directory'] @@ -329,11 +338,12 @@ def main(): # create if it doesn't exist if volume_name not in volumes: - create_volume(volume_name, stripes, replicas, transport, cluster, brick_path) + create_volume(volume_name, stripes, replicas, transport, cluster, brick_path, force) + volumes = get_volumes() changed = True if volume_name in volumes: - if volumes[volume_name]['status'].lower() != 'started' and start_volume: + if volumes[volume_name]['status'].lower() != 'started' and start_on_create: start_volume(volume_name) changed = True From 26c2080dcfefda4fbdea6ab8917f734401c19e20 Mon Sep 17 00:00:00 2001 From: Dmitry Bashkatov Date: Wed, 11 Mar 2015 16:40:22 +0300 Subject: [PATCH 116/224] gluster_volume: use force option during add-brick --- system/gluster_volume.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 91a717aae6d..3f735a12e1a 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -262,8 +262,11 @@ def main(): def set_volume_option(name, option, parameter): run_gluster([ 'volume', 'set', name, option, parameter ]) - def add_brick(name, brick): - run_gluster([ 'volume', 'add-brick', name, brick ]) + def add_brick(name, brick, force): + args = [ 'volume', 'add-brick', name, brick ] + if force: + args.append('force') + run_gluster(args) def rebalance(name): run_gluster(['volume', 'rebalance', name, 'start']) @@ -363,7 +366,7 @@ def main(): removed_bricks.append(brick) for brick in new_bricks: - add_brick(volume_name, brick) + add_brick(volume_name, brick, force) changed = True # handle quotas From 19d51a51dbfb2a5c2f4279baf635acb70785e902 Mon Sep 17 00:00:00 2001 From: Dmitry Bashkatov Date: Wed, 11 Mar 2015 17:22:38 +0300 Subject: [PATCH 117/224] gluster_volume: remove redundant code --- system/gluster_volume.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/gluster_volume.py 
b/system/gluster_volume.py index 3f735a12e1a..d51512a1436 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -292,7 +292,7 @@ def main(): brick=dict(required=False, default=None), start_on_create=dict(required=False, default=True, type='bool'), rebalance=dict(required=False, default=False, type='bool'), - options=dict(required=False, default=None, type='dict'), + options=dict(required=False, default={}, type='dict'), quota=dict(required=False), directory=dict(required=False, default=None), force=dict(required=False, default=False, type='bool'), @@ -318,7 +318,7 @@ def main(): if not myhostname: myhostname = socket.gethostname() - options = module.params['options'] or {} + options = module.params['options'] quota = module.params['quota'] directory = module.params['directory'] From 8baba98ebe5053e0c1e71881975ce8a1788f171c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 11 Mar 2015 11:06:30 -0700 Subject: [PATCH 118/224] Don't know why but the 1.8.x branch didn't include the commit for crypttab. Update documentation to show it is new in 1.9 as a result of that oversight. --- system/crypttab.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/crypttab.py b/system/crypttab.py index 70230fa53e1..5142a6cf327 100644 --- a/system/crypttab.py +++ b/system/crypttab.py @@ -24,7 +24,7 @@ module: crypttab short_description: Encrypted Linux block devices description: - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab). 
-version_added: "1.8" +version_added: "1.9" options: name: description: From 696bc60caad2ea96c0a70c8091e24b2da060f35c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Mar 2015 10:24:36 -0400 Subject: [PATCH 119/224] fixed doc issue (missing description and corrected version added) --- notification/mail.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/notification/mail.py b/notification/mail.py index aa3345b4f98..eb61ed32744 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -74,12 +74,13 @@ options: - If SMTP requires username default: null required: false - version_added: "1.6" + version_added: "1.9" password: + description: - If SMTP requires password default: null required: false - version_added: "1.6" + version_added: "1.9" host: description: - The mail server From 34e7d9c938ef8ea0d9f754fe9d4328f50c1c1dae Mon Sep 17 00:00:00 2001 From: David Wittman Date: Fri, 13 Mar 2015 21:09:33 -0500 Subject: [PATCH 120/224] Fix multiple issues with alternatives module - Changes are no longer erroneously reported on RHEL (#12) - Adding new link groups on Debian works again. - This was broken in a previous commit by assuming the OS was RHEL if `update-alternatives --query ` had a return code of 2 - Prefer `--display` over `--query` for determining available alternatives - --display is more distro-agnostic and simplifies the code - Fix missing `msg=` in `fail_json` call when `link` is missing - Document that `link` is required on RHEL-based distros Tested on Ubuntu 12.04+ and CentOS 6/7 --- system/alternatives.py | 105 +++++++++++++++++------------------------ 1 file changed, 43 insertions(+), 62 deletions(-) diff --git a/system/alternatives.py b/system/alternatives.py index 871a494e87d..ff4de59cf11 100755 --- a/system/alternatives.py +++ b/system/alternatives.py @@ -4,6 +4,7 @@ """ Ansible module to manage symbolic link alternatives. 
(c) 2014, Gabe Mulley +(c) 2015, David Wittman This file is part of Ansible @@ -26,7 +27,7 @@ DOCUMENTATION = ''' module: alternatives short_description: Manages alternative programs for common commands description: - - Manages symbolic links using the 'update-alternatives' tool provided on debian-like systems. + - Manages symbolic links using the 'update-alternatives' tool - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). version_added: "1.6" options: @@ -41,6 +42,7 @@ options: link: description: - The path to the symbolic link that should point to the real executable. + - This option is required on RHEL-based distributions required: false requirements: [ update-alternatives ] ''' @@ -55,12 +57,14 @@ EXAMPLES = ''' DEFAULT_LINK_PRIORITY = 50 +import re + def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - path = dict(required=True), + path = dict(required=True), link = dict(required=False), ), supports_check_mode=True, @@ -71,78 +75,55 @@ def main(): path = params['path'] link = params['link'] - UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True) + UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True) current_path = None all_alternatives = [] - os_family = None - (rc, query_output, query_error) = module.run_command( - [UPDATE_ALTERNATIVES, '--query', name] + # Run `update-alternatives --display ` to find existing alternatives + (rc, display_output, _) = module.run_command( + [UPDATE_ALTERNATIVES, '--display', name] ) - # Gather the current setting and all alternatives from the query output. 
- # Query output should look something like this on Debian systems: - - # Name: java - # Link: /usr/bin/java - # Slaves: - # java.1.gz /usr/share/man/man1/java.1.gz - # Status: manual - # Best: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - # Value: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java - - # Alternative: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java - # Priority: 1061 - # Slaves: - # java.1.gz /usr/lib/jvm/java-6-openjdk-amd64/jre/man/man1/java.1.gz - - # Alternative: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - # Priority: 1071 - # Slaves: - # java.1.gz /usr/lib/jvm/java-7-openjdk-amd64/jre/man/man1/java.1.gz - if rc == 0: - os_family = "Debian" - for line in query_output.splitlines(): - split_line = line.split(':') - if len(split_line) == 2: - key = split_line[0] - value = split_line[1].strip() - if key == 'Value': - current_path = value - elif key == 'Alternative': - all_alternatives.append(value) - elif key == 'Link' and not link: - link = value - elif rc == 2: - os_family = "RedHat" - # This is the version of update-alternatives that is shipped with - # chkconfig on RedHat-based systems. Try again with the right options. 
- (rc, query_output, query_error) = module.run_command( - [UPDATE_ALTERNATIVES, '--list'] - ) - for line in query_output.splitlines(): - line_name, line_mode, line_path = line.strip().split("\t") - if line_name != name: - continue - current_path = line_path - break + # Alternatives already exist for this link group + # Parse the output to determine the current path of the symlink and + # available alternatives + current_path_regex = re.compile(r'^\s*link currently points to (.*)$', + re.MULTILINE) + alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE) + + current_path = current_path_regex.search(display_output).group(1) + all_alternatives = alternative_regex.findall(display_output) + + if not link: + # Read the current symlink target from `update-alternatives --query` + # in case we need to install the new alternative before setting it. + # + # This is only compatible on Debian-based systems, as the other + # alternatives don't have --query available + rc, query_output, _ = module.run_command( + [UPDATE_ALTERNATIVES, '--query', name] + ) + if rc == 0: + for line in query_output.splitlines(): + if line.startswith('Link:'): + link = line.split()[1] + break if current_path != path: if module.check_mode: module.exit_json(changed=True, current_path=current_path) try: # install the requested path if necessary - # (unsupported on the RedHat version) - if path not in all_alternatives and os_family == "Debian": - if link: - module.run_command( - [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)], - check_rc=True - ) - else: - module.fail_json("Needed to install the alternative, but unable to do so, as we are missking the link") + if path not in all_alternatives: + if not link: + module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link") + + module.run_command( + [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)], + check_rc=True + ) # select the 
requested path module.run_command( @@ -151,7 +132,7 @@ def main(): ) module.exit_json(changed=True) - except subprocess.CalledProcessError, cpe: + except subprocess.CalledProcessError as cpe: module.fail_json(msg=str(dir(cpe))) else: module.exit_json(changed=False) From b553f59a54dbf7a0cae58a6d42054cc74d593c55 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Mon, 16 Mar 2015 16:50:53 +0000 Subject: [PATCH 121/224] Properly report exception causes particularly connection exceptions contacting the consul agent --- clustering/consul | 11 ++++++----- clustering/consul_acl | 11 ++++++----- clustering/consul_kv | 12 +++++++----- clustering/consul_session | 11 ++++++----- 4 files changed, 25 insertions(+), 20 deletions(-) diff --git a/clustering/consul b/clustering/consul index 24df908c45c..15a68f068a2 100644 --- a/clustering/consul +++ b/clustering/consul @@ -143,6 +143,7 @@ except ImportError, e: "see http://python-consul.readthedocs.org/en/latest/#installation'" sys.exit(1) +from requests.exceptions import ConnectionError def register_with_consul(module): @@ -453,11 +454,11 @@ def main(): ) try: register_with_consul(module) - except IOError, e: - error = e.read() - if not error: - error = str(e) - module.fail_json(msg=error) + except ConnectionError, e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) # import module snippets from ansible.module_utils.basic import * diff --git a/clustering/consul_acl b/clustering/consul_acl index fc997400ae9..cd5466c53b1 100644 --- a/clustering/consul_acl +++ b/clustering/consul_acl @@ -92,6 +92,7 @@ except ImportError: " see https://pypi.python.org/pypi/pyhcl'" sys.exit(1) +from requests.exceptions import ConnectionError def execute(module): @@ -284,11 +285,11 @@ def main(): try: execute(module) - except IOError, e: - error = e.read() - if not error: - error = str(e) - 
module.fail_json(msg=error) + except ConnectionError, e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) # import module snippets from ansible.module_utils.basic import * diff --git a/clustering/consul_kv b/clustering/consul_kv index 6a2b77ea7c6..8999a43319f 100644 --- a/clustering/consul_kv +++ b/clustering/consul_kv @@ -117,6 +117,7 @@ except ImportError, e: see http://python-consul.readthedocs.org/en/latest/#installation'""" sys.exit(1) +from requests.exceptions import ConnectionError def execute(module): @@ -227,11 +228,12 @@ def main(): try: execute(module) - except IOError, e: - error = e.read() - if not error: - error = str(e) - module.fail_json(msg=error) + except ConnectionError, e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) + # import module snippets from ansible.module_utils.basic import * diff --git a/clustering/consul_session b/clustering/consul_session index f11c5447e57..00f4cae7344 100644 --- a/clustering/consul_session +++ b/clustering/consul_session @@ -80,6 +80,7 @@ except ImportError, e: "http://python-consul.readthedocs.org/en/latest/#installation'" sys.exit(1) +from requests.errors import ConnectionError def execute(module): @@ -202,11 +203,11 @@ def main(): try: execute(module) - except IOError, e: - error = e.read() - if not error: - error = str(e) - module.fail_json(msg=error) + except ConnectionError, e: + module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( + module.params.get('host'), module.params.get('port'), str(e))) + except Exception, e: + module.fail_json(msg=str(e)) # import module snippets from ansible.module_utils.basic import * From cb848fcd9ec8364210fc05a5a7addd955b8a2529 Mon Sep 17 
00:00:00 2001 From: Toshio Kuratomi Date: Wed, 18 Mar 2015 20:23:05 -0700 Subject: [PATCH 122/224] Make our regex match the homebrew tap upstream regex. Fixes #312 Fixes #297 --- packaging/os/homebrew_tap.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/homebrew_tap.py b/packaging/os/homebrew_tap.py index a79ba076a8a..d329227b980 100644 --- a/packaging/os/homebrew_tap.py +++ b/packaging/os/homebrew_tap.py @@ -52,7 +52,7 @@ homebrew_tap: tap=homebrew/dupes,homebrew/science state=present def a_valid_tap(tap): '''Returns True if the tap is valid.''' - regex = re.compile(r'^(\S+)/(homebrew-)?(\w+)$') + regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$') return regex.match(tap) From 24cfcd2497f1d68cfffaef6f5afa355018f663c2 Mon Sep 17 00:00:00 2001 From: Kevin Carter Date: Wed, 18 Mar 2015 23:33:33 -0500 Subject: [PATCH 123/224] Updated lxc_container module to fix option parsing The option parsing object within the module was performing a split on an '=' sign and assuming that there would only ever be one '=' in a user provided option. Sadly, the assumption is incorrect and the list comprehension that is building the options list needs to be set to split on the first occurrence of an '=' sign in a given option string. This commit adds the required change to make it possible for options to contain additional '=' signs and be handled correctly. --- cloud/lxc/lxc_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 1ae67bf23c6..c5b290827bf 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -616,7 +616,7 @@ class LxcContainerManagement(object): # TODO(cloudnull) adjust import when issue has been resolved. 
import ast options_dict = ast.literal_eval(_container_config) - parsed_options = [i.split('=') for i in options_dict] + parsed_options = [i.split('=', 1) for i in options_dict] config_change = False for key, value in parsed_options: From c622c54e9e2d4071a3c8c936d8f86df8b10b7c3f Mon Sep 17 00:00:00 2001 From: HPLogsdon Date: Thu, 19 Mar 2015 17:11:42 -0600 Subject: [PATCH 124/224] Fix typo in hipchat notification failure message. Wish it could be more substantial, but alas, it's just a typo in a string. --- notification/hipchat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/hipchat.py b/notification/hipchat.py index 4ff95b32bf6..24fde9ecb35 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -137,7 +137,7 @@ def main(): try: send_msg(module, token, room, msg_from, msg, msg_format, color, notify, api) except Exception, e: - module.fail_json(msg="unable to sent msg: %s" % e) + module.fail_json(msg="unable to send msg: %s" % e) changed = True module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) From b01c2cee66d720cf2f503052d5275bdff06a2f32 Mon Sep 17 00:00:00 2001 From: Ben Copeland Date: Fri, 20 Mar 2015 11:11:43 +0000 Subject: [PATCH 125/224] Added example block for the "SMTP username and password" --- notification/mail.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/notification/mail.py b/notification/mail.py index aa3345b4f98..92565016ad8 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -116,6 +116,16 @@ EXAMPLES = ''' # Example playbook sending mail to root - local_action: mail msg='System {{ ansible_hostname }} has been successfully provisioned.' +# Sending an e-mail using Gmail SMTP servers +- local_action: mail + host='smtp.gmail.com' + port=587 + username=username@gmail.com + password='mysecret' + to="John Smith " + subject='Ansible-report' + msg='System {{ ansible_hostname }} has been successfully provisioned.' 
+ # Send e-mail to a bunch of users, attaching files - local_action: mail host='127.0.0.1' From b68c136010e4a7113f65a7a7bf5c52dc29c35b91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Fri, 20 Mar 2015 17:17:16 +0100 Subject: [PATCH 126/224] [patch] Make sure patch command is found on remote system. --- files/patch.py | 2 ++ 1 file changed, 2 insertions(+) mode change 100644 => 100755 files/patch.py diff --git a/files/patch.py b/files/patch.py old mode 100644 new mode 100755 index cd4b3130079..314a1bc37db --- a/files/patch.py +++ b/files/patch.py @@ -141,6 +141,8 @@ def main(): p.basedir = path.dirname(p.dest) patch_bin = module.get_bin_path('patch') + if patch_bin is None: + module.fail_json(msg="patch command not found") patch_func = lambda opts: module.run_command("%s %s" % (patch_bin, ' '.join(opts))) changed = False From 84eb895a06c8819ed5d8aea06672a69add2b9d37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Fri, 20 Mar 2015 17:19:15 +0100 Subject: [PATCH 127/224] [patch] Make sure the absolute patch file is passed to the patch command. According the patch(1) manpage: The --directory option change to the directory dir immediately, before doing anything else. Thus if file is not relative to dir and making file absolute ensure that patch will find it. 
--- files/patch.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/files/patch.py b/files/patch.py index 314a1bc37db..78bcefe6d31 100755 --- a/files/patch.py +++ b/files/patch.py @@ -145,6 +145,9 @@ def main(): module.fail_json(msg="patch command not found") patch_func = lambda opts: module.run_command("%s %s" % (patch_bin, ' '.join(opts))) + # patch need an absolute file name + p.src = os.path.abspath(p.src) + changed = False if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, strip=p.strip): try: From 08702e44bf5dfb1cee66b0b8cebd6717f9a366fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Fri, 20 Mar 2015 17:36:33 +0100 Subject: [PATCH 128/224] [patch] Update documentation for src parameter. --- files/patch.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/files/patch.py b/files/patch.py index 78bcefe6d31..2f2894a6508 100755 --- a/files/patch.py +++ b/files/patch.py @@ -43,7 +43,9 @@ options: aliases: [ "originalfile" ] src: description: - - Path of the patch file as accepted by the GNU patch tool. + - Path of the patch file as accepted by the GNU patch tool. If + C(remote_src) is False, the patch source file is looked up from the + module's "files" directory. required: true aliases: [ "patchfile" ] remote_src: From f8d04bec1bbdfb0e61e6d3255b16b5bfe23b42f1 Mon Sep 17 00:00:00 2001 From: "Dustin C. Hatch" Date: Sun, 22 Mar 2015 22:16:37 -0500 Subject: [PATCH 129/224] system/lvol: Suppress prompts from lvcreate Occasionally, `lvcreate` will prompt on stdin for confirmation. In particular, this may happen when the volume is being created close to the location on disk where another volume existed previously. When this happens, Ansible will hang indefinitely with no indication of the problem. To work prevent this problem, the `--yes` command-line argument can be passed to `lvcreate`, which will instruct it not to prompt. Signed-off-by: Dustin C. 
Hatch --- system/lvol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index d9be9e7dc70..b14fd33c8e4 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -187,7 +187,7 @@ def main(): changed = True else: lvcreate_cmd = module.get_bin_path("lvcreate", required=True) - rc, _, err = module.run_command("%s -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg)) + rc, _, err = module.run_command("%s --yes -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg)) if rc == 0: changed = True else: From 513724c0a5ba1aaef30d64cbd1afd713a9f550ff Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Mon, 23 Mar 2015 14:26:11 -0700 Subject: [PATCH 130/224] Add `validate_certs` param to bigip_* modules Ignoring SSL cert verification may be necessary when testing with a server that has a self-signed certificate. See https://github.com/ansible/ansible-modules-extras/pull/288#issuecomment-85196736 --- network/f5/bigip_facts.py | 19 +++++++++++++++++++ network/f5/bigip_monitor_http.py | 21 +++++++++++++++++++++ network/f5/bigip_monitor_tcp.py | 21 +++++++++++++++++++++ network/f5/bigip_node.py | 19 +++++++++++++++++++ network/f5/bigip_pool.py | 19 +++++++++++++++++++ network/f5/bigip_pool_member.py | 19 +++++++++++++++++++ 6 files changed, 118 insertions(+) diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index 99a1e31de68..d5f63695a61 100755 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -56,6 +56,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.9.1 session: description: - BIG-IP session support; may be useful to avoid concurrency @@ -1566,6 +1574,12 @@ def generate_software_list(f5): software_list = software.get_all_software_status() return software_list +def disable_ssl_cert_validation(): + # You probably only want to do this for testing and never in production. + # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + def main(): module = AnsibleModule( @@ -1573,6 +1587,7 @@ def main(): server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), + validate_certs = dict(default='yes', type='bool'), session = dict(type='bool', default=False), include = dict(type='list', required=True), filter = dict(type='str', required=False), @@ -1585,6 +1600,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] session = module.params['session'] fact_filter = module.params['filter'] if fact_filter: @@ -1601,6 +1617,9 @@ def main(): if not all(include_test): module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include))) + if not validate_certs: + disable_ssl_cert_validation() + try: facts = {} diff --git a/network/f5/bigip_monitor_http.py b/network/f5/bigip_monitor_http.py index 62823f86579..dd20fb04d74 100644 --- a/network/f5/bigip_monitor_http.py +++ b/network/f5/bigip_monitor_http.py @@ -51,6 +51,14 @@ options: - BIG-IP password required: true default: null + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.9.1 state: description: - Monitor state @@ -177,6 +185,14 @@ def bigip_api(bigip, user, password): return api +def disable_ssl_cert_validation(): + + # You probably only want to do this for testing and never in production. + # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + + def check_monitor_exists(module, api, monitor, parent): # hack to determine if monitor exists @@ -311,6 +327,7 @@ def main(): server = dict(required=True), user = dict(required=True), password = dict(required=True), + validate_certs = dict(default='yes', type='bool'), partition = dict(default='Common'), state = dict(default='present', choices=['present', 'absent']), name = dict(required=True), @@ -331,6 +348,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] partition = module.params['partition'] parent_partition = module.params['parent_partition'] state = module.params['state'] @@ -348,6 +366,9 @@ def main(): # end monitor specific stuff + if not validate_certs: + disable_ssl_cert_validation() + if not bigsuds_found: module.fail_json(msg="the python bigsuds module is required") api = bigip_api(server, user, password) diff --git a/network/f5/bigip_monitor_tcp.py b/network/f5/bigip_monitor_tcp.py index 8b89a0c6113..78a51f2529b 100644 --- a/network/f5/bigip_monitor_tcp.py +++ b/network/f5/bigip_monitor_tcp.py @@ -49,6 +49,14 @@ options: - BIG-IP password required: true default: null + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.9.1 state: description: - Monitor state @@ -196,6 +204,14 @@ def bigip_api(bigip, user, password): return api +def disable_ssl_cert_validation(): + + # You probably only want to do this for testing and never in production. + # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + + def check_monitor_exists(module, api, monitor, parent): # hack to determine if monitor exists @@ -331,6 +347,7 @@ def main(): server = dict(required=True), user = dict(required=True), password = dict(required=True), + validate_certs = dict(default='yes', type='bool'), partition = dict(default='Common'), state = dict(default='present', choices=['present', 'absent']), name = dict(required=True), @@ -351,6 +368,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] partition = module.params['partition'] parent_partition = module.params['parent_partition'] state = module.params['state'] @@ -372,6 +390,9 @@ def main(): # end monitor specific stuff + if not validate_certs: + disable_ssl_cert_validation() + if not bigsuds_found: module.fail_json(msg="the python bigsuds module is required") api = bigip_api(server, user, password) diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index 68b6a2b52f1..c45a7f12d5c 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -54,6 +54,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.9.1 state: description: - Pool member state @@ -154,6 +162,12 @@ def bigip_api(bigip, user, password): api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) return api +def disable_ssl_cert_validation(): + # You probably only want to do this for testing and never in production. + # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + def node_exists(api, address): # hack to determine if node exists result = False @@ -212,6 +226,7 @@ def main(): server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), + validate_certs = dict(default='yes', type='bool'), state = dict(type='str', default='present', choices=['present', 'absent']), partition = dict(type='str', default='Common'), name = dict(type='str', required=True), @@ -227,6 +242,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] state = module.params['state'] partition = module.params['partition'] host = module.params['host'] @@ -234,6 +250,9 @@ def main(): address = "/%s/%s" % (partition, name) description = module.params['description'] + if not validate_certs: + disable_ssl_cert_validation() + if state == 'absent' and host is not None: module.fail_json(msg="host parameter invalid when state=absent") diff --git a/network/f5/bigip_pool.py b/network/f5/bigip_pool.py index 48d03b9f1cb..e7ddce6d391 100644 --- a/network/f5/bigip_pool.py +++ b/network/f5/bigip_pool.py @@ -54,6 +54,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.9.1 state: description: - Pool/pool member state @@ -235,6 +243,12 @@ def bigip_api(bigip, user, password): api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) return api +def disable_ssl_cert_validation(): + # You probably only want to do this for testing and never in production. + # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + def pool_exists(api, pool): # hack to determine if pool exists result = False @@ -359,6 +373,7 @@ def main(): server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), + validate_certs = dict(default='yes', type='bool'), state = dict(type='str', default='present', choices=['present', 'absent']), name = dict(type='str', required=True, aliases=['pool']), partition = dict(type='str', default='Common'), @@ -380,6 +395,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] state = module.params['state'] name = module.params['name'] partition = module.params['partition'] @@ -407,6 +423,9 @@ def main(): address = "/%s/%s" % (partition, host) port = module.params['port'] + if not validate_certs: + disable_ssl_cert_validation() + # sanity check user supplied values if (host and not port) or (port and not host): diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index 5aef9f0ae98..6a00864056c 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -56,6 +56,14 @@ options: default: null choices: [] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.9.1 state: description: - Pool member state @@ -189,6 +197,12 @@ def bigip_api(bigip, user, password): api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) return api +def disable_ssl_cert_validation(): + # You probably only want to do this for testing and never in production. + # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + def pool_exists(api, pool): # hack to determine if pool exists result = False @@ -282,6 +296,7 @@ def main(): server = dict(type='str', required=True), user = dict(type='str', required=True), password = dict(type='str', required=True), + validate_certs = dict(default='yes', type='bool'), state = dict(type='str', default='present', choices=['present', 'absent']), pool = dict(type='str', required=True), partition = dict(type='str', default='Common'), @@ -301,6 +316,7 @@ def main(): server = module.params['server'] user = module.params['user'] password = module.params['password'] + validate_certs = module.params['validate_certs'] state = module.params['state'] partition = module.params['partition'] pool = "/%s/%s" % (partition, module.params['pool']) @@ -312,6 +328,9 @@ def main(): address = "/%s/%s" % (partition, host) port = module.params['port'] + if not validate_certs: + disable_ssl_cert_validation() + # sanity check user supplied values if (host and not port) or (port and not host): From 09dfd42d50b3477cf78aaec05467c640f822a3bd Mon Sep 17 00:00:00 2001 From: Dariusz Owczarek Date: Mon, 29 Dec 2014 16:50:43 +0100 Subject: [PATCH 131/224] new vertica modules --- database/vertica/__init__.py | 0 database/vertica/vertica_configuration.py | 198 +++++++++++ database/vertica/vertica_facts.py | 276 +++++++++++++++ database/vertica/vertica_role.py | 246 ++++++++++++++ database/vertica/vertica_schema.py | 320 ++++++++++++++++++ database/vertica/vertica_user.py | 388 
++++++++++++++++++++++ 6 files changed, 1428 insertions(+) create mode 100644 database/vertica/__init__.py create mode 100644 database/vertica/vertica_configuration.py create mode 100644 database/vertica/vertica_facts.py create mode 100644 database/vertica/vertica_role.py create mode 100644 database/vertica/vertica_schema.py create mode 100644 database/vertica/vertica_user.py diff --git a/database/vertica/__init__.py b/database/vertica/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/database/vertica/vertica_configuration.py b/database/vertica/vertica_configuration.py new file mode 100644 index 00000000000..6ee5ebe5f7f --- /dev/null +++ b/database/vertica/vertica_configuration.py @@ -0,0 +1,198 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_configuration +version_added: '1.0' +short_description: Updates Vertica configuration parameters. +description: + Updates Vertica configuration parameters. +options: + name: + description: + Name of the parameter to update. + required: true + default: null + value: + description: + Value of the parameter to be set. + required: true + default: null + db: + description: + Name of the Vertica database. + required: false + default: null + cluster: + description: + Name of the Vertica cluster. 
+ required: false + default: localhost + port: + description: + Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + The password used to authenticate with. + required: false + default: null +notes: + The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +Examples: + +- name: updating load_balance_policy + vertica_configuration: name=failovertostandbyafter value='8 hours' +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_configuration_facts(cursor, parameter_name=''): + facts = {} + cursor.execute(""" + select c.parameter_name, c.current_value, c.default_value + from configuration_parameters c + where c.node_name = 'ALL' + and (? = '' or c.parameter_name ilike ?) 
+ """, parameter_name, parameter_name) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.parameter_name.lower()] = { + 'parameter_name': row.parameter_name, + 'current_value': row.current_value, + 'default_value': row.default_value} + return facts + +def check(configuration_facts, parameter_name, current_value): + parameter_key = parameter_name.lower() + if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): + return False + return True + +def present(configuration_facts, cursor, parameter_name, current_value): + parameter_key = parameter_name.lower() + changed = False + if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower(): + cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value)) + changed = True + if changed: + configuration_facts.update(get_configuration_facts(cursor, parameter_name)) + return changed + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + parameter=dict(required=True, aliases=['name']), + value=dict(default=None), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + parameter_name = module.params['parameter'] + current_value = module.params['value'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + 
except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + configuration_facts = get_configuration_facts(cursor) + if module.check_mode: + changed = not check(configuration_facts, parameter_name, current_value) + else: + try: + changed = present(configuration_facts, cursor, parameter_name, current_value) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_facts.py b/database/vertica/vertica_facts.py new file mode 100644 index 00000000000..2334cbaa227 --- /dev/null +++ b/database/vertica/vertica_facts.py @@ -0,0 +1,276 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = """ +--- +module: vertica_facts +version_added: '1.0' +short_description: Gathers Vertica database facts. +description: + Gathers Vertica database facts. +options: + cluster: + description: + Name of the cluster running the schema. + required: false + default: localhost + port: + description: + Database port to connect to. + required: false + default: 5433 + db: + description: + Name of the database running the schema. + required: false + default: null + login_user: + description: + The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + The password used to authenticate with. + required: false + default: null +notes: + The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +- name: gathering vertica facts + vertica_facts: db=db_name +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +# module specific functions + +def get_schema_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select schema_name, schema_owner, create_time + from schemata + where not is_system_schema and schema_name not in ('public') + and (? = '' or schema_name ilike ?) 
+ """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.schema_name.lower()] = { + 'name': row.schema_name, + 'owner': row.schema_owner, + 'create_time': str(row.create_time), + 'usage_roles': [], + 'create_roles': []} + cursor.execute(""" + select g.object_name as schema_name, r.name as role_name, + lower(g.privileges_description) privileges_description + from roles r join grants g + on g.grantee = r.name and g.object_type='SCHEMA' + and g.privileges_description like '%USAGE%' + and g.grantee not in ('public', 'dbadmin') + and (? = '' or g.object_name ilike ?) + """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + schema_key = row.schema_name.lower() + if 'create' in row.privileges_description: + facts[schema_key]['create_roles'].append(row.role_name) + else: + facts[schema_key]['usage_roles'].append(row.role_name) + return facts + +def get_user_facts(cursor, user=''): + facts = {} + cursor.execute(""" + select u.user_name, u.is_locked, u.lock_time, + p.password, p.acctexpired as is_expired, + u.profile_name, u.resource_pool, + u.all_roles, u.default_roles + from users u join password_auditor p on p.user_id = u.user_id + where not u.is_super_user + and (? = '' or u.user_name ilike ?) 
+ """, user, user) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + user_key = row.user_name.lower() + facts[user_key] = { + 'name': row.user_name, + 'locked': str(row.is_locked), + 'password': row.password, + 'expired': str(row.is_expired), + 'profile': row.profile_name, + 'resource_pool': row.resource_pool, + 'roles': [], + 'default_roles': []} + if row.is_locked: + facts[user_key]['locked_time'] = str(row.lock_time) + if row.all_roles: + facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + if row.default_roles: + facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + return facts + +def get_role_facts(cursor, role=''): + facts = {} + cursor.execute(""" + select r.name, r.assigned_roles + from roles r + where (? = '' or r.name ilike ?) + """, role, role) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + role_key = row.name.lower() + facts[role_key] = { + 'name': row.name, + 'assigned_roles': []} + if row.assigned_roles: + facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + return facts + +def get_configuration_facts(cursor, parameter=''): + facts = {} + cursor.execute(""" + select c.parameter_name, c.current_value, c.default_value + from configuration_parameters c + where c.node_name = 'ALL' + and (? = '' or c.parameter_name ilike ?) 
+ """, parameter, parameter) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.parameter_name.lower()] = { + 'parameter_name': row.parameter_name, + 'current_value': row.current_value, + 'default_value': row.default_value} + return facts + +def get_node_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select node_name, node_address, export_address, node_state, node_type, + catalog_path + from nodes + """) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.node_address] = { + 'node_name': row.node_name, + 'export_address': row.export_address, + 'node_state': row.node_state, + 'node_type': row.node_type, + 'catalog_path': row.catalog_path} + return facts + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + cluster=dict(default='localhost'), + port=dict(default='5433'), + db=dict(default=None), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + schema_facts = get_schema_facts(cursor) + user_facts = get_user_facts(cursor) + role_facts = get_role_facts(cursor) + configuration_facts = get_configuration_facts(cursor) + node_facts = get_node_facts(cursor) + module.exit_json(changed=False, + ansible_facts={'vertica_schemas': schema_facts, + 
'vertica_users': user_facts, + 'vertica_roles': role_facts, + 'vertica_configuration': configuration_facts, + 'vertica_nodes': node_facts}) + except NotSupportedError, e: + module.fail_json(msg=str(e)) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_role.py b/database/vertica/vertica_role.py new file mode 100644 index 00000000000..dad6c5c3bc9 --- /dev/null +++ b/database/vertica/vertica_role.py @@ -0,0 +1,246 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_role +version_added: '1.0' +short_description: Adds or removes Vertica database roles and assigns roles to them. +description: + Adds or removes Vertica database role and, optionally, assign other roles. +options: + name: + description: + Name of the role to add or remove. + required: true + default: null + assigned_roles: + description: + Comma separated list of roles to assign to the role. + [Alias I(assigned_role)] + required: false + default: null + state: + description: + Whether to create C(present), drop C(absent) or lock C(locked) a role. 
+ required: false + choices: ['present', 'absent'] + default: present + db: + description: + Name of the Vertica database. + required: false + default: null + cluster: + description: + Name of the Vertica cluster. + required: false + default: localhost + port: + description: + Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + The password used to authenticate with. + required: false + default: null +notes: + The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). +requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +Examples: + +- name: creating a new vertica role + vertica_role: name=role_name db=db_name state=present + +- name: creating a new vertica role with other role assigned + vertica_role: name=role_name assigned_role=other_role_name state=present +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_role_facts(cursor, role=''): + facts = {} + cursor.execute(""" + select r.name, r.assigned_roles + from roles r + where (? = '' or r.name ilike ?) 
+ """, role, role) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + role_key = row.name.lower() + facts[role_key] = { + 'name': row.name, + 'assigned_roles': []} + if row.assigned_roles: + facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') + return facts + +def update_roles(role_facts, cursor, role, + existing, required): + for assigned_role in set(existing) - set(required): + cursor.execute("revoke {0} from {1}".format(assigned_role, role)) + for assigned_role in set(required) - set(existing): + cursor.execute("grant {0} to {1}".format(assigned_role, role)) + +def check(role_facts, role, assigned_roles): + role_key = role.lower() + if role_key not in role_facts: + return False + if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0: + return False + return True + +def present(role_facts, cursor, role, assigned_roles): + role_key = role.lower() + if role_key not in role_facts: + cursor.execute("create role {0}".format(role)) + update_roles(role_facts, cursor, role, [], assigned_roles) + role_facts.update(get_role_facts(cursor, role)) + return True + else: + changed = False + if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0: + update_roles(role_facts, cursor, role, + role_facts[role_key]['assigned_roles'], assigned_roles) + changed = True + if changed: + role_facts.update(get_role_facts(cursor, role)) + return changed + +def absent(role_facts, cursor, role, assigned_roles): + role_key = role.lower() + if role_key in role_facts: + update_roles(role_facts, cursor, role, + role_facts[role_key]['assigned_roles'], []) + cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name'])) + del role_facts[role_key] + return True + else: + return False + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + role=dict(required=True, aliases=['name']), + 
assigned_roles=dict(default=None, aliases=['assigned_role']), + state=dict(default='present', choices=['absent', 'present']), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + role = module.params['role'] + assigned_roles = [] + if module.params['assigned_roles']: + assigned_roles = module.params['assigned_roles'].split(',') + assigned_roles = filter(None, assigned_roles) + state = module.params['state'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + role_facts = get_role_facts(cursor) + if module.check_mode: + changed = not check(role_facts, role, assigned_roles) + elif state == 'absent': + try: + changed = absent(role_facts, cursor, role, assigned_roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + elif state == 'present': + try: + changed = present(role_facts, cursor, role, assigned_roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, role=role, 
ansible_facts={'vertica_roles': role_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_schema.py b/database/vertica/vertica_schema.py new file mode 100644 index 00000000000..7bc57a545f6 --- /dev/null +++ b/database/vertica/vertica_schema.py @@ -0,0 +1,320 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_schema +version_added: '1.0' +short_description: Adds or removes Vertica database schema and roles. +description: + Adds or removes Vertica database schema and, optionally, roles + with schema access privileges. + A schema will not be removed until all the objects have been dropped. + In such a situation, if the module tries to remove the schema it + will fail and only remove roles created for the schema if they have + no dependencies. +options: + name: + description: + Name of the schema to add or remove. + required: true + default: null + usage_roles: + description: + Comma separated list of roles to create and grant usage access to the schema. + [Alias I(usage_role)] + required: false + default: null + create_roles: + description: + Comma separated list of roles to create and grant usage and create access to the schema. 
+ [Alias I(create_role)] + required: false + default: null + owner: + description: + Name of the user to set as owner of the schema. + required: false + default: null + state: + description: + Whether to create C(present), or drop C(absent) a schema. + required: false + default: present + choices: ['present', 'absent'] + db: + description: + Name of the Vertica database. + required: false + default: null + cluster: + description: + Name of the Vertica cluster. + required: false + default: localhost + port: + description: + Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + The password used to authenticate with. + required: false + default: null +notes: + The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
+requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +Examples: + +- name: creating a new vertica schema + vertica_schema: name=schema_name db=db_name state=present + +- name: creating a new schema with specific schema owner + vertica_schema: name=schema_name owner=dbowner db=db_name state=present + +- name: creating a new schema with roles + vertica_schema: + name=schema_name + create_roles=schema_name_all + usage_roles=schema_name_ro,schema_name_rw + db=db_name + state=present +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_schema_facts(cursor, schema=''): + facts = {} + cursor.execute(""" + select schema_name, schema_owner, create_time + from schemata + where not is_system_schema and schema_name not in ('public', 'TxtIndex') + and (? = '' or schema_name ilike ?) + """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + facts[row.schema_name.lower()] = { + 'name': row.schema_name, + 'owner': row.schema_owner, + 'create_time': str(row.create_time), + 'usage_roles': [], + 'create_roles': []} + cursor.execute(""" + select g.object_name as schema_name, r.name as role_name, + lower(g.privileges_description) privileges_description + from roles r join grants g + on g.grantee_id = r.role_id and g.object_type='SCHEMA' + and g.privileges_description like '%USAGE%' + and g.grantee not in ('public', 'dbadmin') + and (? = '' or g.object_name ilike ?) 
+ """, schema, schema) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + schema_key = row.schema_name.lower() + if 'create' in row.privileges_description: + facts[schema_key]['create_roles'].append(row.role_name) + else: + facts[schema_key]['usage_roles'].append(row.role_name) + return facts + +def update_roles(schema_facts, cursor, schema, + existing, required, + create_existing, create_required): + for role in set(existing + create_existing) - set(required + create_required): + cursor.execute("drop role {0} cascade".format(role)) + for role in set(create_existing) - set(create_required): + cursor.execute("revoke create on schema {0} from {1}".format(schema, role)) + for role in set(required + create_required) - set(existing + create_existing): + cursor.execute("create role {0}".format(role)) + cursor.execute("grant usage on schema {0} to {1}".format(schema, role)) + for role in set(create_required) - set(create_existing): + cursor.execute("grant create on schema {0} to {1}".format(schema, role)) + +def check(schema_facts, schema, usage_roles, create_roles, owner): + schema_key = schema.lower() + if schema_key not in schema_facts: + return False + if owner and owner.lower() == schema_facts[schema_key]['owner'].lower(): + return False + if cmp(sorted(usage_roles), sorted(schema_facts[schema_key]['usage_roles'])) != 0: + return False + if cmp(sorted(create_roles), sorted(schema_facts[schema_key]['create_roles'])) != 0: + return False + return True + +def present(schema_facts, cursor, schema, usage_roles, create_roles, owner): + schema_key = schema.lower() + if schema_key not in schema_facts: + query_fragments = ["create schema {0}".format(schema)] + if owner: + query_fragments.append("authorization {0}".format(owner)) + cursor.execute(' '.join(query_fragments)) + update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles) + schema_facts.update(get_schema_facts(cursor, schema)) + return True + else: + changed = 
False + if owner and owner.lower() != schema_facts[schema_key]['owner'].lower(): + raise NotSupportedError(( + "Changing schema owner is not supported. " + "Current owner: {0}." + ).format(schema_facts[schema_key]['owner'])) + if cmp(sorted(usage_roles), sorted(schema_facts[schema_key]['usage_roles'])) != 0 or \ + cmp(sorted(create_roles), sorted(schema_facts[schema_key]['create_roles'])) != 0: + update_roles(schema_facts, cursor, schema, + schema_facts[schema_key]['usage_roles'], usage_roles, + schema_facts[schema_key]['create_roles'], create_roles) + changed = True + if changed: + schema_facts.update(get_schema_facts(cursor, schema)) + return changed + +def absent(schema_facts, cursor, schema, usage_roles, create_roles): + schema_key = schema.lower() + if schema_key in schema_facts: + update_roles(schema_facts, cursor, schema, + schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], []) + try: + cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name'])) + except pyodbc.Error: + raise CannotDropError("Dropping schema failed due to dependencies.") + del schema_facts[schema_key] + return True + else: + return False + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + schema=dict(required=True, aliases=['name']), + usage_roles=dict(default=None, aliases=['usage_role']), + create_roles=dict(default=None, aliases=['create_role']), + owner=dict(default=None), + state=dict(default='present', choices=['absent', 'present']), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + schema = module.params['schema'] + usage_roles = [] + if module.params['usage_roles']: + usage_roles = module.params['usage_roles'].split(',') + usage_roles = filter(None, 
usage_roles) + create_roles = [] + if module.params['create_roles']: + create_roles = module.params['create_roles'].split(',') + create_roles = filter(None, create_roles) + owner = module.params['owner'] + state = module.params['state'] + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + schema_facts = get_schema_facts(cursor) + if module.check_mode: + changed = not check(schema_facts, schema, usage_roles, create_roles, owner) + elif state == 'absent': + try: + changed = absent(schema_facts, cursor, schema, usage_roles, create_roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + elif state == 'present': + try: + changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_user.py b/database/vertica/vertica_user.py new file mode 100644 index 00000000000..82182301a69 --- /dev/null +++ 
b/database/vertica/vertica_user.py @@ -0,0 +1,388 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: vertica_user +version_added: '1.0' +short_description: Adds or removes Vertica database users and assigns roles. +description: + Adds or removes Vertica database user and, optionally, assigns roles. + A user will not be removed until all the dependencies have been dropped. + In such a situation, if the module tries to remove the user it + will fail and only remove roles granted to the user. +options: + name: + description: + Name of the user to add or remove. + required: true + default: null + profile: + description: + Sets the user's profile. + required: false + default: null + resource_pool: + description: + Sets the user's resource pool. + required: false + default: null + password: + description: + The user's password encrypted by the MD5 algorithm. + The password must be generated with the format C("md5" + md5[password + username]), + resulting in a total of 35 characters. An easy way to do this is by querying + the Vertica database with select 'md5'||md5(''). + required: false + default: null + expired: + description: + Sets the user's password expiration. + required: false + default: null + ldap: + description: + Set to true if users are authenticated via LDAP. 
+ The user will be created with password expired and set to I($ldap$). + required: false + default: null + roles: + description: + Comma separated list of roles to assign to the user. + [Alias I(role)] + required: false + default: null + state: + description: + Whether to create C(present), drop C(absent) or lock C(locked) a user. + required: false + choices: ['present', 'absent', 'locked'] + default: present + db: + description: + Name of the Vertica database. + required: false + default: null + cluster: + description: + Name of the Vertica cluster. + required: false + default: localhost + port: + description: + Vertica cluster port to connect to. + required: false + default: 5433 + login_user: + description: + The username used to authenticate with. + required: false + default: dbadmin + login_password: + description: + The password used to authenticate with. + required: false + default: null +notes: + The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
+requirements: [ 'unixODBC', 'pyodbc' ] +author: Dariusz Owczarek +""" + +EXAMPLES = """ +Examples: + +- name: creating a new vertica user with password + vertica_user: name=user_name password=md5 db=db_name state=present + +- name: creating a new vertica user authenticated via ldap with roles assigned + vertica_user: + name=user_name + ldap=true + db=db_name + roles=schema_name_ro + state=present +""" + +try: + import pyodbc +except ImportError: + pyodbc_found = False +else: + pyodbc_found = True + +class NotSupportedError(Exception): + pass + +class CannotDropError(Exception): + pass + +# module specific functions + +def get_user_facts(cursor, user=''): + facts = {} + cursor.execute(""" + select u.user_name, u.is_locked, u.lock_time, + p.password, p.acctexpired as is_expired, + u.profile_name, u.resource_pool, + u.all_roles, u.default_roles + from users u join password_auditor p on p.user_id = u.user_id + where not u.is_super_user + and (? = '' or u.user_name ilike ?) + """, user, user) + while True: + rows = cursor.fetchmany(100) + if not rows: + break + for row in rows: + user_key = row.user_name.lower() + facts[user_key] = { + 'name': row.user_name, + 'locked': str(row.is_locked), + 'password': row.password, + 'expired': str(row.is_expired), + 'profile': row.profile_name, + 'resource_pool': row.resource_pool, + 'roles': [], + 'default_roles': []} + if row.is_locked: + facts[user_key]['locked_time'] = str(row.lock_time) + if row.all_roles: + facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') + if row.default_roles: + facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') + return facts + +def update_roles(user_facts, cursor, user, + existing_all, existing_default, required): + del_roles = list(set(existing_all) - set(required)) + if del_roles: + cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user)) + new_roles = list(set(required) - set(existing_all)) + if new_roles: + cursor.execute("grant {0} to 
{1}".format(','.join(new_roles), user)) + if required: + cursor.execute("alter user {0} default role {1}".format(user, ','.join(required))) + +def check(user_facts, user, profile, resource_pool, + locked, password, expired, ldap, roles): + user_key = user.lower() + if user_key not in user_facts: + return False + if profile and profile != user_facts[user_key]['profile']: + return False + if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + return False + if locked != (user_facts[user_key]['locked'] == 'True'): + return False + if password and password != user_facts[user_key]['password']: + return False + if expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or \ + ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True'): + return False + if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \ + cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0): + return False + return True + +def present(user_facts, cursor, user, profile, resource_pool, + locked, password, expired, ldap, roles): + user_key = user.lower() + if user_key not in user_facts: + query_fragments = ["create user {0}".format(user)] + if locked: + query_fragments.append("account lock") + if password or ldap: + if password: + query_fragments.append("identified by '{0}'".format(password)) + else: + query_fragments.append("identified by '$ldap$'") + if expired or ldap: + query_fragments.append("password expire") + if profile: + query_fragments.append("profile {0}".format(profile)) + if resource_pool: + query_fragments.append("resource pool {0}".format(resource_pool)) + cursor.execute(' '.join(query_fragments)) + if resource_pool and resource_pool != 'general': + cursor.execute("grant usage on resource pool {0} to {1}".format( + resource_pool, user)) + update_roles(user_facts, cursor, user, [], [], roles) + user_facts.update(get_user_facts(cursor, user)) + return True + else: + changed = False + 
query_fragments = ["alter user {0}".format(user)] + if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'): + state = 'lock' if locked else 'unlock' + query_fragments.append("account {0}".format(state)) + changed = True + if password and password != user_facts[user_key]['password']: + query_fragments.append("identified by '{0}'".format(password)) + changed = True + if ldap: + if ldap != (user_facts[user_key]['expired'] == 'True'): + query_fragments.append("password expire") + changed = True + elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'): + if expired: + query_fragments.append("password expire") + changed = True + else: + raise NotSupportedError("Unexpiring user password is not supported.") + if profile and profile != user_facts[user_key]['profile']: + query_fragments.append("profile {0}".format(profile)) + changed = True + if resource_pool and resource_pool != user_facts[user_key]['resource_pool']: + query_fragments.append("resource pool {0}".format(resource_pool)) + if user_facts[user_key]['resource_pool'] != 'general': + cursor.execute("revoke usage on resource pool {0} from {1}".format( + user_facts[user_key]['resource_pool'], user)) + if resource_pool != 'general': + cursor.execute("grant usage on resource pool {0} to {1}".format( + resource_pool, user)) + changed = True + if changed: + cursor.execute(' '.join(query_fragments)) + if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \ + cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0): + update_roles(user_facts, cursor, user, + user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles) + changed = True + if changed: + user_facts.update(get_user_facts(cursor, user)) + return changed + +def absent(user_facts, cursor, user, roles): + user_key = user.lower() + if user_key in user_facts: + update_roles(user_facts, cursor, user, + user_facts[user_key]['roles'], 
user_facts[user_key]['default_roles'], []) + try: + cursor.execute("drop user {0}".format(user_facts[user_key]['name'])) + except pyodbc.Error: + raise CannotDropError("Dropping user failed due to dependencies.") + del user_facts[user_key] + return True + else: + return False + +# module logic + +def main(): + + module = AnsibleModule( + argument_spec=dict( + user=dict(required=True, aliases=['name']), + profile=dict(default=None), + resource_pool=dict(default=None), + password=dict(default=None), + expired=dict(type='bool', default=None), + ldap=dict(type='bool', default=None), + roles=dict(default=None, aliases=['role']), + state=dict(default='present', choices=['absent', 'present', 'locked']), + db=dict(default=None), + cluster=dict(default='localhost'), + port=dict(default='5433'), + login_user=dict(default='dbadmin'), + login_password=dict(default=None), + ), supports_check_mode = True) + + if not pyodbc_found: + module.fail_json(msg="The python pyodbc module is required.") + + user = module.params['user'] + profile = module.params['profile'] + if profile: + profile = profile.lower() + resource_pool = module.params['resource_pool'] + if resource_pool: + resource_pool = resource_pool.lower() + password = module.params['password'] + expired = module.params['expired'] + ldap = module.params['ldap'] + roles = [] + if module.params['roles']: + roles = module.params['roles'].split(',') + roles = filter(None, roles) + state = module.params['state'] + if state == 'locked': + locked = True + else: + locked = False + db = '' + if module.params['db']: + db = module.params['db'] + + changed = False + + try: + dsn = ( + "Driver=Vertica;" + "Server={0};" + "Port={1};" + "Database={2};" + "User={3};" + "Password={4};" + "ConnectionLoadBalance={5}" + ).format(module.params['cluster'], module.params['port'], db, + module.params['login_user'], module.params['login_password'], 'true') + db_conn = pyodbc.connect(dsn, autocommit=True) + cursor = db_conn.cursor() + except 
Exception, e: + module.fail_json(msg="Unable to connect to database: {0}.".format(e)) + + try: + user_facts = get_user_facts(cursor) + if module.check_mode: + changed = not check(user_facts, user, profile, resource_pool, + locked, password, expired, ldap, roles) + elif state == 'absent': + try: + changed = absent(user_facts, cursor, user, roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + elif state in ['present', 'locked']: + try: + changed = present(user_facts, cursor, user, profile, resource_pool, + locked, password, expired, ldap, roles) + except pyodbc.Error, e: + module.fail_json(msg=str(e)) + except NotSupportedError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts}) + except CannotDropError, e: + module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts}) + except SystemExit: + # avoid catching this on python 2.4 + raise + except Exception, e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts}) + +# import ansible utilities +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() From cf9a243cd7cacc5433d3a9db0f2a3aed197464d2 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 17:28:37 -0400 Subject: [PATCH 132/224] added missing __init__.py --- cloud/google/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 cloud/google/__init__.py diff --git a/cloud/google/__init__.py b/cloud/google/__init__.py new file mode 100644 index 00000000000..e69de29bb2d From 4bf87a0ab5ee3ede8afe6ca55494e91368b8698b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 22:00:07 -0400 Subject: [PATCH 133/224] corrected 'version added' --- database/vertica/vertica_configuration.py | 2 +- database/vertica/vertica_facts.py | 2 +- database/vertica/vertica_role.py | 2 +- database/vertica/vertica_schema.py | 2 +- database/vertica/vertica_user.py | 2 +- 5 files changed, 5 insertions(+), 5 
deletions(-) diff --git a/database/vertica/vertica_configuration.py b/database/vertica/vertica_configuration.py index 6ee5ebe5f7f..c7bdb1001d6 100644 --- a/database/vertica/vertica_configuration.py +++ b/database/vertica/vertica_configuration.py @@ -19,7 +19,7 @@ DOCUMENTATION = """ --- module: vertica_configuration -version_added: '1.0' +version_added: '2.0' short_description: Updates Vertica configuration parameters. description: Updates Vertica configuration parameters. diff --git a/database/vertica/vertica_facts.py b/database/vertica/vertica_facts.py index 2334cbaa227..4b963a4e377 100644 --- a/database/vertica/vertica_facts.py +++ b/database/vertica/vertica_facts.py @@ -19,7 +19,7 @@ DOCUMENTATION = """ --- module: vertica_facts -version_added: '1.0' +version_added: '2.0' short_description: Gathers Vertica database facts. description: Gathers Vertica database facts. diff --git a/database/vertica/vertica_role.py b/database/vertica/vertica_role.py index dad6c5c3bc9..825bb1b07e9 100644 --- a/database/vertica/vertica_role.py +++ b/database/vertica/vertica_role.py @@ -19,7 +19,7 @@ DOCUMENTATION = """ --- module: vertica_role -version_added: '1.0' +version_added: '2.0' short_description: Adds or removes Vertica database roles and assigns roles to them. description: Adds or removes Vertica database role and, optionally, assign other roles. diff --git a/database/vertica/vertica_schema.py b/database/vertica/vertica_schema.py index 7bc57a545f6..f3a75055d06 100644 --- a/database/vertica/vertica_schema.py +++ b/database/vertica/vertica_schema.py @@ -19,7 +19,7 @@ DOCUMENTATION = """ --- module: vertica_schema -version_added: '1.0' +version_added: '2.0' short_description: Adds or removes Vertica database schema and roles. 
description: Adds or removes Vertica database schema and, optionally, roles diff --git a/database/vertica/vertica_user.py b/database/vertica/vertica_user.py index 82182301a69..1d72deca617 100644 --- a/database/vertica/vertica_user.py +++ b/database/vertica/vertica_user.py @@ -19,7 +19,7 @@ DOCUMENTATION = """ --- module: vertica_user -version_added: '1.0' +version_added: '2.0' short_description: Adds or removes Vertica database users and assigns roles. description: Adds or removes Vertica database user and, optionally, assigns roles. From 5293f452b5e0fe8cc29daa63c848e721c9729e33 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 22:04:44 -0400 Subject: [PATCH 134/224] corrected version added --- network/f5/bigip_facts.py | 2 +- network/f5/bigip_monitor_http.py | 2 +- network/f5/bigip_monitor_tcp.py | 2 +- network/f5/bigip_node.py | 2 +- network/f5/bigip_pool.py | 2 +- network/f5/bigip_pool_member.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index 59a6a48aa5e..2c90b418ea2 100755 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -63,7 +63,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] - version_added: 1.9.1 + version_added: 2.0 session: description: - BIG-IP session support; may be useful to avoid concurrency diff --git a/network/f5/bigip_monitor_http.py b/network/f5/bigip_monitor_http.py index dd20fb04d74..d131eb71eee 100644 --- a/network/f5/bigip_monitor_http.py +++ b/network/f5/bigip_monitor_http.py @@ -58,7 +58,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] - version_added: 1.9.1 + version_added: 2.0 state: description: - Monitor state diff --git a/network/f5/bigip_monitor_tcp.py b/network/f5/bigip_monitor_tcp.py index 78a51f2529b..5cc00fe6b68 100644 --- a/network/f5/bigip_monitor_tcp.py +++ b/network/f5/bigip_monitor_tcp.py @@ -56,7 +56,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] - 
version_added: 1.9.1 + version_added: 2.0 state: description: - Monitor state diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index c45a7f12d5c..f54fafdb64b 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -61,7 +61,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] - version_added: 1.9.1 + version_added: 2.0 state: description: - Pool member state diff --git a/network/f5/bigip_pool.py b/network/f5/bigip_pool.py index e7ddce6d391..425c1e97149 100644 --- a/network/f5/bigip_pool.py +++ b/network/f5/bigip_pool.py @@ -61,7 +61,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] - version_added: 1.9.1 + version_added: 2.0 state: description: - Pool/pool member state diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index 6a00864056c..1304dfe33e5 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -63,7 +63,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] - version_added: 1.9.1 + version_added: 2.0 state: description: - Pool member state From d7030e9604537eee524e452762a4d5e865efcba3 Mon Sep 17 00:00:00 2001 From: Phillip Holmes Date: Wed, 15 Oct 2014 21:14:10 -0500 Subject: [PATCH 135/224] added color bar option to Slack module MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This update will allow people to add a color bar at the front of a Slack notification using the default 3 colors by name Slack specify (good, warning, danger). If no color is specified, or the default is used (normal) then no bar will be added. Description and example also added in this update. 
Color bars are added by using the attachments json object inside the payload - this is a very simplistic implementation as using custom colors or adding titles or other formatting are not included in this update and if needed I’m sure somebody else can spend the time to add them later… Tested with ansible 1.7 --- notification/slack.py | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/notification/slack.py b/notification/slack.py index 1ae748247f9..2b1459c3d91 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -89,6 +89,16 @@ options: choices: - 'yes' - 'no' + color: + description: + - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message + required: false + default: 'normal' + choices: + - 'normal' + - 'good' + - 'warning' + - 'danger' """ EXAMPLES = """ @@ -111,14 +121,24 @@ EXAMPLES = """ link_names: 0 parse: 'none' +- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack + slack: + domain: future500.slack.com + token: thetokengeneratedbyslack + msg: "{{ inventory_hostname }} is alive!" 
+ color: good + username: "" + icon_url: "" """ OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s' -def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse): - payload = dict(text=text) - +def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color): + if color == 'normal': + payload = dict(text=text) + else: + payload = dict(attachments=[dict(text=text, color=color)]) if channel is not None: payload['channel'] = channel if (channel[0] == '#') else '#'+channel if username is not None: @@ -161,8 +181,8 @@ def main(): icon_emoji = dict(type='str', default=None), link_names = dict(type='int', default=1, choices=[0,1]), parse = dict(type='str', default=None, choices=['none', 'full']), - validate_certs = dict(default='yes', type='bool'), + color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']) ) ) @@ -175,8 +195,9 @@ def main(): icon_emoji = module.params['icon_emoji'] link_names = module.params['link_names'] parse = module.params['parse'] + color = module.params['color'] - payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse) + payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color) do_notify_slack(module, domain, token, payload) module.exit_json(msg="OK") From 671317e0e105168738215bb79a71bdc1040741a0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 23:07:45 -0400 Subject: [PATCH 136/224] added version added to new color option --- notification/slack.py | 1 + 1 file changed, 1 insertion(+) diff --git a/notification/slack.py b/notification/slack.py index 2b1459c3d91..fc0e7403637 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -90,6 +90,7 @@ options: - 'yes' - 'no' color: + version_added: 2.0 description: 
- Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message required: false From 8e1024ff3011416b7abb4a20aa80d135f55c99c3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 23:15:37 -0400 Subject: [PATCH 137/224] updated pushover version added --- notification/pushover | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notification/pushover b/notification/pushover index 7fd66333f54..8e9d2596d43 100644 --- a/notification/pushover +++ b/notification/pushover @@ -23,7 +23,7 @@ DOCUMENTATION = ''' --- module: pushover -version_added: "1.8" +version_added: "2.0" short_description: Send notifications via u(https://pushover.net) description: - Send notifications via pushover, to subscriber list of devices, and email From dd1508d572116520b2e7b175f34db09d40956637 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 23:55:00 -0400 Subject: [PATCH 138/224] moved to --- packaging/{ => language}/maven_artifact.py | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) rename packaging/{ => language}/maven_artifact.py (97%) diff --git a/packaging/maven_artifact.py b/packaging/language/maven_artifact.py similarity index 97% rename from packaging/maven_artifact.py rename to packaging/language/maven_artifact.py index 699d97a54c2..2aeb158625b 100755 --- a/packaging/maven_artifact.py +++ b/packaging/language/maven_artifact.py @@ -32,7 +32,7 @@ DOCUMENTATION = ''' --- module: maven_artifact short_description: Downloads an Artifact from a Maven Repository -version_added: "historical" +version_added: "2.0" description: - Downloads an artifact from a maven repository given the maven coordinates provided to the module. 
Can retrieve - snapshots or release versions of the artifact and will resolve the latest available version if one is not @@ -45,54 +45,42 @@ options: group_id: description: The Maven groupId coordinate required: true - default: null - version_added: 0.0.1 artifact_id: description: The maven artifactId coordinate required: true - default: null - version_added: 0.0.1 version: description: The maven version coordinate required: false default: latest - version_added: 0.0.1 classifier: description: The maven classifier coordinate required: false default: null - version_added: 0.0.1 extension: description: The maven type/extension coordinate required: false default: jar - version_added: 0.0.1 repository_url: description: The URL of the Maven Repository to download from required: false default: http://repo1.maven.org/maven2 - version_added: 0.0.1 username: description: The username to authenticate as to the Maven Repository required: false default: null - version_added: 0.0.1 password: description: The passwor to authenticate with to the Maven Repository required: false default: null - version_added: 0.0.1 dest: description: The path where the artifact should be written to required: true default: false - version_added: 0.0.1 state: description: The desired state of the artifact required: true default: present choices: [present,absent] - version_added: 0.0.1 ''' EXAMPLES = ''' @@ -372,4 +360,4 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() \ No newline at end of file +main() From 2438b74ca83cf3e6c6d6b26cedd56b0d9889f0cd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 23:56:05 -0400 Subject: [PATCH 139/224] remove x bit --- packaging/language/maven_artifact.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 packaging/language/maven_artifact.py diff --git a/packaging/language/maven_artifact.py b/packaging/language/maven_artifact.py old mode 
100755 new mode 100644 From 06939a8651f42db31751aae21ad34dacc6dd63c0 Mon Sep 17 00:00:00 2001 From: tedder Date: Fri, 24 Oct 2014 14:22:50 -0700 Subject: [PATCH 140/224] add cloudtrail module Cloudtrail is the AWS auditing configuration. It's fairly simple, but also very important to configuration management/devops/security to ensure it remains enabled. That's why I created it as a module. --- cloud/cloudtrail.py | 227 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 227 insertions(+) create mode 100755 cloud/cloudtrail.py diff --git a/cloud/cloudtrail.py b/cloud/cloudtrail.py new file mode 100755 index 00000000000..de1656b6dd3 --- /dev/null +++ b/cloud/cloudtrail.py @@ -0,0 +1,227 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: cloudtrail +short_description: manage CloudTrail creation and deletion +description: + - Creates or deletes CloudTrail configuration. Ensures logging is also enabled. This module has a dependency on python-boto >= 2.21. +version_added: "1.7.3" +author: Ted Timmons +requirements: ["boto"] +options: + state: + description: + - add or remove CloudTrail configuration. + required: true + choices: ['enabled', 'absent'] + name: + description: + - name for given CloudTrail configuration. + - This is a primary key and is used to identify the configuration. 
+ s3_bucket_prefix: + description: + - bucket to place CloudTrail in. + - this bucket should exist and have the proper policy. See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html) + - required when state=enabled. + required: false + s3_key_prefix: + description: + - prefix to keys in bucket. A trailing slash is not necessary and will be removed. + required: false + include_global_events: + description: + - record API calls from global services such as IAM and STS? + required: false + default: false + choices: ["true", "false"] + + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_secret_key', 'secret_key' ] + version_added: "1.5" + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_access_key', 'access_key' ] + version_added: "1.5" + region: + description: + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. 
+ required: false + aliases: ['aws_region', 'ec2_region'] + version_added: "1.5" + +extends_documentation_fragment: aws +""" + +EXAMPLES = """ + - name: enable cloudtrail + local_action: cloudtrail > + state=enabled name=main s3_bucket_name=ourbucket + s3_key_prefix=cloudtrail region=us-east-1 + + - name: enable cloudtrail with different configuration + local_action: cloudtrail > + state=enabled name=main s3_bucket_name=ourbucket2 + s3_key_prefix='' region=us-east-1 + + - name: remove cloudtrail + local_action: cloudtrail state=absent name=main region=us-east-1 +""" + +import time +import sys +import os +from collections import Counter + +try: + import boto + import boto.cloudtrail + from boto.regioninfo import RegionInfo +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + +class CloudTrailManager: + """Handles cloudtrail configuration""" + + def __init__(self, module, region=None, **aws_connect_params): + self.module = module + self.region = region + self.aws_connect_params = aws_connect_params + self.changed = False + + try: + self.conn = connect_to_aws(boto.cloudtrail, self.region, **self.aws_connect_params) + except boto.exception.NoAuthHandlerFound, e: + self.module.fail_json(msg=str(e)) + + def view_status(self, name): + return self.conn.get_trail_status(name) + + def view(self, name): + ret = self.conn.describe_trails(trail_name_list=[name]) + trailList = ret.get('trailList', []) + if len(trailList) == 1: + return trailList[0] + return None + + def exists(self, name=None): + ret = self.view(name) + if ret: + return True + return False + + def enable_logging(self, name): + '''Turn on logging for a cloudtrail that already exists. 
Throws Exception on error.''' + self.conn.start_logging(name) + + + def enable(self, **create_args): + return self.conn.create_trail(**create_args) + + def update(self, **create_args): + return self.conn.update_trail(**create_args) + + def delete(self, name): + '''Delete a given cloudtrial configuration. Throws Exception on error.''' + self.conn.delete_trail(name) + + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state={'required': True, 'choices': ['enabled', 'absent'] }, + name={'required': True, 'type': 'str' }, + s3_bucket_name={'required': False, 'type': 'str' }, + s3_key_prefix={'default':'', 'required': False, 'type': 'str' }, + include_global_events={'default':True, 'required': False, 'type': 'bool' }, + )) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + ec2_url, access_key, secret_key, region = get_ec2_creds(module) + aws_connect_params = dict(aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + + if module.params['state'] == 'enabled' and not module.params['s3_bucket_name']: + module.fail_json(msg="s3_bucket_name must be specified as a parameter when creating a cloudtrail") + + if not region: + module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") + + ct_name = module.params['name'] + s3_bucket_name = module.params['s3_bucket_name'] + # remove trailing slash from the key prefix, really messes up the key structure. 
+ s3_key_prefix = module.params['s3_key_prefix'].rstrip('/') + include_global_events = module.params['include_global_events'] + + #if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: + # module.fail_json(msg="ELBs are required for registration or viewing") + + cf_man = CloudTrailManager(module, region=region, **aws_connect_params) + + results = { 'changed': False } + if module.params['state'] == 'enabled': + results['exists'] = cf_man.exists(name=ct_name) + if results['exists']: + results['view'] = cf_man.view(ct_name) + # only update if the values have changed. + if results['view']['S3BucketName'] != s3_bucket_name or \ + results['view']['S3KeyPrefix'] != s3_key_prefix or \ + results['view']['IncludeGlobalServiceEvents'] != include_global_events: + if not module.check_mode: + results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events) + results['changed'] = True + else: + if not module.check_mode: + # doesn't exist. create it. + results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events) + results['changed'] = True + + # given cloudtrail should exist now. Enable the logging. + results['view_status'] = cf_man.view_status(ct_name) + results['was_logging_enabled'] = results['view_status'].get('IsLogging', False) + if not results['was_logging_enabled']: + if not module.check_mode: + cf_man.enable_logging(ct_name) + results['logging_enabled'] = True + results['changed'] = True + + # delete the cloudtrai + elif module.params['state'] == 'absent': + # check to see if it exists before deleting. + results['exists'] = cf_man.exists(name=ct_name) + if results['exists']: + # it exists, so we should delete it and mark changed. 
+ if not module.check_mode: + cf_man.delete(ct_name) + results['changed'] = True + + module.exit_json(**results) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() From 61114cd08a506a4d0e9daebbf9f295fc921909dd Mon Sep 17 00:00:00 2001 From: tedder Date: Fri, 24 Oct 2014 14:41:47 -0700 Subject: [PATCH 141/224] Handful of changes after bcoca's code review: * update expected inclusion version * fix consistency on enabled/absent (now enabled/disabled) * safely import boto per now style of single-exit and proper JSON * use new `required_together` module style --- cloud/cloudtrail.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/cloud/cloudtrail.py b/cloud/cloudtrail.py index de1656b6dd3..777f1df846c 100755 --- a/cloud/cloudtrail.py +++ b/cloud/cloudtrail.py @@ -20,7 +20,7 @@ module: cloudtrail short_description: manage CloudTrail creation and deletion description: - Creates or deletes CloudTrail configuration. Ensures logging is also enabled. This module has a dependency on python-boto >= 2.21. -version_added: "1.7.3" +version_added: "2.0" author: Ted Timmons requirements: ["boto"] options: @@ -28,7 +28,7 @@ options: description: - add or remove CloudTrail configuration. required: true - choices: ['enabled', 'absent'] + choices: ['enabled', 'disabled'] name: description: - name for given CloudTrail configuration. 
@@ -76,12 +76,12 @@ extends_documentation_fragment: aws EXAMPLES = """ - name: enable cloudtrail - local_action: cloudtrail > + local_action: cloudtrail state=enabled name=main s3_bucket_name=ourbucket s3_key_prefix=cloudtrail region=us-east-1 - name: enable cloudtrail with different configuration - local_action: cloudtrail > + local_action: cloudtrail state=enabled name=main s3_bucket_name=ourbucket2 s3_key_prefix='' region=us-east-1 @@ -94,13 +94,13 @@ import sys import os from collections import Counter +boto_import_failed = False try: import boto import boto.cloudtrail from boto.regioninfo import RegionInfo except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) + boto_import_failed = True class CloudTrailManager: """Handles cloudtrail configuration""" @@ -150,23 +150,25 @@ class CloudTrailManager: def main(): + + if not has_libcloud: + module.fail_json(msg='boto is required.') + argument_spec = ec2_argument_spec() argument_spec.update(dict( - state={'required': True, 'choices': ['enabled', 'absent'] }, + state={'required': True, 'choices': ['enabled', 'disabled'] }, name={'required': True, 'type': 'str' }, s3_bucket_name={'required': False, 'type': 'str' }, s3_key_prefix={'default':'', 'required': False, 'type': 'str' }, include_global_events={'default':True, 'required': False, 'type': 'bool' }, )) + required_together = ( ['state', 's3_bucket_name'] ) - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together) ec2_url, access_key, secret_key, region = get_ec2_creds(module) aws_connect_params = dict(aws_access_key_id=access_key, aws_secret_access_key=secret_key) - if module.params['state'] == 'enabled' and not module.params['s3_bucket_name']: - module.fail_json(msg="s3_bucket_name must be specified as a parameter when creating a cloudtrail") - if not region: module.fail_json(msg="Region must 
be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") @@ -209,7 +211,7 @@ def main(): results['changed'] = True # delete the cloudtrai - elif module.params['state'] == 'absent': + elif module.params['state'] == 'disabled': # check to see if it exists before deleting. results['exists'] = cf_man.exists(name=ct_name) if results['exists']: From 520f4102570936e9486921b58772ec2da6a7582e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 26 Mar 2015 01:15:34 -0400 Subject: [PATCH 142/224] moved cloudtrail to amazon subdir --- cloud/amazon/__init__.py | 0 cloud/{ => amazon}/cloudtrail.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 cloud/amazon/__init__.py rename cloud/{ => amazon}/cloudtrail.py (100%) diff --git a/cloud/amazon/__init__.py b/cloud/amazon/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/cloudtrail.py b/cloud/amazon/cloudtrail.py similarity index 100% rename from cloud/cloudtrail.py rename to cloud/amazon/cloudtrail.py From f86ce495c7e8b29c02b25ebfabc6bdb2adc5e3b6 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Fri, 17 Oct 2014 14:58:28 -0700 Subject: [PATCH 143/224] Add enabled/disabled support to bigip_node This allows one to enable or disable a node, useful for when doing maintenance on a node to prevent connections from being attempted to it. This will completely disable the node for any pool it might be in. 
--- network/f5/bigip_node.py | 64 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index f54fafdb64b..ca212763881 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -67,7 +67,7 @@ options: - Pool member state required: true default: present - choices: ['present', 'absent'] + choices: ['present', 'absent', 'enabled', 'disabled'] aliases: [] partition: description: @@ -78,7 +78,7 @@ options: aliases: [] name: description: - - "Node name" + - "Node name. Required when state=enabled/disabled" required: false default: null choices: [] @@ -145,6 +145,11 @@ EXAMPLES = ''' partition=matthite name="{{ ansible_default_ipv4["address"] }}" + - name: Disable node + bigip_node: server=lb.mydomain.com user=admin password=mysecret + state=disabled name=mynodename + delegate_to: localhost + ''' try: @@ -158,6 +163,13 @@ else: # bigip_node module specific # +# map of state values +STATES={'enabled': 'STATE_ENABLED', + 'disabled': 'STATE_DISABLED'} +STATUSES={'enabled': 'SESSION_STATUS_ENABLED', + 'disabled': 'SESSION_STATUS_DISABLED', + 'offline': 'SESSION_STATUS_FORCED_DISABLED'} + def bigip_api(bigip, user, password): api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) return api @@ -220,6 +232,25 @@ def set_node_description(api, name, description): def get_node_description(api, name): return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0] +def set_node_disabled(api, name): + set_node_session_enabled_state(api, name, STATES['disabled']) + result = True + desc = "" + return (result, desc) + +def set_node_enabled(api, name): + set_node_session_enabled_state(api, name, STATES['enabled']) + result = True + desc = "" + return (result, desc) + +def set_node_session_enabled_state(api, name, state): + api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name], + states=[state]) + +def get_node_session_status(api, name): + return 
api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0] + def main(): module = AnsibleModule( argument_spec = dict( @@ -227,7 +258,8 @@ def main(): user = dict(type='str', required=True), password = dict(type='str', required=True), validate_certs = dict(default='yes', type='bool'), - state = dict(type='str', default='present', choices=['present', 'absent']), + state = dict(type='str', default='present', + choices=['present', 'absent', 'disabled', 'enabled']), partition = dict(type='str', default='Common'), name = dict(type='str', required=True), host = dict(type='str', aliases=['address', 'ip']), @@ -302,6 +334,32 @@ def main(): set_node_description(api, address, description) result = {'changed': True} + elif state in ('disabled', 'enabled'): + if name is None: + module.fail_json(msg="name parameter required when " \ + "state=enabled/disabled") + if not module.check_mode: + if not node_exists(api, name): + module.fail_json(msg="node does not exist") + status = get_node_session_status(api, name) + if state == 'disabled': + if status not in (STATUSES['disabled'], STATUSES['offline']): + disabled, desc = set_node_disabled(api, name) + if not disabled: + module.fail_json(msg="unable to disable: %s" % desc) + else: + result = {'changed': True} + else: + if status != STATUSES['enabled']: + enabled, desc = set_node_enabled(api, name) + if not enabled: + module.fail_json(msg="unable to enable: %s" % desc) + else: + result = {'changed': True} + else: + # check-mode return value + result = {'changed': True} + except Exception, e: module.fail_json(msg="received exception: %s" % e) From 2b192c7f8e0c99830b2beabc977ace5963c5ebc1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 26 Mar 2015 11:36:21 -0700 Subject: [PATCH 144/224] remove non-ascii quotes from message string Fixes https://github.com/ansible/ansible/pull/8564 --- system/locale_gen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/locale_gen.py b/system/locale_gen.py index 
5d53951cf18..c5943cd63a0 100644 --- a/system/locale_gen.py +++ b/system/locale_gen.py @@ -159,7 +159,7 @@ def main(): # Ubuntu created its own system to manage locales. ubuntuMode = True else: - module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package “locales” installed?") + module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?") else: # We found the common way to manage locales. ubuntuMode = False From 400166a655b304094005aace178d0fab1cfe9763 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 26 Mar 2015 11:42:08 -0700 Subject: [PATCH 145/224] Port is an integer so use arg_spec to enforce that. --- database/mysql/mysql_replication.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index 07d09602b6b..30811cdc924 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -239,7 +239,7 @@ def main(): login_user=dict(default=None), login_password=dict(default=None), login_host=dict(default="localhost"), - login_port=dict(default="3306"), + login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]), master_host=dict(default=None), @@ -304,10 +304,10 @@ def main(): try: if module.params["login_unix_socket"]: db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password) - elif module.params["login_port"] != "3306" and module.params["login_host"] == "localhost": + elif module.params["login_port"] != 3306 and module.params["login_host"] == "localhost": module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined") else: - 
db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password) + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password) except Exception, e: module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") try: From 3b54e7b00ec9d3c614a4821d15253dbc9166dc10 Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sat, 28 Mar 2015 00:07:01 -0700 Subject: [PATCH 146/224] add zabbix_host --- monitoring/zabbix_host.py | 458 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 458 insertions(+) create mode 100644 monitoring/zabbix_host.py diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py new file mode 100644 index 00000000000..0d3cc8e661f --- /dev/null +++ b/monitoring/zabbix_host.py @@ -0,0 +1,458 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013-2014, Epic Games, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: zabbix_host +short_description: Zabbix host creates/updates/deletes +description: + - When the host does not exists, a new host will be created, added to any host groups and linked to any templates. 
+ - When the host already exists, the host group membership will be updated, along with the template links and interfaces. + - Delete a host from Zabbix if the host exists. +version_added: "1.9" +author: Tony Minfei Ding, Harrison Gu +requirements: + - zabbix-api python module +options: + server_url: + description: + - Url of Zabbix server, with protocol (http or https). + C(url) is an alias for C(server_url). + required: true + default: null + aliases: [ "url" ] + login_user: + description: + - Zabbix user name. + required: true + default: null + login_password: + description: + - Zabbix user password. + required: true + default: null + host_name: + description: + - Technical name of the host. + - If the host has already been added, the host name won't be updated. + required: true + host_groups: + description: + - List of host groups to add the host to. + required: false + link_templates: + description: + - List of templates to be linked to the host. + required: false + default: None + status: + description: + - Status and function of the host. + - Possible values are: enabled and disabled + required: false + default: "enabled" + state: + description: + - create/update or delete host. + - Possible values are: present and absent. If the host already exists, and the state is "present", just to update the host. + required: false + default: "present" + timeout: + description: + - The timeout of API request(seconds). + default: 10 + interfaces: + description: + - List of interfaces to be created for the host (see example). + - Available values are: dns, ip, main, port, type and useip. 
+ - Please review the interface documentation for more information on the supported properties: + - https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface + required: false +''' + +EXAMPLES = ''' +- name: Create a new host or update an existing host's info + local_action: + module: zabbix_host + server_url: http://monitor.example.com + login_user: username + login_password: password + host_name: ExampleHost + host_groups: + - Example group1 + - Example group2 + link_templates: + - Example template1 + - Example template2 + status: enabled + state: present + interfaces: + - type: 1 + main: 1 + useip: 1 + ip: 10.xx.xx.xx + dns: "" + port: 10050 + - type: 4 + main: 1 + useip: 1 + ip: 10.xx.xx.xx + dns: "" + port: 12345 +''' + +import logging +import copy +from ansible.module_utils.basic import * + +try: + from zabbix_api import ZabbixAPI, ZabbixAPISubClass + + HAS_ZABBIX_API = True +except ImportError: + HAS_ZABBIX_API = False + + +# Extend the ZabbixAPI +# Since the zabbix-api python module too old (version 1.0, no higher version so far), +# it does not support the 'hostinterface' api calls, +# so we have to inherit the ZabbixAPI class to add 'hostinterface' support. 
+class ZabbixAPIExtends(ZabbixAPI): + hostinterface = None + + def __init__(self, server, timeout, **kwargs): + ZabbixAPI.__init__(self, server, timeout=timeout) + self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs)) + + +class Host(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # exist host + def is_host_exist(self, host_name): + result = self._zapi.host.exists({'host': host_name}) + return result + + # check if host group exists + def check_host_group_exist(self, group_names): + for group_name in group_names: + result = self._zapi.hostgroup.exists({'name': group_name}) + if not result: + self._module.fail_json(msg="Hostgroup not found: %s" % group_name) + return True + + def get_template_ids(self, template_list): + template_ids = [] + if template_list is None or len(template_list) == 0: + return template_ids + for template in template_list: + template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}}) + if len(template_list) < 1: + self._module.fail_json(msg="Template not found: %s" % template) + else: + template_id = template_list[0]['templateid'] + template_ids.append(template_id) + return template_ids + + def add_host(self, host_name, group_ids, status, interfaces): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + host_list = self._zapi.host.create({'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}) + if len(host_list) >= 1: + return host_list['hostids'][0] + except Exception, e: + self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e)) + + def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.update({'hostid': host_id, 'groups': group_ids, 'status': status}) + interface_list_copy = exist_interface_list + if interfaces: + for 
interface in interfaces: + flag = False + interface_str = interface + for exist_interface in exist_interface_list: + interface_type = interface['type'] + exist_interface_type = int(exist_interface['type']) + if interface_type == exist_interface_type: + # update + interface_str['interfaceid'] = exist_interface['interfaceid'] + self._zapi.hostinterface.update(interface_str) + flag = True + interface_list_copy.remove(exist_interface) + break + if not flag: + # add + interface_str['hostid'] = host_id + self._zapi.hostinterface.create(interface_str) + # remove + remove_interface_ids = [] + for remove_interface in interface_list_copy: + interface_id = remove_interface['interfaceid'] + remove_interface_ids.append(interface_id) + if len(remove_interface_ids) > 0: + self._zapi.hostinterface.delete(remove_interface_ids) + except Exception, e: + self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e)) + + def delete_host(self, host_id, host_name): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.delete({'hostid': host_id}) + except Exception, e: + self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e)) + + # get host by host name + def get_host_by_host_name(self, host_name): + host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}}) + if len(host_list) < 1: + self._module.fail_json(msg="Host not found: %s" % host_name) + else: + return host_list[0] + + # get group ids by group names + def get_group_ids_by_group_names(self, group_names): + group_ids = [] + if self.check_host_group_exist(group_names): + group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}}) + for group in group_list: + group_id = group['groupid'] + group_ids.append({'groupid': group_id}) + return group_ids + + # get host templates by host id + def get_host_templates_by_host_id(self, host_id): + template_ids = [] + template_list = 
self._zapi.template.get({'output': 'extend', 'hostids': host_id}) + for template in template_list: + template_ids.append(template['templateid']) + return template_ids + + # get host groups by host id + def get_host_groups_by_host_id(self, host_id): + exist_host_groups = [] + host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id}) + + if len(host_groups_list) >= 1: + for host_groups_name in host_groups_list: + exist_host_groups.append(host_groups_name['name']) + return exist_host_groups + + # check the exist_interfaces whether it equals the interfaces or not + def check_interface_properties(self, exist_interface_list, interfaces): + interfaces_port_list = [] + if len(interfaces) >= 1: + for interface in interfaces: + interfaces_port_list.append(int(interface['port'])) + + exist_interface_ports = [] + if len(exist_interface_list) >= 1: + for exist_interface in exist_interface_list: + exist_interface_ports.append(int(exist_interface['port'])) + + if set(interfaces_port_list) != set(exist_interface_ports): + return True + + for exist_interface in exist_interface_list: + exit_interface_port = int(exist_interface['port']) + for interface in interfaces: + interface_port = int(interface['port']) + if interface_port == exit_interface_port: + for key in interface.keys(): + if str(exist_interface[key]) != str(interface[key]): + return True + + return False + + # get the status of host by host + def get_host_status_by_host(self, host): + return host['status'] + + # check all the properties before link or clear template + def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids, + exist_interfaces, host): + # get the existing host's groups + exist_host_groups = self.get_host_groups_by_host_id(host_id) + if set(host_groups) != set(exist_host_groups): + return True + + # get the existing status + exist_status = self.get_host_status_by_host(host) + if int(status) != int(exist_status): + return True + + # check the 
exist_interfaces whether it equals the interfaces or not + if self.check_interface_properties(exist_interfaces, interfaces): + return True + + # get the existing templates + exist_template_ids = self.get_host_templates_by_host_id(host_id) + if set(list(template_ids)) != set(exist_template_ids): + return True + + return False + + # link or clear template of the host + def link_or_clear_template(self, host_id, template_id_list): + # get host's exist template ids + exist_template_id_list = self.get_host_templates_by_host_id(host_id) + + exist_template_ids = set(exist_template_id_list) + template_ids = set(template_id_list) + template_id_list = list(template_ids) + + # get unlink and clear templates + templates_clear = exist_template_ids.difference(template_ids) + templates_clear_list = list(templates_clear) + request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list} + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.update(request_str) + except Exception, e: + self._module.fail_json(msg="Failed to link template to host: %s" % e) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(required=True, default=None, aliases=['url']), + login_user=dict(required=True), + login_password=dict(required=True), + host_name=dict(required=True), + host_groups=dict(required=False), + link_templates=dict(required=False), + status=dict(default="enabled"), + state=dict(default="present"), + timeout=dict(default=10), + interfaces=dict(required=False) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)") + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + host_name = module.params['host_name'] + host_groups = module.params['host_groups'] + link_templates = 
module.params['link_templates'] + status = module.params['status'] + state = module.params['state'] + timeout = module.params['timeout'] + interfaces = module.params['interfaces'] + + # convert enabled to 0; disabled to 1 + status = 1 if status == "disabled" else 0 + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPIExtends(server_url, timeout=timeout) + zbx.login(login_user, login_password) + except Exception, e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + host = Host(module, zbx) + + template_ids = [] + if link_templates: + template_ids = host.get_template_ids(link_templates) + + group_ids = [] + + if host_groups: + group_ids = host.get_group_ids_by_group_names(host_groups) + + ip = "" + if interfaces: + for interface in interfaces: + if interface['type'] == 1: + ip = interface['ip'] + + # check if host exist + is_host_exist = host.is_host_exist(host_name) + + if is_host_exist: + # get host id by host name + zabbix_host_obj = host.get_host_by_host_name(host_name) + host_id = zabbix_host_obj['hostid'] + + if state == "absent": + # remove host + host.delete_host(host_id, host_name) + module.exit_json(changed=True, result="Successfully delete host %s" % host_name) + else: + if not group_ids: + module.fail_json(msg="Specify at least one group for updating host '%s'." 
% host_name) + + # get exist host's interfaces + exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id}) + exist_interfaces_copy = copy.deepcopy(exist_interfaces) + + # update host + interfaces_len = len(interfaces) if interfaces else 0 + + if len(exist_interfaces) > interfaces_len: + if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids, + exist_interfaces, zabbix_host_obj): + host.link_or_clear_template(host_id, template_ids) + host.update_host(host_name, group_ids, status, host_id, + interfaces, exist_interfaces) + module.exit_json(changed=True, + result="Successfully update host %s (%s) and linked with template '%s'" + % (host_name, ip, link_templates)) + else: + module.exit_json(changed=False) + else: + if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids, + exist_interfaces_copy, zabbix_host_obj): + host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces) + host.link_or_clear_template(host_id, template_ids) + module.exit_json(changed=True, + result="Successfully update host %s (%s) and linked with template '%s'" + % (host_name, ip, link_templates)) + else: + module.exit_json(changed=False) + else: + if not group_ids: + module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name) + + if not interfaces or (interfaces and len(interfaces) == 0): + module.fail_json(msg="Specify at least one interface for creating host '%s'." 
% host_name) + + # create host + host_id = host.add_host(host_name, group_ids, status, interfaces) + host.link_or_clear_template(host_id, template_ids) + module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % ( + host_name, ip, link_templates)) + +from ansible.module_utils.basic import * +main() + From d0256c593a7973f35c6ab5462afc93a408a1d453 Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sun, 14 Dec 2014 15:14:23 -0800 Subject: [PATCH 147/224] add zabbix_hostmacro --- monitoring/zabbix_hostmacro.py | 233 +++++++++++++++++++++++++++++++++ 1 file changed, 233 insertions(+) create mode 100644 monitoring/zabbix_hostmacro.py diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py new file mode 100644 index 00000000000..6cd95b714f8 --- /dev/null +++ b/monitoring/zabbix_hostmacro.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013-2014, Epic Games, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: zabbix_hostmacro +short_description: Zabbix host macro creates/updates/deletes +description: + - When the host macro does not exists, a new macro will be created, added to specific host. + - When the host macro already exists, the value will be updated. + - Delete a host macro from Zabbix if the macro exists. 
+version_added: "1.9"
+author: Dean Hailin Song
+requirements:
+ - zabbix-api python module
+options:
+ server_url:
+ description:
+ - Url of Zabbix server, with protocol (http or https). C(url) is an alias for C(server_url).
+ required: true
+ default: null
+ aliases: [ "url" ]
+ login_user:
+ description:
+ - Zabbix user name.
+ required: true
+ default: null
+ login_password:
+ description:
+ - Zabbix user password.
+ required: true
+ default: null
+ host_name:
+ description:
+ - Technical name of the host.
+ - If the host has already been added, the host name won't be updated.
+ required: true
+ macro_name:
+ description:
+ - Technical name of the host macro.
+ required: true
+ macro_value:
+ description:
+ - Value of the host macro.
+ required: true
+ state:
+ description:
+ - create/update or delete macro.
+ - Possible values are: present and absent. If the macro already exists, and the state is "present", just update the macro.
+ required: false
+ default: "present"
+ timeout:
+ description:
+ - The timeout of API request (seconds).
+ default: 10
+'''
+
+EXAMPLES = '''
+- name: Create a new host macro or update an existing macro's value
+ local_action:
+ module: zabbix_hostmacro
+ server_url: http://monitor.example.com
+ login_user: username
+ login_password: password
+ host_name: ExampleHost
+ macro_name: Example macro
+ macro_value: Example value
+ state: present
+'''
+
+import logging
+import copy
+from ansible.module_utils.basic import *
+
+try:
+ from zabbix_api import ZabbixAPI, ZabbixAPISubClass
+
+ HAS_ZABBIX_API = True
+except ImportError:
+ HAS_ZABBIX_API = False
+
+
+# Extend the ZabbixAPI
+# Since the zabbix-api python module is too old (version 1.0, no higher version so far).
+class ZabbixAPIExtends(ZabbixAPI): + def __init__(self, server, timeout, **kwargs): + ZabbixAPI.__init__(self, server, timeout=timeout) + + +class HostMacro(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # exist host + def is_host_exist(self, host_name): + result = self._zapi.host.exists({'host': host_name}) + return result + + # get host id by host name + def get_host_id(self, host_name): + try: + host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': host_name}}) + if len(host_list) < 1: + self._module.fail_json(msg="Host not found: %s" % host_name) + else: + host_id = host_list[0]['hostid'] + return host_id + except Exception, e: + self._module.fail_json(msg="Failed to get the host %s id: %s." % (host_name, e)) + + # get host macro + def get_host_macro(self, macro_name, host_id): + try: + host_macro_list = self._zapi.usermacro.get( + {"output": "extend", "selectSteps": "extend", 'hostids': [host_id], 'filter': {'macro': '{$' + macro_name + '}'}}) + if len(host_macro_list) > 0: + return host_macro_list[0] + return None + except Exception, e: + self._module.fail_json(msg="Failed to get host macro %s: %s" % (macro_name, e)) + + # create host macro + def create_host_macro(self, macro_name, macro_value, host_id): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.usermacro.create({'hostid': host_id, 'macro': '{$' + macro_name + '}', 'value': macro_value}) + self._module.exit_json(changed=True, result="Successfully added host macro %s " % macro_name) + except Exception, e: + self._module.fail_json(msg="Failed to create host macro %s: %s" % (macro_name, e)) + + # update host macro + def update_host_macro(self, host_macro_obj, macro_name, macro_value): + host_macro_id = host_macro_obj['hostmacroid'] + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.usermacro.update({'hostmacroid': host_macro_id, 'value': macro_value}) + 
self._module.exit_json(changed=True, result="Successfully updated host macro %s " % macro_name) + except Exception, e: + self._module.fail_json(msg="Failed to updated host macro %s: %s" % (macro_name, e)) + + # delete host macro + def delete_host_macro(self, host_macro_obj, macro_name): + host_macro_id = host_macro_obj['hostmacroid'] + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.usermacro.delete([host_macro_id]) + self._module.exit_json(changed=True, result="Successfully deleted host macro %s " % macro_name) + except Exception, e: + self._module.fail_json(msg="Failed to delete host macro %s: %s" % (macro_name, e)) + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(required=True, default=None, aliases=['url']), + login_user=dict(required=True), + login_password=dict(required=True), + host_name=dict(required=True), + macro_name=dict(required=True), + macro_value=dict(required=True), + state=dict(default="present"), + timeout=dict(default=10) + ), + supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)") + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + host_name = module.params['host_name'] + macro_name = (module.params['macro_name']).upper() + macro_value = module.params['macro_value'] + state = module.params['state'] + timeout = module.params['timeout'] + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPIExtends(server_url, timeout=timeout) + zbx.login(login_user, login_password) + except Exception, e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + host_macro_class_obj = HostMacro(module, zbx) + + changed = False + + if host_name: + host_id = host_macro_class_obj.get_host_id(host_name) + host_macro_obj = host_macro_class_obj.get_host_macro(macro_name, host_id) 
+ + if state == 'absent': + if not host_macro_obj: + module.exit_json(changed=False, msg="Host Macro %s does not exist" % macro_name) + else: + # delete a macro + host_macro_class_obj.delete_host_macro(host_macro_obj, macro_name) + else: + if not host_macro_obj: + # create host macro + host_macro_class_obj.create_host_macro(macro_name, macro_value, host_id) + else: + # update host macro + host_macro_class_obj.update_host_macro(host_macro_obj, macro_name, macro_value) + +from ansible.module_utils.basic import * +main() + From 092cb5b4cdf2515ab105ecd60ac135535454aacb Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sat, 28 Mar 2015 08:18:25 -0700 Subject: [PATCH 148/224] add zabbix_screen --- monitoring/zabbix_screen.py | 423 ++++++++++++++++++++++++++++++++++++ 1 file changed, 423 insertions(+) create mode 100644 monitoring/zabbix_screen.py diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py new file mode 100644 index 00000000000..06e336ec368 --- /dev/null +++ b/monitoring/zabbix_screen.py @@ -0,0 +1,423 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013-2014, Epic Games, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +DOCUMENTATION = ''' +--- +module: zabbix_screen +short_description: Zabbix screen creates/updates/deletes +description: + - When the screen does not exists, a new screen will be created with any screen items specified. 
+ - When the screen already exists and the graphs have changed, the screen items will be updated.
+ - When the graph IDs have not changed, the screen items won't be updated unless the graph_width and graph_height have changed.
+ - Delete screen(s) from Zabbix if the screen(s) exists.
+version_added: "1.9"
+author: Tony Minfei Ding, Harrison Gu
+requirements:
+ - zabbix-api python module
+options:
+ server_url:
+ description:
+ - Url of Zabbix server, with protocol (http or https). C(url) is an alias for C(server_url).
+ required: true
+ default: null
+ aliases: [ "url" ]
+ login_user:
+ description:
+ - Zabbix user name.
+ required: true
+ default: null
+ login_password:
+ description:
+ - Zabbix user password.
+ required: true
+ default: null
+ timeout:
+ description:
+ - The timeout of API request(seconds).
+ default: 10
+ screens:
+ description:
+ - List of screens to be created/updated/deleted (see example).
+ - If the screen(s) have already been added, the screen(s) name won't be updated.
+ - When creating or updating screen(s), the screen_name, host_group are required.
+ - When deleting screen(s), the screen_name is required.
+ - The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated.
+ required: true
+ default: null
+notes:
+ - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed.
+'''
+
+EXAMPLES = '''
+# Create/update a screen.
+- name: Create a new screen or update an existing screen's items + local_action: + module: zabbix_screen + server_url: http://monitor.example.com + login_user: username + login_password: password + screens: + - screen_name: ExampleScreen1 + host_group: Example group1 + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + +# Create/update multi-screen +- name: Create two of new screens or update the existing screens' items + local_action: + module: zabbix_screen + server_url: http://monitor.example.com + login_user: username + login_password: password + screens: + - screen_name: ExampleScreen1 + host_group: Example group1 + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + - screen_name: ExampleScreen2 + host_group: Example group2 + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + +# Limit the Zabbix screen creations to one host since Zabbix can return an error when doing concurent updates +- name: Create a new screen or update an existing screen's items + local_action: + module: zabbix_screen + server_url: http://monitor.example.com + login_user: username + login_password: password + state: present + screens: + - screen_name: ExampleScreen + host_group: Example group + state: present + graph_names: + - Example graph1 + - Example graph2 + graph_width: 200 + graph_height: 100 + when: inventory_hostname==groups['group_name'][0] +''' + +from ansible.module_utils.basic import * + +try: + from zabbix_api import ZabbixAPI, ZabbixAPISubClass + from zabbix_api import ZabbixAPIException + from zabbix_api import Already_Exists + HAS_ZABBIX_API = True +except ImportError: + HAS_ZABBIX_API = False + + +# Extend the ZabbixAPI +# Since the zabbix-api python module too old (version 1.0, and there's no higher version so far), it doesn't support the 'screenitem' api call, +# we have to inherit the ZabbixAPI 
class to add 'screenitem' support. +class ZabbixAPIExtends(ZabbixAPI): + screenitem = None + + def __init__(self, server, timeout, **kwargs): + ZabbixAPI.__init__(self, server, timeout=timeout) + self.screenitem = ZabbixAPISubClass(self, dict({"prefix": "screenitem"}, **kwargs)) + + +class Screen(object): + def __init__(self, module, zbx): + self._module = module + self._zapi = zbx + + # get group id by group name + def get_host_group_id(self, group_name): + if group_name == "": + self._module.fail_json(msg="group_name is required") + hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}}) + if len(hostGroup_list) < 1: + self._module.fail_json(msg="Host group not found: %s" % group_name) + else: + hostGroup_id = hostGroup_list[0]['groupid'] + return hostGroup_id + + # get monitored host_id by host_group_id + def get_host_ids_by_group_id(self, group_id): + host_list = self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1}) + if len(host_list) < 1: + self._module.fail_json(msg="No host in the group.") + else: + host_ids = [] + for i in host_list: + host_id = i['hostid'] + host_ids.append(host_id) + return host_ids + + # get screen + def get_screen_id(self, screen_name): + if screen_name == "": + self._module.fail_json(msg="screen_name is required") + try: + screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}}) + if len(screen_id_list) >= 1: + screen_id = screen_id_list[0]['screenid'] + return screen_id + return None + except Exception as e: + self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e)) + + # create screen + def create_screen(self, screen_name, h_size, v_size): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size}) + return screen['screenids'][0] + except Exception as e: + 
self._module.fail_json(msg="Failed to create screen %s: %s" % (screen_name, e)) + + # update screen + def update_screen(self, screen_id, screen_name, h_size, v_size): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size}) + except Exception as e: + self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e)) + + # delete screen + def delete_screen(self, screen_id, screen_name): + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.screen.delete([screen_id]) + except Exception as e: + self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e)) + + # get graph ids + def get_graph_ids(self, hosts, graph_name_list): + graph_id_lists = [] + vsize = 1 + for host in hosts: + graph_id_list = self.get_graphs_by_host_id(graph_name_list, host) + size = len(graph_id_list) + if size > 0: + graph_id_lists.extend(graph_id_list) + if vsize < size: + vsize = size + return graph_id_lists, vsize + + # getGraphs + def get_graphs_by_host_id(self, graph_name_list, host_id): + graph_ids = [] + for graph_name in graph_name_list: + graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id}) + graph_id_list = [] + if len(graphs_list) > 0: + for graph in graphs_list: + graph_id = graph['graphid'] + graph_id_list.append(graph_id) + if len(graph_id_list) > 0: + graph_ids.extend(graph_id_list) + return graph_ids + + # get screen items + def get_screen_items(self, screen_id): + screen_item_list = self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id}) + return screen_item_list + + # delete screen items + def delete_screen_items(self, screen_id, screen_item_id_list): + try: + if len(screen_item_id_list) == 0: + return True + screen_item_list = self.get_screen_items(screen_id) + if len(screen_item_list) > 0: + if self._module.check_mode: + 
self._module.exit_json(changed=True) + self._zapi.screenitem.delete(screen_item_id_list) + return True + return False + except ZabbixAPIException: + pass + + # get screen's hsize and vsize + def get_hsize_vsize(self, hosts, v_size): + h_size = len(hosts) + if h_size == 1: + if v_size == 1: + h_size = 1 + elif v_size in range(2, 9): + h_size = 2 + else: + h_size = 3 + v_size = (v_size - 1) / h_size + 1 + return h_size, v_size + + # create screen_items + def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size): + if len(hosts) < 4: + if width is None or width < 0: + width = 500 + else: + if width is None or width < 0: + width = 200 + if height is None or height < 0: + height = 100 + + try: + # when there're only one host, only one row is not good. + if len(hosts) == 1: + graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0]) + for i, graph_id in enumerate(graph_id_list): + if graph_id is not None: + self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id, + 'width': width, 'height': height, + 'x': i % h_size, 'y': i / h_size, 'colspan': 1, 'rowspan': 1, + 'elements': 0, 'valign': 0, 'halign': 0, + 'style': 0, 'dynamic': 0, 'sort_triggers': 0}) + else: + for i, host in enumerate(hosts): + graph_id_list = self.get_graphs_by_host_id(graph_name_list, host) + for j, graph_id in enumerate(graph_id_list): + if graph_id is not None: + self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id, + 'width': width, 'height': height, + 'x': i, 'y': j, 'colspan': 1, 'rowspan': 1, + 'elements': 0, 'valign': 0, 'halign': 0, + 'style': 0, 'dynamic': 0, 'sort_triggers': 0}) + except Already_Exists: + pass + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(required=True, default=None, aliases=['url']), + login_user=dict(required=True), + login_password=dict(required=True), + timeout=dict(default=10), + screens=dict(required=True) + ), 
+ supports_check_mode=True + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)") + + server_url = module.params['server_url'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + timeout = module.params['timeout'] + screens = module.params['screens'] + + zbx = None + # login to zabbix + try: + zbx = ZabbixAPIExtends(server_url, timeout=timeout) + zbx.login(login_user, login_password) + except Exception, e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + screen = Screen(module, zbx) + created_screens = [] + changed_screens = [] + deleted_screens = [] + + for zabbix_screen in screens: + screen_name = zabbix_screen['screen_name'] + screen_id = screen.get_screen_id(screen_name) + state = "absent" if "state" in zabbix_screen and zabbix_screen['state'] == "absent" else "present" + + if state == "absent": + if screen_id: + screen_item_list = screen.get_screen_items(screen_id) + screen_item_id_list = [] + for screen_item in screen_item_list: + screen_item_id = screen_item['screenitemid'] + screen_item_id_list.append(screen_item_id) + screen.delete_screen_items(screen_id, screen_item_id_list) + screen.delete_screen(screen_id, screen_name) + + deleted_screens.append(screen_name) + else: + host_group = zabbix_screen['host_group'] + graph_names = zabbix_screen['graph_names'] + graph_width = None + if 'graph_width' in zabbix_screen: + graph_width = zabbix_screen['graph_width'] + graph_height = None + if 'graph_height' in zabbix_screen: + graph_height = zabbix_screen['graph_height'] + host_group_id = screen.get_host_group_id(host_group) + hosts = screen.get_host_ids_by_group_id(host_group_id) + + screen_item_id_list = [] + resource_id_list = [] + + graph_ids, v_size = screen.get_graph_ids(hosts, graph_names) + h_size, v_size = screen.get_hsize_vsize(hosts, v_size) + + if not screen_id: + # create screen + screen_id = 
screen.create_screen(screen_name, h_size, v_size) + screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size) + created_screens.append(screen_name) + else: + screen_item_list = screen.get_screen_items(screen_id) + + for screen_item in screen_item_list: + screen_item_id = screen_item['screenitemid'] + resource_id = screen_item['resourceid'] + screen_item_id_list.append(screen_item_id) + resource_id_list.append(resource_id) + + # when the screen items changed, then update + if graph_ids != resource_id_list: + deleted = screen.delete_screen_items(screen_id, screen_item_id_list) + if deleted: + screen.update_screen(screen_id, screen_name, h_size, v_size) + screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size) + changed_screens.append(screen_name) + + if created_screens and changed_screens: + module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens), ",".join(changed_screens))) + elif created_screens: + module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens)) + elif changed_screens: + module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens)) + elif deleted_screens: + module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens)) + else: + module.exit_json(changed=False) + +# <> +main() From c08ce5b30dc16be4d97a9cba36f01186310980b4 Mon Sep 17 00:00:00 2001 From: Tyler Kellen Date: Sat, 28 Mar 2015 13:20:30 -0600 Subject: [PATCH 149/224] correct version_added for known_hosts It was added in 1.9, not 1.6. 
--- system/known_hosts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/known_hosts.py b/system/known_hosts.py index d4a6e9c35e0..893eca3dcb7 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -26,7 +26,7 @@ description: - The M(known_hosts) module lets you add or remove a host from the C(known_hosts) file. This is useful if you're going to want to use the M(git) module over ssh, for example. If you have a very large number of host keys to manage, you will find the M(template) module more useful. -version_added: "1.6" +version_added: "1.9" options: name: aliases: [ 'host' ] From 7c41002d933020cd781a12b7428e13ac20432864 Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sun, 29 Mar 2015 13:37:15 -0700 Subject: [PATCH 150/224] remove superfluous defaults --- monitoring/zabbix_host.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 0d3cc8e661f..a425afda1cd 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -37,18 +37,15 @@ options: - Url of Zabbix server, with protocol (http or https). C(url) is an alias for C(server_url). required: true - default: null aliases: [ "url" ] login_user: description: - Zabbix user name. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null host_name: description: - Technical name of the host. 
@@ -340,7 +337,7 @@ class Host(object): def main(): module = AnsibleModule( argument_spec=dict( - server_url=dict(required=True, default=None, aliases=['url']), + server_url=dict(required=True, aliases=['url']), login_user=dict(required=True), login_password=dict(required=True), host_name=dict(required=True), From 636e96fafc567cf615816f7207242be14e96e03b Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sun, 29 Mar 2015 13:41:07 -0700 Subject: [PATCH 151/224] remove superfluous defaults --- monitoring/zabbix_hostmacro.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py index 6cd95b714f8..f0f1a7efdd1 100644 --- a/monitoring/zabbix_hostmacro.py +++ b/monitoring/zabbix_hostmacro.py @@ -37,18 +37,15 @@ options: - Url of Zabbix server, with protocol (http or https). C(url) is an alias for C(server_url). required: true - default: null aliases: [ "url" ] login_user: description: - Zabbix user name. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null host_name: description: - Technical name of the host. 
@@ -174,7 +171,7 @@ class HostMacro(object): def main(): module = AnsibleModule( argument_spec=dict( - server_url=dict(required=True, default=None, aliases=['url']), + server_url=dict(required=True, aliases=['url']), login_user=dict(required=True), login_password=dict(required=True), host_name=dict(required=True), From 32878110d07e43511b4d631a21cb433840def816 Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sun, 29 Mar 2015 13:41:52 -0700 Subject: [PATCH 152/224] remove superfluous defaults --- monitoring/zabbix_screen.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py index 06e336ec368..ff69d24ea4e 100644 --- a/monitoring/zabbix_screen.py +++ b/monitoring/zabbix_screen.py @@ -39,18 +39,15 @@ options: - Url of Zabbix server, with protocol (http or https). C(url) is an alias for C(server_url). required: true - default: null aliases: [ "url" ] login_user: description: - Zabbix user name. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null timeout: description: - The timeout of API request(seconds). @@ -63,7 +60,6 @@ options: - When deleting screen(s), the screen_name is required. - The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated. required: true - default: null notes: - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed. 
''' @@ -321,7 +317,7 @@ class Screen(object): def main(): module = AnsibleModule( argument_spec=dict( - server_url=dict(required=True, default=None, aliases=['url']), + server_url=dict(required=True, aliases=['url']), login_user=dict(required=True), login_password=dict(required=True), timeout=dict(default=10), From 510b77ca0e7b72940a2f0acd45c0df2a7a524e26 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 29 Mar 2015 16:51:30 -0400 Subject: [PATCH 153/224] minor documentation fixes --- monitoring/zabbix_host.py | 29 +++++++++++------------------ monitoring/zabbix_hostmacro.py | 18 +++++------------- monitoring/zabbix_screen.py | 14 +++----------- notification/pushover | 4 ++-- 4 files changed, 21 insertions(+), 44 deletions(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 0d3cc8e661f..63b3178e1ac 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -24,10 +24,8 @@ DOCUMENTATION = ''' module: zabbix_host short_description: Zabbix host creates/updates/deletes description: - - When the host does not exists, a new host will be created, added to any host groups and linked to any templates. - - When the host already exists, the host group membership will be updated, along with the template links and interfaces. - - Delete a host from Zabbix if the host exists. -version_added: "1.9" + - This module allows you to create, modify and delete Zabbix host entries and associated group and template data. +version_added: "2.0" author: Tony Minfei Ding, Harrison Gu requirements: - zabbix-api python module @@ -35,44 +33,38 @@ options: server_url: description: - Url of Zabbix server, with protocol (http or https). - C(url) is an alias for C(server_url). required: true - default: null aliases: [ "url" ] login_user: description: - - Zabbix user name. + - Zabbix user name, used to authenticate against the server. required: true - default: null login_password: description: - Zabbix user password. 
required: true - default: null host_name: description: - - Technical name of the host. - - If the host has already been added, the host name won't be updated. + - Name of the host in Zabbix. + - host_name is the unique identifier used and cannot be updated using this module. required: true host_groups: description: - - List of host groups to add the host to. + - List of host groups the host is part of. required: false link_templates: description: - - List of templates to be linked to the host. + - List of templates linked to the host. required: false default: None status: description: - - Status and function of the host. - - Possible values are: enabled and disabled + - 'Monitoring status of the host. Possible values are: "enabled" and "disabled".' required: false default: "enabled" state: description: - - create/update or delete host. - - Possible values are: present and absent. If the host already exists, and the state is "present", just to update the host. + - 'Possible values are: "present" and "absent". If the host already exists, and the state is "present", it will just to update the host is the associated data is different. "absent" will remove a host if it exists.' required: false default: "present" timeout: @@ -81,11 +73,12 @@ options: default: 10 interfaces: description: - - List of interfaces to be created for the host (see example). + - List of interfaces to be created for the host (see example below). - Available values are: dns, ip, main, port, type and useip. 
- Please review the interface documentation for more information on the supported properties: - https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface required: false + default: [] ''' EXAMPLES = ''' diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py index 6cd95b714f8..871a974f413 100644 --- a/monitoring/zabbix_hostmacro.py +++ b/monitoring/zabbix_hostmacro.py @@ -24,10 +24,8 @@ DOCUMENTATION = ''' module: zabbix_hostmacro short_description: Zabbix host macro creates/updates/deletes description: - - When the host macro does not exists, a new macro will be created, added to specific host. - - When the host macro already exists, the value will be updated. - - Delete a host macro from Zabbix if the macro exists. -version_added: "1.9" + - manages Zabbix host macros, it can create, update or delete them. +version_added: "2.0" author: Dean Hailin Song requirements: - zabbix-api python module @@ -35,28 +33,23 @@ options: server_url: description: - Url of Zabbix server, with protocol (http or https). - C(url) is an alias for C(server_url). required: true - default: null aliases: [ "url" ] login_user: description: - Zabbix user name. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null host_name: description: - - Technical name of the host. - - If the host has already been added, the host name won't be updated. + - Name of the host. required: true macro_name: description: - - Technical name of the host macro. + - Name of the host macro. required: true macro_value: description: @@ -64,8 +57,7 @@ options: required: true state: description: - - create/update or delete macro. - - Possible values are: present and absent. If the macro already exists, and the state is "present", just to update the macro. + - 'Possible values are: "present" and "absent". 
If the macro already exists, and the state is "present", it will just to update the macro if needed.' required: false default: "present" timeout: diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py index 06e336ec368..b0a886a2c0c 100644 --- a/monitoring/zabbix_screen.py +++ b/monitoring/zabbix_screen.py @@ -25,11 +25,8 @@ DOCUMENTATION = ''' module: zabbix_screen short_description: Zabbix screen creates/updates/deletes description: - - When the screen does not exists, a new screen will be created with any screen items specified. - - When the screen already exists and the graphs have changed, the screen items will be updated. - - When the graph IDs have not changed, the screen items won't be updated unless the graph_width and graph_height have changed. - - Delete screen(s) from Zabbix if the screen(s) exists. -version_added: "1.9" + - This module allows you to create, modify and delete Zabbix screens and associated graph data. +version_added: "2.0" author: Tony Minfei Ding, Harrison Gu requirements: - zabbix-api python module @@ -37,20 +34,16 @@ options: server_url: description: - Url of Zabbix server, with protocol (http or https). - C(url) is an alias for C(server_url). required: true - default: null aliases: [ "url" ] login_user: description: - Zabbix user name. required: true - default: null login_password: description: - Zabbix user password. required: true - default: null timeout: description: - The timeout of API request(seconds). @@ -61,9 +54,8 @@ options: - If the screen(s) already been added, the screen(s) name won't be updated. - When creating or updating screen(s), the screen_name, host_group are required. - When deleting screen(s), the screen_name is required. - - The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated. + - The available states are: present(default) and absent. 
If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated as needed. required: true - default: null notes: - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed. ''' diff --git a/notification/pushover b/notification/pushover index 8e9d2596d43..3e710ca02dd 100644 --- a/notification/pushover +++ b/notification/pushover @@ -27,10 +27,10 @@ version_added: "2.0" short_description: Send notifications via u(https://pushover.net) description: - Send notifications via pushover, to subscriber list of devices, and email - addresses. Requires pushover app on devices. + addresses. Requires pushover app on devices. notes: - You will require a pushover.net account to use this module. But no account - is required to receive messages. + is required to receive messages. options: msg: description: From 231ed6208da21c2b7436ba789e62a7ccd654f7a9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 29 Mar 2015 17:05:53 -0400 Subject: [PATCH 154/224] removed requirements which was causing crash with dupe from shared doc fragments --- cloud/amazon/cloudtrail.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index 777f1df846c..b58bcd6e1d0 100755 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -22,7 +22,6 @@ description: - Creates or deletes CloudTrail configuration. Ensures logging is also enabled. This module has a dependency on python-boto >= 2.21. 
version_added: "2.0" author: Ted Timmons -requirements: ["boto"] options: state: description: From 4c0b1b42bc79634052b69557fc804f13e9557669 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 30 Mar 2015 20:15:40 -0400 Subject: [PATCH 155/224] minor fixes to gluster: - removed functions from main scope - renamed rebalance function to disambiguate from variable - updated docs with defaults - added exception handling to command execution --- system/gluster_volume.py | 317 ++++++++++++++++++++------------------- 1 file changed, 165 insertions(+), 152 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index d51512a1436..e78b1a1bfaa 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -38,27 +38,33 @@ options: use started/stopped to control it's availability. cluster: required: false + default: null description: - List of hosts to use for probing and brick setup host: required: false + default: null description: - Override local hostname (for peer probing purposes) replicas: required: false + default: null description: - Replica count for volume stripes: required: false + default: null description: - Stripe count for volume transport: required: false choices: [ 'tcp', 'rdma', 'tcp,rdma' ] + default: 'tcp' description: - Transport type for volume brick: required: false + default: null description: - Brick path on servers start_on_create: @@ -69,22 +75,27 @@ options: rebalance: choices: [ 'yes', 'no'] required: false + default: 'no' description: - Controls whether the cluster is rebalanced after changes directory: required: false + default: null description: - Directory for limit-usage options: required: false + default: null description: - A dictionary/hash with options/settings for the volume quota: required: false + default: null description: - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list) force: required: false + default: null description: - If brick is being created in the root 
partition, module will fail. Set force to true to override this behaviour @@ -119,165 +130,167 @@ import shutil import time import socket -def main(): - - def run_gluster(gargs, **kwargs): - args = [glusterbin] - args.extend(gargs) +def run_gluster(gargs, **kwargs): + args = [glusterbin] + args.extend(gargs) + try: rc, out, err = module.run_command(args, **kwargs) if rc != 0: module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err)) - return out - - def run_gluster_nofail(gargs, **kwargs): - args = [glusterbin] - args.extend(gargs) - rc, out, err = module.run_command(args, **kwargs) - if rc != 0: - return None - return out - - def run_gluster_yes(gargs): - args = [glusterbin] - args.extend(gargs) - rc, out, err = module.run_command(args, data='y\n') - if rc != 0: - module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err)) - return out - - def get_peers(): - out = run_gluster([ 'peer', 'status']) - i = 0 - peers = {} - hostname = None - uuid = None - state = None - for row in out.split('\n'): - if ': ' in row: - key, value = row.split(': ') - if key.lower() == 'hostname': - hostname = value - if key.lower() == 'uuid': - uuid = value - if key.lower() == 'state': - state = value - peers[hostname] = [ uuid, state ] - return peers - - def get_volumes(): - out = run_gluster([ 'volume', 'info' ]) - - volumes = {} - volume = {} - for row in out.split('\n'): - if ': ' in row: - key, value = row.split(': ') - if key.lower() == 'volume name': - volume['name'] = value + except Exception, e: + module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e)) + return out + +def run_gluster_nofail(gargs, **kwargs): + args = [glusterbin] + args.extend(gargs) + rc, out, err = module.run_command(args, **kwargs) + if rc != 0: + return None + return out + +def run_gluster_yes(gargs): + args = [glusterbin] + args.extend(gargs) + rc, out, 
err = module.run_command(args, data='y\n') + if rc != 0: + module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err)) + return out + +def get_peers(): + out = run_gluster([ 'peer', 'status']) + i = 0 + peers = {} + hostname = None + uuid = None + state = None + for row in out.split('\n'): + if ': ' in row: + key, value = row.split(': ') + if key.lower() == 'hostname': + hostname = value + if key.lower() == 'uuid': + uuid = value + if key.lower() == 'state': + state = value + peers[hostname] = [ uuid, state ] + return peers + +def get_volumes(): + out = run_gluster([ 'volume', 'info' ]) + + volumes = {} + volume = {} + for row in out.split('\n'): + if ': ' in row: + key, value = row.split(': ') + if key.lower() == 'volume name': + volume['name'] = value + volume['options'] = {} + volume['quota'] = False + if key.lower() == 'volume id': + volume['id'] = value + if key.lower() == 'status': + volume['status'] = value + if key.lower() == 'transport-type': + volume['transport'] = value + if key.lower() != 'bricks' and key.lower()[:5] == 'brick': + if not 'bricks' in volume: + volume['bricks'] = [] + volume['bricks'].append(value) + # Volume options + if '.' in key: + if not 'options' in volume: volume['options'] = {} - volume['quota'] = False - if key.lower() == 'volume id': - volume['id'] = value - if key.lower() == 'status': - volume['status'] = value - if key.lower() == 'transport-type': - volume['transport'] = value - if key.lower() != 'bricks' and key.lower()[:5] == 'brick': - if not 'bricks' in volume: - volume['bricks'] = [] - volume['bricks'].append(value) - # Volume options - if '.' 
in key: - if not 'options' in volume: - volume['options'] = {} - volume['options'][key] = value - if key == 'features.quota' and value == 'on': - volume['quota'] = True - else: - if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:': - if len(volume) > 0: - volumes[volume['name']] = volume - volume = {} - return volumes - - def get_quotas(name, nofail): - quotas = {} - if nofail: - out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ]) - if not out: - return quotas + volume['options'][key] = value + if key == 'features.quota' and value == 'on': + volume['quota'] = True else: - out = run_gluster([ 'volume', 'quota', name, 'list' ]) - for row in out.split('\n'): - if row[:1] == '/': - q = re.split('\s+', row) - quotas[q[0]] = q[1] - return quotas - - def wait_for_peer(host): - for x in range(0, 4): - peers = get_peers() - if host in peers and peers[host][1].lower().find('peer in cluster') != -1: - return True - time.sleep(1) - return False - - def probe(host): - run_gluster([ 'peer', 'probe', host ]) - if not wait_for_peer(host): - module.fail_json(msg='failed to probe peer %s' % host) - changed = True - - def probe_all_peers(hosts, peers, myhostname): - for host in hosts: - if host not in peers: - # dont probe ourselves - if myhostname != host: - probe(host) - - def create_volume(name, stripe, replica, transport, hosts, brick, force): - args = [ 'volume', 'create' ] - args.append(name) - if stripe: - args.append('stripe') - args.append(str(stripe)) - if replica: - args.append('replica') - args.append(str(replica)) - args.append('transport') - args.append(transport) - for host in hosts: - args.append(('%s:%s' % (host, brick))) - if force: - args.append('force') - run_gluster(args) - - def start_volume(name): - run_gluster([ 'volume', 'start', name ]) - - def stop_volume(name): - run_gluster_yes([ 'volume', 'stop', name ]) - - def set_volume_option(name, option, parameter): - run_gluster([ 'volume', 'set', name, option, parameter ]) - - def 
add_brick(name, brick, force): - args = [ 'volume', 'add-brick', name, brick ] - if force: - args.append('force') - run_gluster(args) - - def rebalance(name): - run_gluster(['volume', 'rebalance', name, 'start']) - - def enable_quota(name): - run_gluster([ 'volume', 'quota', name, 'enable' ]) - - def set_quota(name, directory, value): - run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ]) + if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:': + if len(volume) > 0: + volumes[volume['name']] = volume + volume = {} + return volumes +def get_quotas(name, nofail): + quotas = {} + if nofail: + out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ]) + if not out: + return quotas + else: + out = run_gluster([ 'volume', 'quota', name, 'list' ]) + for row in out.split('\n'): + if row[:1] == '/': + q = re.split('\s+', row) + quotas[q[0]] = q[1] + return quotas + +def wait_for_peer(host): + for x in range(0, 4): + peers = get_peers() + if host in peers and peers[host][1].lower().find('peer in cluster') != -1: + return True + time.sleep(1) + return False + +def probe(host): + run_gluster([ 'peer', 'probe', host ]) + if not wait_for_peer(host): + module.fail_json(msg='failed to probe peer %s' % host) + changed = True + +def probe_all_peers(hosts, peers, myhostname): + for host in hosts: + if host not in peers: + # dont probe ourselves + if myhostname != host: + probe(host) + +def create_volume(name, stripe, replica, transport, hosts, brick, force): + args = [ 'volume', 'create' ] + args.append(name) + if stripe: + args.append('stripe') + args.append(str(stripe)) + if replica: + args.append('replica') + args.append(str(replica)) + args.append('transport') + args.append(transport) + for host in hosts: + args.append(('%s:%s' % (host, brick))) + if force: + args.append('force') + run_gluster(args) + +def start_volume(name): + run_gluster([ 'volume', 'start', name ]) + +def stop_volume(name): + run_gluster_yes([ 'volume', 'stop', name ]) 
+ +def set_volume_option(name, option, parameter): + run_gluster([ 'volume', 'set', name, option, parameter ]) + +def add_brick(name, brick, force): + args = [ 'volume', 'add-brick', name, brick ] + if force: + args.append('force') + run_gluster(args) + +def do_rebalance(name): + run_gluster(['volume', 'rebalance', name, 'start']) + +def enable_quota(name): + run_gluster([ 'volume', 'quota', name, 'enable' ]) + +def set_quota(name, directory, value): + run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ]) + +def main(): ### MAIN ### module = AnsibleModule( @@ -403,7 +416,7 @@ def main(): if changed: volumes = get_volumes() if rebalance: - rebalance(volume_name) + do_rebalance(volume_name) facts = {} facts['glusterfs'] = { 'peers': peers, 'volumes': volumes, 'quotas': quotas } From ee7fbcf418a77cb8d840d8ed9c567b179212be4a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 30 Mar 2015 22:11:17 -0400 Subject: [PATCH 156/224] minor fix to method of finding home as previous could 'overmatch' --- cloud/lxc/lxc_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 1ae67bf23c6..8c708dc31cd 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -398,7 +398,7 @@ LXC_ANSIBLE_STATES = { # home directory of the user that was attached to the container and source # that users environment variables by default. 
ATTACH_TEMPLATE = """#!/usr/bin/env bash -pushd "$(grep $(whoami) /etc/passwd | awk -F':' '{print $6}')" +pushd "$(getent passwd $(whoami)|cut -f6 -d':')" if [[ -f ".bashrc" ]];then source .bashrc fi From 7794042cf65b075c9ca9bf4248df994bff94401f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 30 Mar 2015 22:30:58 -0400 Subject: [PATCH 157/224] fixed missing parens --- system/gluster_volume.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index e78b1a1bfaa..1669dddb81c 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -139,7 +139,7 @@ def run_gluster(gargs, **kwargs): if rc != 0: module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out if out != '' else err)) except Exception, e: - module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e)) + module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e))) return out def run_gluster_nofail(gargs, **kwargs): From 58aab881c29587a5795b6721c9b199d148fceb84 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 31 Mar 2015 12:12:19 +0200 Subject: [PATCH 158/224] cloudstack: add new module cloudstack_fw --- cloud/cloudstack/__init__.py | 0 cloud/cloudstack/cloudstack_fw.py | 267 ++++++++++++++++++++++++++++++ 2 files changed, 267 insertions(+) create mode 100644 cloud/cloudstack/__init__.py create mode 100644 cloud/cloudstack/cloudstack_fw.py diff --git a/cloud/cloudstack/__init__.py b/cloud/cloudstack/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/cloudstack/cloudstack_fw.py b/cloud/cloudstack/cloudstack_fw.py new file mode 100644 index 00000000000..0014f433c47 --- /dev/null +++ b/cloud/cloudstack/cloudstack_fw.py @@ -0,0 +1,267 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or 
modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +module: cloudstack_fw +short_description: Manages firewall rules on Apache CloudStack based clouds. +description: Creates and removes firewall rules. +version_added: '2.0' +author: René Moser +options: + ip_address: + description: + - Public IP address the rule is assigned to. + required: true + state: + description: + - State of the firewall rule. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + protocol: + description: + - Protocol of the firewall rule. + required: false + default: 'tcp' + choices: [ 'tcp', 'udp', 'icmp' ] + cidr: + description: + - CIDR (full notation) to be used for firewall rule. + required: false + default: '0.0.0.0\0' + start_port + description: + - Start port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). + required: false + default: null + end_port + description: + - End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). + required: false + default: null + icmp_type + description: + - Type of the icmp message being sent. Considered if C(protocol=icmp). + required: false + default: null + icmp_code + description: + - Error code for this icmp message. Considered if C(protocol=icmp). + required: false + default: null + project: + description: + - Name of the project. 
+ required: false + default: null +''' + +EXAMPLES = ''' +--- +# Allow inbound port 80/tcp from 1.2.3.4 to 4.3.2.1 +- local_action: + module: cloudstack_fw + ip_address: 4.3.2.1 + start_port: 80 + end_port: 80 + cidr: 1.2.3.4/32 + + +# Allow inbound tcp/udp port 53 to 4.3.2.1 +- local_action: + module: cloudstack_fw + ip_address: 4.3.2.1 + start_port: 53 + end_port: 53 + protocol: '{{ item }}' + with_items: + - tcp + - udp + + +# Ensure firewall rule is removed +- local_action: + module: cloudstack_fw + ip_address: 4.3.2.1 + start_port: 8000 + end_port: 8888 + cidr: 17.0.0.0/8 + state: absent +''' + +RETURN = ''' +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackFirewall(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.firewall_rule = None + + + def get_firewall_rule(self): + if not self.firewall_rule: + cidr = self.module.params.get('cidr') + protocol = self.module.params.get('protocol') + start_port = self.module.params.get('start_port') + end_port = self.module.params.get('end_port') + icmp_code = self.module.params.get('icmp_code') + icmp_type = self.module.params.get('icmp_type') + + if protocol in ['tcp', 'udp'] and not (start_port and end_port): + self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol) + + if protocol == 'icmp' and not icmp_type: + self.module.fail_json(msg="no icmp_type set") + + args = {} + args['ipaddressid'] = self.get_ip_address_id() + args['projectid'] = self.get_project_id() + + firewall_rules = self.cs.listFirewallRules(**args) + if firewall_rules and 'firewallrule' in firewall_rules: + for rule in firewall_rules['firewallrule']: + type_match = self._type_cidr_match(rule, cidr) + + protocol_match = 
self._tcp_udp_match(rule, protocol, start_port, end_port) \ + or self._icmp_match(rule, protocol, icmp_code, icmp_type) + + if type_match and protocol_match: + self.firewall_rule = rule + break + return self.firewall_rule + + + def _tcp_udp_match(self, rule, protocol, start_port, end_port): + return protocol in ['tcp', 'udp'] \ + and protocol == rule['protocol'] \ + and start_port == int(rule['startport']) \ + and end_port == int(rule['endport']) + + + def _icmp_match(self, rule, protocol, icmp_code, icmp_type): + return protocol == 'icmp' \ + and protocol == rule['protocol'] \ + and icmp_code == rule['icmpcode'] \ + and icmp_type == rule['icmptype'] + + + def _type_cidr_match(self, rule, cidr): + return cidr == rule['cidrlist'] + + + def create_firewall_rule(self): + firewall_rule = self.get_firewall_rule() + if not firewall_rule: + self.result['changed'] = True + args = {} + args['cidrlist'] = self.module.params.get('cidr') + args['protocol'] = self.module.params.get('protocol') + args['startport'] = self.module.params.get('start_port') + args['endport'] = self.module.params.get('end_port') + args['icmptype'] = self.module.params.get('icmp_type') + args['icmpcode'] = self.module.params.get('icmp_code') + args['ipaddressid'] = self.get_ip_address_id() + + if not self.module.check_mode: + firewall_rule = self.cs.createFirewallRule(**args) + + return firewall_rule + + + def remove_firewall_rule(self): + firewall_rule = self.get_firewall_rule() + if firewall_rule: + self.result['changed'] = True + args = {} + args['id'] = firewall_rule['id'] + + if not self.module.check_mode: + res = self.cs.deleteFirewallRule(**args) + + return firewall_rule + + + def get_result(self, firewall_rule): + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + ip_address = dict(required=True, default=None), + cidr = dict(default='0.0.0.0/0'), + protocol = dict(choices=['tcp', 'udp', 'icmp'], default='tcp'), + icmp_type = dict(type='int', default=None), + 
icmp_code = dict(type='int', default=None), + start_port = dict(type='int', default=None), + end_port = dict(type='int', default=None), + state = dict(choices=['present', 'absent'], default='present'), + project = dict(default=None), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + required_together = ( + ['start_port', 'end_port'], + ), + mutually_exclusive = ( + ['icmp_type', 'start_port'], + ['icmp_type', 'end_port'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_fw = AnsibleCloudStackFirewall(module) + + state = module.params.get('state') + if state in ['absent']: + fw_rule = acs_fw.remove_firewall_rule() + else: + fw_rule = acs_fw.create_firewall_rule() + + result = acs_fw.get_result(fw_rule) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 9f85ae16ae81fc6fb8de71ab652771b1e2808143 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 31 Mar 2015 13:33:57 +0200 Subject: [PATCH 159/224] cloudstack: add new module cloudstack_iso --- cloud/cloudstack/__init__.py | 0 cloud/cloudstack/cloudstack_iso.py | 322 +++++++++++++++++++++++++++++ 2 files changed, 322 insertions(+) create mode 100644 cloud/cloudstack/__init__.py create mode 100644 cloud/cloudstack/cloudstack_iso.py diff --git a/cloud/cloudstack/__init__.py b/cloud/cloudstack/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/cloudstack/cloudstack_iso.py b/cloud/cloudstack/cloudstack_iso.py new file mode 100644 index 00000000000..bd90c427ea4 --- /dev/null +++ b/cloud/cloudstack/cloudstack_iso.py @@ -0,0 +1,322 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible 
is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cloudstack_iso +short_description: Manages ISOs images on Apache CloudStack based clouds. +description: Register and remove ISO images. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Name of the ISO. + required: true + url: + description: + - URL where the ISO can be downloaded from. Required if C(state) is present. + required: false + default: null + os_type: + description: + - Name of the OS that best represents the OS of this ISO. If the iso is bootable this parameter needs to be passed. Required if C(state) is present. + required: false + default: null + is_ready: + description: + - This flag is used for searching existing ISOs. If set to C(true), it will only list ISO ready for deployment e.g. successfully downloaded and installed. Recommended to set it to C(false). + required: false + default: false + aliases: [] + is_public: + description: + - Register the ISO to be publicly available to all users. Only used if C(state) is present. + required: false + default: false + is_featured: + description: + - Register the ISO to be featured. Only used if C(state) is present. + required: false + default: false + is_dynamically_scalable: + description: + - Register the ISO having XS/VMWare tools installed inorder to support dynamic scaling of VM cpu/memory. Only used if C(state) is present. 
+ required: false + default: false + aliases: [] + checksum: + description: + - The MD5 checksum value of this ISO. If set, we search by checksum instead of name. + required: false + default: false + bootable: + description: + - Register the ISO to be bootable. Only used if C(state) is present. + required: false + default: true + project: + description: + - Name of the project the ISO to be registered in. + required: false + default: null + zone: + description: + - Name of the zone you wish the ISO to be registered or deleted from. If not specified, first zone found will be used. + required: false + default: null + iso_filter: + description: + - Name of the filter used to search for the ISO. + required: false + default: 'self' + choices: [ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ] + state: + description: + - State of the ISO. + required: false + default: 'present' + choices: [ 'present', 'absent' ] +''' + +EXAMPLES = ''' +--- +# Register an ISO if ISO name does not already exist. +- local_action: + module: cloudstack_iso + name: Debian 7 64-bit + url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso + os_type: Debian GNU/Linux 7(64-bit) + + +# Register an ISO with given name if ISO md5 checksum does not already exist. +- local_action: + module: cloudstack_iso + name: Debian 7 64-bit + url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso + os_type: + checksum: 0b31bccccb048d20b551f70830bb7ad0 + + +# Remove an ISO by name +- local_action: + module: cloudstack_iso + name: Debian 7 64-bit + state: absent + + +# Remove an ISO by checksum +- local_action: + module: cloudstack_iso + name: Debian 7 64-bit + checksum: 0b31bccccb048d20b551f70830bb7ad0 + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of the ISO. 
+ returned: success + type: string + sample: Debian 7 64-bit +displaytext: + description: Text to be displayed of the ISO. + returned: success + type: string + sample: Debian 7.7 64-bit minimal 2015-03-19 +zone: + description: Name of zone the ISO is registered in. + returned: success + type: string + sample: zuerich +status: + description: Status of the ISO. + returned: success + type: string + sample: Successfully Installed +is_ready: + description: True if the ISO is ready to be deployed from. + returned: success + type: boolean + sample: true +checksum: + description: MD5 checksum of the ISO. + returned: success + type: string + sample: 0b31bccccb048d20b551f70830bb7ad0 +created: + description: Date of registering. + returned: success + type: string + sample: 2015-03-29T14:57:06+0200 +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackIso(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.iso = None + + def register_iso(self): + iso = self.get_iso() + if not iso: + args = {} + args['zoneid'] = self.get_zone_id() + args['projectid'] = self.get_project_id() + + args['bootable'] = self.module.params.get('bootable') + args['ostypeid'] = self.get_os_type_id() + if args['bootable'] and not args['ostypeid']: + self.module.fail_json(msg="OS type 'os_type' is requried if 'bootable=true'.") + + args['url'] = self.module.params.get('url') + if not args['url']: + self.module.fail_json(msg="URL is requried.") + + args['name'] = self.module.params.get('name') + args['displaytext'] = self.module.params.get('name') + args['checksum'] = self.module.params.get('checksum') + args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable') + args['isfeatured'] = 
self.module.params.get('is_featured') + args['ispublic'] = self.module.params.get('is_public') + + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.registerIso(**args) + iso = res['iso'][0] + return iso + + + def get_iso(self): + if not self.iso: + args = {} + args['isready'] = self.module.params.get('is_ready') + args['isofilter'] = self.module.params.get('iso_filter') + args['projectid'] = self.get_project_id() + args['zoneid'] = self.get_zone_id() + + # if checksum is set, we only look on that. + checksum = self.module.params.get('checksum') + if not checksum: + args['name'] = self.module.params.get('name') + + isos = self.cs.listIsos(**args) + if isos: + if not checksum: + self.iso = isos['iso'][0] + else: + for i in isos['iso']: + if i['checksum'] == checksum: + self.iso = i + break + return self.iso + + + def remove_iso(self): + iso = self.get_iso() + if iso: + self.result['changed'] = True + args = {} + args['id'] = iso['id'] + args['projectid'] = self.get_project_id() + args['zoneid'] = self.get_zone_id() + if not self.module.check_mode: + res = self.cs.deleteIso(**args) + return iso + + + def get_result(self, iso): + if iso: + if 'displaytext' in iso: + self.result['displaytext'] = iso['displaytext'] + if 'name' in iso: + self.result['name'] = iso['name'] + if 'zonename' in iso: + self.result['zone'] = iso['zonename'] + if 'checksum' in iso: + self.result['checksum'] = iso['checksum'] + if 'status' in iso: + self.result['status'] = iso['status'] + if 'isready' in iso: + self.result['is_ready'] = iso['isready'] + if 'created' in iso: + self.result['created'] = iso['created'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + url = dict(default=None), + os_type = dict(default=None), + zone = dict(default=None), + iso_filter = dict(default='self', choices=[ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]), + 
project = dict(default=None), + checksum = dict(default=None), + is_ready = dict(choices=BOOLEANS, default=False), + bootable = dict(choices=BOOLEANS, default=True), + is_featured = dict(choices=BOOLEANS, default=False), + is_dynamically_scalable = dict(choices=BOOLEANS, default=False), + state = dict(choices=['present', 'absent'], default='present'), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_iso = AnsibleCloudStackIso(module) + + state = module.params.get('state') + if state in ['absent']: + iso = acs_iso.remove_iso() + else: + iso = acs_iso.register_iso() + + result = acs_iso.get_result(iso) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 60467738edba9ea50578da38092ef7d6063495a4 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Thu, 26 Mar 2015 19:52:36 +0100 Subject: [PATCH 160/224] cloudstack: add new module cloudstack_sshkey This module depends on ansible.module_utils.cloudstack. 
--- cloud/cloudstack/__init__.py | 0 cloud/cloudstack/cloudstack_sshkey.py | 210 ++++++++++++++++++++++++++ 2 files changed, 210 insertions(+) create mode 100644 cloud/cloudstack/__init__.py create mode 100644 cloud/cloudstack/cloudstack_sshkey.py diff --git a/cloud/cloudstack/__init__.py b/cloud/cloudstack/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cloudstack_sshkey.py new file mode 100644 index 00000000000..414ded6c971 --- /dev/null +++ b/cloud/cloudstack/cloudstack_sshkey.py @@ -0,0 +1,210 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cloudstack_sshkey +short_description: Manages SSH keys on Apache CloudStack based clouds. +description: + - If no public key is provided, a new ssh private/public key pair will be + created and the private key will be returned. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Name of public key. + required: true + default: null + aliases: [] + project: + description: + - Name of the project the public key to be registered in. + required: false + default: null + aliases: [] + state: + description: + - State of the public key. 
+ required: false + default: 'present' + choices: [ 'present', 'absent' ] + aliases: [] + public_key: + description: + - String of the public key. + required: false + default: null + aliases: [] +''' + +EXAMPLES = ''' +--- +# create a new private / public key pair: +- local_action: cloudstack_sshkey name=linus@example.com + register: key +- debug: msg='private key is {{ key.private_key }}' + +# remove a public key by its name: +- local_action: cloudstack_sshkey name=linus@example.com state=absent + +# register your existing local public key: +- local_action: cloudstack_sshkey name=linus@example.com public_key='{{ lookup('file', '~/.ssh/id_rsa.pub') }}' +''' + +RETURN = ''' +--- +name: + description: Name of the SSH public key. + returned: success + type: string + sample: linus@example.com +fingerprint: + description: Fingerprint of the SSH public key. + returned: success + type: string + sample: "86:5e:a3:e8:bd:95:7b:07:7c:c2:5c:f7:ad:8b:09:28" +private_key: + description: Private key of generated SSH keypair. 
+ returned: changed + type: string + sample: "-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQCkeFYjI+4k8bWfIRMzp4pCzhlopNydbbwRu824P5ilD4ATWMUG\nvEtuCQ2Mp5k5Bma30CdYHgh2/SbxC5RxXSUKTUJtTKpoJUy8PAhb1nn9dnfkC2oU\naRVi9NRUgypTIZxMpgooHOxvAzWxbZCyh1W+91Ld3FNaGxTLqTgeevY84wIDAQAB\nAoGAcwQwgLyUwsNB1vmjWwE0QEmvHS4FlhZyahhi4hGfZvbzAxSWHIK7YUT1c8KU\n9XsThEIN8aJ3GvcoL3OAqNKRnoNb14neejVHkYRadhxqc0GVN6AUIyCqoEMpvhFI\nQrinM572ORzv5ffRjCTbvZcYlW+sqFKNo5e8pYIB8TigpFECQQDu7bg9vkvg8xPs\nkP1K+EH0vsR6vUfy+m3euXjnbJtiP7RoTkZk0JQMOmexgy1qQhISWT0e451wd62v\nJ7M0trl5AkEAsDivJnMIlCCCypwPN4tdNUYpe9dtidR1zLmb3SA7wXk5xMUgLZI9\ncWPjBCMt0KKShdDhQ+hjXAyKQLF7iAPuOwJABjdHCMwvmy2XwhrPjCjDRoPEBtFv\n0sFzJE08+QBZVogDwIbwy+SlRWArnHGmN9J6N+H8dhZD3U4vxZPJ1MBAOQJBAJxO\nCv1dt1Q76gbwmYa49LnWO+F+2cgRTVODpr5iYt5fOmBQQRRqzFkRMkFvOqn+KVzM\nQ6LKM6dn8BEl295vLhUCQQCVDWzoSk3GjL3sOjfAUTyAj8VAXM69llaptxWWySPM\nE9pA+8rYmHfohYFx7FD5/KWCO+sfmxTNB48X0uwyE8tO\n-----END RSA PRIVATE KEY-----\n" +''' + + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +from ansible.module_utils.cloudstack import * + +class AnsibleCloudStackSshKey(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.ssh_key = None + + + def register_ssh_key(self): + ssh_key = self.get_ssh_key() + if not ssh_key: + self.result['changed'] = True + args = {} + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + args['publickey'] = self.module.params.get('public_key') + if not self.module.check_mode: + ssh_key = self.cs.registerSSHKeyPair(**args) + return ssh_key + + + def create_ssh_key(self): + ssh_key = self.get_ssh_key() + if not ssh_key: + self.result['changed'] = True + args = {} + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + if not self.module.check_mode: + res = self.cs.createSSHKeyPair(**args) + 
ssh_key = res['keypair'] + return ssh_key + + + def remove_ssh_key(self): + ssh_key = self.get_ssh_key() + if ssh_key: + self.result['changed'] = True + args = {} + args['name'] = self.module.params.get('name') + if not self.module.check_mode: + res = self.cs.deleteSSHKeyPair(**args) + return ssh_key + + + def get_ssh_key(self): + if not self.ssh_key: + args = {} + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + + ssh_keys = self.cs.listSSHKeyPairs(**args) + if ssh_keys and 'sshkeypair' in ssh_keys: + self.ssh_key = ssh_keys['sshkeypair'][0] + return self.ssh_key + + + def get_result(self, ssh_key): + if ssh_key: + if 'fingerprint' in ssh_key: + self.result['fingerprint'] = ssh_key['fingerprint'] + + if 'name' in ssh_key: + self.result['name'] = ssh_key['name'] + + if 'privatekey' in ssh_key: + self.result['private_key'] = ssh_key['privatekey'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, default=None), + public_key = dict(default=None), + project = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_sshkey = AnsibleCloudStackSshKey(module) + state = module.params.get('state') + if state in ['absent']: + ssh_key = acs_sshkey.remove_ssh_key() + else: + if module.params.get('public_key'): + ssh_key = acs_sshkey.register_ssh_key() + else: + ssh_key = acs_sshkey.create_ssh_key() + + result = acs_sshkey.get_result(ssh_key) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 
82e25447adeab1c7d464e64119b594c5386506f8 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 28 Mar 2015 10:58:02 +0100 Subject: [PATCH 161/224] cloudstack_ssh: fix missing projectid if state=absent --- cloud/cloudstack/cloudstack_sshkey.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cloudstack_sshkey.py index 414ded6c971..97d6a222f09 100644 --- a/cloud/cloudstack/cloudstack_sshkey.py +++ b/cloud/cloudstack/cloudstack_sshkey.py @@ -139,6 +139,7 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): self.result['changed'] = True args = {} args['name'] = self.module.params.get('name') + args['projectid'] = self.get_project_id() if not self.module.check_mode: res = self.cs.deleteSSHKeyPair(**args) return ssh_key From bf32de8d8f705692662108759c5d2077baf6ddba Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 28 Mar 2015 22:07:39 +0100 Subject: [PATCH 162/224] cloudstack_ssh: register_ssh_key() set public_key as param --- cloud/cloudstack/cloudstack_sshkey.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cloudstack_sshkey.py index 97d6a222f09..589e2783913 100644 --- a/cloud/cloudstack/cloudstack_sshkey.py +++ b/cloud/cloudstack/cloudstack_sshkey.py @@ -107,14 +107,14 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): self.ssh_key = None - def register_ssh_key(self): + def register_ssh_key(self, public_key): ssh_key = self.get_ssh_key() if not ssh_key: self.result['changed'] = True args = {} args['projectid'] = self.get_project_id() args['name'] = self.module.params.get('name') - args['publickey'] = self.module.params.get('public_key') + args['publickey'] = public_key if not self.module.check_mode: ssh_key = self.cs.registerSSHKeyPair(**args) return ssh_key @@ -194,8 +194,9 @@ def main(): if state in ['absent']: ssh_key = acs_sshkey.remove_ssh_key() else: - if module.params.get('public_key'): - ssh_key = 
acs_sshkey.register_ssh_key() + public_key = module.params.get('public_key') + if public_key: + ssh_key = acs_sshkey.register_ssh_key(public_key) else: ssh_key = acs_sshkey.create_ssh_key() From a24d691419c41f0e64b95d3560237f3829340917 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 28 Mar 2015 22:09:21 +0100 Subject: [PATCH 163/224] cloudstack_ssh: update description --- cloud/cloudstack/cloudstack_sshkey.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cloudstack_sshkey.py index 589e2783913..7e803be02e5 100644 --- a/cloud/cloudstack/cloudstack_sshkey.py +++ b/cloud/cloudstack/cloudstack_sshkey.py @@ -23,8 +23,8 @@ DOCUMENTATION = ''' module: cloudstack_sshkey short_description: Manages SSH keys on Apache CloudStack based clouds. description: - - If no public key is provided, a new ssh private/public key pair will be - created and the private key will be returned. + - If no key was found and no public key was provided and a new SSH + private/public key pair will be created and the private key will be returned. 
version_added: '2.0' author: René Moser options: From c03baa7ec64a6ae6acbd4176c24d1e757b88c42b Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 28 Mar 2015 22:12:19 +0100 Subject: [PATCH 164/224] cloudstack_ssh: replace ssh public key if fingerprints do not match --- cloud/cloudstack/cloudstack_sshkey.py | 38 ++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cloudstack_sshkey.py index 7e803be02e5..4f63a9d566b 100644 --- a/cloud/cloudstack/cloudstack_sshkey.py +++ b/cloud/cloudstack/cloudstack_sshkey.py @@ -95,6 +95,12 @@ try: except ImportError: has_lib_cs = False +try: + import sshpubkeys + has_lib_sshpubkeys = True +except ImportError: + has_lib_sshpubkeys = False + from ansible.module_utils.cloudstack import * class AnsibleCloudStackSshKey(AnsibleCloudStack): @@ -109,14 +115,30 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): def register_ssh_key(self, public_key): ssh_key = self.get_ssh_key() + + args = {} + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + + res = None if not ssh_key: self.result['changed'] = True - args = {} - args['projectid'] = self.get_project_id() - args['name'] = self.module.params.get('name') args['publickey'] = public_key if not self.module.check_mode: - ssh_key = self.cs.registerSSHKeyPair(**args) + res = self.cs.registerSSHKeyPair(**args) + + else: + fingerprint = self._get_ssh_fingerprint(public_key) + if ssh_key['fingerprint'] != fingerprint: + self.result['changed'] = True + if not self.module.check_mode: + self.cs.deleteSSHKeyPair(**args) + args['publickey'] = public_key + res = self.cs.registerSSHKeyPair(**args) + + if res and 'keypair' in res: + ssh_key = res['keypair'] + return ssh_key @@ -170,6 +192,11 @@ class AnsibleCloudStackSshKey(AnsibleCloudStack): return self.result + def _get_ssh_fingerprint(self, public_key): + key = sshpubkeys.SSHKey(public_key) + return key.hash() + + 
def main(): module = AnsibleModule( argument_spec = dict( @@ -188,6 +215,9 @@ def main(): if not has_lib_cs: module.fail_json(msg="python library cs required: pip install cs") + if not has_lib_sshpubkeys: + module.fail_json(msg="python library sshpubkeys required: pip install sshpubkeys") + try: acs_sshkey = AnsibleCloudStackSshKey(module) state = module.params.get('state') From 392feaea63f845f53c02f70ea4a7dd3e723f3ed9 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 31 Mar 2015 11:55:39 +0200 Subject: [PATCH 165/224] cloudstack_sshkey: cleanup docs --- cloud/cloudstack/cloudstack_sshkey.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cloudstack_sshkey.py index 4f63a9d566b..657e367fefe 100644 --- a/cloud/cloudstack/cloudstack_sshkey.py +++ b/cloud/cloudstack/cloudstack_sshkey.py @@ -32,27 +32,22 @@ options: description: - Name of public key. required: true - default: null - aliases: [] project: description: - Name of the project the public key to be registered in. required: false default: null - aliases: [] state: description: - State of the public key. required: false default: 'present' choices: [ 'present', 'absent' ] - aliases: [] public_key: description: - String of the public key. required: false default: null - aliases: [] ''' EXAMPLES = ''' From 39cff86e7b8161ac1ed6316059dd50965a2f1e1d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 09:33:59 -0400 Subject: [PATCH 166/224] fixed doc issues --- monitoring/zabbix_host.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 1c900a894e4..c7b8e52b9e7 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -74,8 +74,8 @@ options: interfaces: description: - List of interfaces to be created for the host (see example below). - - Available values are: dns, ip, main, port, type and useip. 
- - Please review the interface documentation for more information on the supported properties: + - 'Available values are: dns, ip, main, port, type and useip.' + - Please review the interface documentation for more information on the supported properties - https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface required: false default: [] From 1754c7a1cad03fdc359823d94b1627039bea45f9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 09:36:51 -0400 Subject: [PATCH 167/224] fixed doc issues on zabbix_screen --- monitoring/zabbix_screen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py index 4c58c32d47e..ada2b1c6ab0 100644 --- a/monitoring/zabbix_screen.py +++ b/monitoring/zabbix_screen.py @@ -54,7 +54,7 @@ options: - If the screen(s) already been added, the screen(s) name won't be updated. - When creating or updating screen(s), the screen_name, host_group are required. - When deleting screen(s), the screen_name is required. - - The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated as needed. + - 'The available states are: present(default) and absent. If the screen(s) already exists, and the state is not "absent", the screen(s) will just be updated as needed.' required: true notes: - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed. 
From eb04e45311683dba1d54c8e5db293a2d3877eb68 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 09:39:27 -0400 Subject: [PATCH 168/224] fixed doc issues with cloudstack_fw --- cloud/cloudstack/cloudstack_fw.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/cloud/cloudstack/cloudstack_fw.py b/cloud/cloudstack/cloudstack_fw.py index 0014f433c47..cb60c1cde64 100644 --- a/cloud/cloudstack/cloudstack_fw.py +++ b/cloud/cloudstack/cloudstack_fw.py @@ -45,23 +45,23 @@ options: description: - CIDR (full notation) to be used for firewall rule. required: false - default: '0.0.0.0\0' - start_port + default: '0.0.0.0/0' + start_port: description: - Start port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). required: false default: null - end_port + end_port: description: - End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). required: false default: null - icmp_type + icmp_type: description: - Type of the icmp message being sent. Considered if C(protocol=icmp). required: false default: null - icmp_code + icmp_code: description: - Error code for this icmp message. Considered if C(protocol=icmp). 
required: false @@ -106,9 +106,6 @@ EXAMPLES = ''' state: absent ''' -RETURN = ''' -''' - try: from cs import CloudStack, CloudStackException, read_config has_lib_cs = True From 759e618c4ca94295e612e94c23193331fd2a1006 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 10:44:34 -0400 Subject: [PATCH 169/224] vertica doc fixes --- database/vertica/vertica_configuration.py | 36 ++++++------- database/vertica/vertica_facts.py | 26 +++++----- database/vertica/vertica_role.py | 39 +++++++------- database/vertica/vertica_schema.py | 55 ++++++++++---------- database/vertica/vertica_user.py | 63 +++++++++++------------ 5 files changed, 103 insertions(+), 116 deletions(-) diff --git a/database/vertica/vertica_configuration.py b/database/vertica/vertica_configuration.py index c7bdb1001d6..ad74c0f23f2 100644 --- a/database/vertica/vertica_configuration.py +++ b/database/vertica/vertica_configuration.py @@ -22,59 +22,55 @@ module: vertica_configuration version_added: '2.0' short_description: Updates Vertica configuration parameters. description: - Updates Vertica configuration parameters. + - Updates Vertica configuration parameters. options: name: description: - Name of the parameter to update. + - Name of the parameter to update. required: true - default: null value: description: - Value of the parameter to be set. + - Value of the parameter to be set. required: true - default: null db: description: - Name of the Vertica database. + - Name of the Vertica database. required: false default: null cluster: description: - Name of the Vertica cluster. + - Name of the Vertica cluster. required: false default: localhost port: description: - Vertica cluster port to connect to. + - Vertica cluster port to connect to. required: false default: 5433 login_user: description: - The username used to authenticate with. + - The username used to authenticate with. required: false default: dbadmin login_password: description: - The password used to authenticate with. 
+ - The password used to authenticate with. required: false default: null notes: - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: [ 'unixODBC', 'pyodbc' ] author: Dariusz Owczarek """ EXAMPLES = """ -Examples: - - name: updating load_balance_policy vertica_configuration: name=failovertostandbyafter value='8 hours' """ diff --git a/database/vertica/vertica_facts.py b/database/vertica/vertica_facts.py index 4b963a4e377..b7e0ac4ad5a 100644 --- a/database/vertica/vertica_facts.py +++ b/database/vertica/vertica_facts.py @@ -22,11 +22,11 @@ module: vertica_facts version_added: '2.0' short_description: Gathers Vertica database facts. description: - Gathers Vertica database facts. 
+ - Gathers Vertica database facts. options: cluster: description: - Name of the cluster running the schema. + - Name of the cluster running the schema. required: false default: localhost port: @@ -36,28 +36,28 @@ options: default: 5433 db: description: - Name of the database running the schema. + - Name of the database running the schema. required: false default: null login_user: description: - The username used to authenticate with. + - The username used to authenticate with. required: false default: dbadmin login_password: description: - The password used to authenticate with. + - The password used to authenticate with. required: false default: null notes: - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. 
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: [ 'unixODBC', 'pyodbc' ] author: Dariusz Owczarek """ diff --git a/database/vertica/vertica_role.py b/database/vertica/vertica_role.py index 825bb1b07e9..ef56a58a866 100644 --- a/database/vertica/vertica_role.py +++ b/database/vertica/vertica_role.py @@ -22,66 +22,63 @@ module: vertica_role version_added: '2.0' short_description: Adds or removes Vertica database roles and assigns roles to them. description: - Adds or removes Vertica database role and, optionally, assign other roles. + - Adds or removes Vertica database role and, optionally, assign other roles. options: name: description: - Name of the role to add or remove. + - Name of the role to add or remove. required: true - default: null assigned_roles: description: - Comma separated list of roles to assign to the role. - [Alias I(assigned_role)] + - Comma separated list of roles to assign to the role. + aliases: ['assigned_role'] required: false default: null state: description: - Whether to create C(present), drop C(absent) or lock C(locked) a role. + - Whether to create C(present), drop C(absent) or lock C(locked) a role. required: false choices: ['present', 'absent'] default: present db: description: - Name of the Vertica database. + - Name of the Vertica database. required: false default: null cluster: description: - Name of the Vertica cluster. + - Name of the Vertica cluster. required: false default: localhost port: description: - Vertica cluster port to connect to. + - Vertica cluster port to connect to. required: false default: 5433 login_user: description: - The username used to authenticate with. 
+ - The username used to authenticate with. required: false default: dbadmin login_password: description: - The password used to authenticate with. + - The password used to authenticate with. required: false default: null notes: - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
requirements: [ 'unixODBC', 'pyodbc' ] author: Dariusz Owczarek """ EXAMPLES = """ -Examples: - - name: creating a new vertica role vertica_role: name=role_name db=db_name state=present diff --git a/database/vertica/vertica_schema.py b/database/vertica/vertica_schema.py index f3a75055d06..d0ed2ce05b0 100644 --- a/database/vertica/vertica_schema.py +++ b/database/vertica/vertica_schema.py @@ -22,82 +22,79 @@ module: vertica_schema version_added: '2.0' short_description: Adds or removes Vertica database schema and roles. description: - Adds or removes Vertica database schema and, optionally, roles - with schema access privileges. - A schema will not be removed until all the objects have been dropped. - In such a situation, if the module tries to remove the schema it - will fail and only remove roles created for the schema if they have - no dependencies. + - Adds or removes Vertica database schema and, optionally, roles + with schema access privileges. + - A schema will not be removed until all the objects have been dropped. + - In such a situation, if the module tries to remove the schema it + will fail and only remove roles created for the schema if they have + no dependencies. options: name: description: - Name of the schema to add or remove. + - Name of the schema to add or remove. required: true - default: null usage_roles: description: - Comma separated list of roles to create and grant usage access to the schema. - [Alias I(usage_role)] + - Comma separated list of roles to create and grant usage access to the schema. + aliases: ['usage_role'] required: false default: null create_roles: description: - Comma separated list of roles to create and grant usage and create access to the schema. - [Alias I(create_role)] + - Comma separated list of roles to create and grant usage and create access to the schema. + aliases: ['create_role'] required: false default: null owner: description: - Name of the user to set as owner of the schema. 
+ - Name of the user to set as owner of the schema. required: false default: null state: description: - Whether to create C(present), or drop C(absent) a schema. + - Whether to create C(present), or drop C(absent) a schema. required: false default: present choices: ['present', 'absent'] db: description: - Name of the Vertica database. + - Name of the Vertica database. required: false default: null cluster: description: - Name of the Vertica cluster. + - Name of the Vertica cluster. required: false default: localhost port: description: - Vertica cluster port to connect to. + - Vertica cluster port to connect to. required: false default: 5433 login_user: description: - The username used to authenticate with. + - The username used to authenticate with. required: false default: dbadmin login_password: description: - The password used to authenticate with. + - The password used to authenticate with. required: false default: null notes: - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. 
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: [ 'unixODBC', 'pyodbc' ] author: Dariusz Owczarek """ EXAMPLES = """ -Examples: - - name: creating a new vertica schema vertica_schema: name=schema_name db=db_name state=present diff --git a/database/vertica/vertica_user.py b/database/vertica/vertica_user.py index 1d72deca617..a011bf35adb 100644 --- a/database/vertica/vertica_user.py +++ b/database/vertica/vertica_user.py @@ -22,98 +22,95 @@ module: vertica_user version_added: '2.0' short_description: Adds or removes Vertica database users and assigns roles. description: - Adds or removes Vertica database user and, optionally, assigns roles. - A user will not be removed until all the dependencies have been dropped. - In such a situation, if the module tries to remove the user it - will fail and only remove roles granted to the user. + - Adds or removes Vertica database user and, optionally, assigns roles. + - A user will not be removed until all the dependencies have been dropped. + - In such a situation, if the module tries to remove the user it + will fail and only remove roles granted to the user. options: name: description: - Name of the user to add or remove. + - Name of the user to add or remove. required: true - default: null profile: description: - Sets the user's profile. + - Sets the user's profile. required: false default: null resource_pool: description: - Sets the user's resource pool. + - Sets the user's resource pool. required: false default: null password: description: - The user's password encrypted by the MD5 algorithm. 
- The password must be generated with the format C("md5" + md5[password + username]), - resulting in a total of 35 characters. An easy way to do this is by querying - the Vertica database with select 'md5'||md5(''). + - The user's password encrypted by the MD5 algorithm. + - The password must be generated with the format C("md5" + md5[password + username]), + resulting in a total of 35 characters. An easy way to do this is by querying + the Vertica database with select 'md5'||md5(''). required: false default: null expired: description: - Sets the user's password expiration. + - Sets the user's password expiration. required: false default: null ldap: description: - Set to true if users are authenticated via LDAP. - The user will be created with password expired and set to I($ldap$). + - Set to true if users are authenticated via LDAP. + - The user will be created with password expired and set to I($ldap$). required: false default: null roles: description: - Comma separated list of roles to assign to the user. - [Alias I(role)] + - Comma separated list of roles to assign to the user. + aliases: ['role'] required: false default: null state: description: - Whether to create C(present), drop C(absent) or lock C(locked) a user. + - Whether to create C(present), drop C(absent) or lock C(locked) a user. required: false choices: ['present', 'absent', 'locked'] default: present db: description: - Name of the Vertica database. + - Name of the Vertica database. required: false default: null cluster: description: - Name of the Vertica cluster. + - Name of the Vertica cluster. required: false default: localhost port: description: - Vertica cluster port to connect to. + - Vertica cluster port to connect to. required: false default: 5433 login_user: description: - The username used to authenticate with. + - The username used to authenticate with. required: false default: dbadmin login_password: description: - The password used to authenticate with. 
+ - The password used to authenticate with. required: false default: null notes: - The default authentication assumes that you are either logging in as or sudo'ing - to the C(dbadmin) account on the host. - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) - to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) - and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) - to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). + - The default authentication assumes that you are either logging in as or sudo'ing + to the C(dbadmin) account on the host. + - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure + that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) + to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) + and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) + to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). requirements: [ 'unixODBC', 'pyodbc' ] author: Dariusz Owczarek """ EXAMPLES = """ -Examples: - - name: creating a new vertica user with password vertica_user: name=user_name password=md5 db=db_name state=present From 30cf73e83a7a9d13faf88f5527581c11f605f317 Mon Sep 17 00:00:00 2001 From: Matthew Vernon Date: Tue, 31 Mar 2015 17:19:11 +0100 Subject: [PATCH 170/224] Fix for issue #353 (handle change in ssh-keygen behaviour) Prior to openssh 6.4, ssh-keygen -F returned 0 (and no output) when no host was found. After then, it instead returned 1 and no output. 
This revised code behaves correctly with either behaviour. There is currently no other code path that results in exit(1) and no output. --- system/known_hosts.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/system/known_hosts.py b/system/known_hosts.py index 893eca3dcb7..c2030758cc8 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -188,10 +188,14 @@ def search_for_host_key(module,host,key,path,sshkeygen): replace=False if os.path.exists(path)==False: return False, False + #openssh >=6.4 has changed ssh-keygen behaviour such that it returns + #1 if no host is found, whereas previously it returned 0 rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,'-f',path], - check_rc=True) - if stdout=='': #host not found - return False, False + check_rc=False) + if stdout=='' and stderr=='' and (rc==0 or rc==1): + return False, False #host not found, no other errors + if rc!=0: #something went wrong + module.fail_json(msg="ssh-keygen failed (rc=%d,stdout='%s',stderr='%s')" % (rc,stdout,stderr)) #If user supplied no key, we don't want to try and replace anything with it if key is None: From f901fd0160309d9fc2c84e849d34846fe94b35c8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 14:29:38 -0400 Subject: [PATCH 171/224] glusterbin needs to be global --- system/gluster_volume.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 1669dddb81c..e04df48d5f4 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -130,8 +130,10 @@ import shutil import time import socket +glusterbin = '' def run_gluster(gargs, **kwargs): + global glusterbin args = [glusterbin] args.extend(gargs) try: @@ -143,6 +145,7 @@ def run_gluster(gargs, **kwargs): return out def run_gluster_nofail(gargs, **kwargs): + global glusterbin args = [glusterbin] args.extend(gargs) rc, out, err = module.run_command(args, **kwargs) @@ -151,6 +154,7 @@ def 
run_gluster_nofail(gargs, **kwargs): return out def run_gluster_yes(gargs): + global glusterbin args = [glusterbin] args.extend(gargs) rc, out, err = module.run_command(args, data='y\n') @@ -312,6 +316,7 @@ def main(): ) ) + global glusterbin glusterbin = module.get_bin_path('gluster', True) changed = False From 42e761df4a37be65f93ef729ccc59739e2123a20 Mon Sep 17 00:00:00 2001 From: Joe Ray Date: Wed, 1 Apr 2015 17:17:52 +0100 Subject: [PATCH 172/224] Import boto.ec2 in sns to allow boto profiles to be used --- notification/sns.py | 1 + 1 file changed, 1 insertion(+) diff --git a/notification/sns.py b/notification/sns.py index f2ed178554e..54421b0e9fa 100644 --- a/notification/sns.py +++ b/notification/sns.py @@ -105,6 +105,7 @@ from ansible.module_utils.ec2 import * try: import boto + import boto.ec2 import boto.sns except ImportError: print "failed=True msg='boto required for this module'" From 89284bcce012931340ee5212049f92795c90718f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 1 Apr 2015 20:44:28 -0400 Subject: [PATCH 173/224] now also captrure OSErrors on temp file fixes #360 --- system/known_hosts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/known_hosts.py b/system/known_hosts.py index c2030758cc8..b332528ed19 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -137,11 +137,11 @@ def enforce_state(module, params): outf.write(key) outf.close() module.atomic_move(outf.name,path) - except IOError,e: + except (IOError,OSError),e: module.fail_json(msg="Failed to write to file %s: %s" % \ (path,str(e))) params['changed'] = True - + return params def sanity_check(module,host,key,sshkeygen): From 34312759213fb003438e89c7ce48aa26b329eea3 Mon Sep 17 00:00:00 2001 From: Andy Hill Date: Thu, 2 Apr 2015 13:44:07 -0400 Subject: [PATCH 174/224] bigip_facts: Add missing "device" option The device option was already implemented but omitted from docs and allowed choices. 
With the addition of device, a devices failover_state can be determined. --- network/f5/bigip_facts.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index 99a1e31de68..866119e94c3 100755 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -70,8 +70,8 @@ options: required: true default: null choices: ['address_class', 'certificate', 'client_ssl_profile', - 'device_group', 'interface', 'key', 'node', 'pool', 'rule', - 'self_ip', 'software', 'system_info', 'traffic_group', + 'device', 'device_group', 'interface', 'key', 'node', 'pool', + 'rule', 'self_ip', 'software', 'system_info', 'traffic_group', 'trunk', 'virtual_address', 'virtual_server', 'vlan'] aliases: [] filter: @@ -1593,8 +1593,8 @@ def main(): regex = None include = map(lambda x: x.lower(), module.params['include']) valid_includes = ('address_class', 'certificate', 'client_ssl_profile', - 'device_group', 'interface', 'key', 'node', 'pool', - 'rule', 'self_ip', 'software', 'system_info', + 'device', 'device_group', 'interface', 'key', 'node', + 'pool', 'rule', 'self_ip', 'software', 'system_info', 'traffic_group', 'trunk', 'virtual_address', 'virtual_server', 'vlan') include_test = map(lambda x: x in valid_includes, include) From 44280e461cdc9ecaf2696a0f076bbdde807b6e02 Mon Sep 17 00:00:00 2001 From: Gregory Haynes Date: Wed, 1 Apr 2015 11:44:05 -0700 Subject: [PATCH 175/224] Add type property to zypper To install patterns and other package types a type parameter needs to be specified to zypper. 
--- packaging/os/zypper.py | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index a6fdc5e7189..5daec8d1429 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -50,6 +50,12 @@ options: required: false choices: [ present, latest, absent ] default: "present" + type: + description: + - The type of package to be operated on. + required: false + choices: [ package, patch, pattern, product, srcpackage ] + default: "package" disable_gpg_check: description: - Whether to disable to GPG signature checking of the package @@ -148,7 +154,7 @@ def get_package_state(m, packages): return installed_state # Function used to make sure a package is present. -def package_present(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper): +def package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper): packages = [] for package in name: if installed_state[package] is False: @@ -158,7 +164,7 @@ def package_present(m, name, installed_state, disable_gpg_check, disable_recomme # add global options before zypper command if disable_gpg_check: cmd.append('--no-gpg-checks') - cmd.extend(['install', '--auto-agree-with-licenses']) + cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type]) # add install parameter if disable_recommends and not old_zypper: cmd.append('--no-recommends') @@ -178,10 +184,10 @@ def package_present(m, name, installed_state, disable_gpg_check, disable_recomme return (rc, stdout, stderr, changed) # Function used to make sure a package is the latest available version. 
-def package_latest(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper): +def package_latest(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper): # first of all, make sure all the packages are installed - (rc, stdout, stderr, changed) = package_present(m, name, installed_state, disable_gpg_check, disable_recommends, old_zypper) + (rc, stdout, stderr, changed) = package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper) # if we've already made a change, we don't have to check whether a version changed if not changed: @@ -193,9 +199,9 @@ def package_latest(m, name, installed_state, disable_gpg_check, disable_recommen cmd.append('--no-gpg-checks') if old_zypper: - cmd.extend(['install', '--auto-agree-with-licenses']) + cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type]) else: - cmd.extend(['update', '--auto-agree-with-licenses']) + cmd.extend(['update', '--auto-agree-with-licenses', '-t', package_type]) cmd.extend(name) rc, stdout, stderr = m.run_command(cmd, check_rc=False) @@ -209,13 +215,13 @@ def package_latest(m, name, installed_state, disable_gpg_check, disable_recommen return (rc, stdout, stderr, changed) # Function used to make sure a package is not installed. 
-def package_absent(m, name, installed_state, old_zypper): +def package_absent(m, name, installed_state, package_type, old_zypper): packages = [] for package in name: if installed_state[package] is True: packages.append(package) if len(packages) != 0: - cmd = ['/usr/bin/zypper', '--non-interactive', 'remove'] + cmd = ['/usr/bin/zypper', '--non-interactive', 'remove', '-t', package_type] cmd.extend(packages) rc, stdout, stderr = m.run_command(cmd) @@ -239,6 +245,7 @@ def main(): argument_spec = dict( name = dict(required=True, aliases=['pkg'], type='list'), state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), + type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage']), disable_gpg_check = dict(required=False, default='no', type='bool'), disable_recommends = dict(required=False, default='yes', type='bool'), ), @@ -250,6 +257,7 @@ def main(): name = params['name'] state = params['state'] + type_ = params['type'] disable_gpg_check = params['disable_gpg_check'] disable_recommends = params['disable_recommends'] @@ -272,11 +280,11 @@ def main(): # Perform requested action if state in ['installed', 'present']: - (rc, stdout, stderr, changed) = package_present(module, name, installed_state, disable_gpg_check, disable_recommends, old_zypper) + (rc, stdout, stderr, changed) = package_present(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper) elif state in ['absent', 'removed']: - (rc, stdout, stderr, changed) = package_absent(module, name, installed_state, old_zypper) + (rc, stdout, stderr, changed) = package_absent(module, name, installed_state, type_, old_zypper) elif state == 'latest': - (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, disable_gpg_check, disable_recommends, old_zypper) + (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, type_, disable_gpg_check, 
disable_recommends, old_zypper) if rc != 0: if stderr: From 7c9217a8f780b3617847891df2cd333915386bea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Wed, 8 Apr 2015 18:33:40 +0200 Subject: [PATCH 176/224] [lldp] Merge wrapped lines. Some devices return their description on multiple lines such as: lldp.eth0.chassis.descr=cisco CISCO7609-S running on Cisco IOS Software, c7600s72033_rp Software (c7600s72033_rp-IPSERVICESK9-M), Version 12.2(33)SRE3, RELEASE SOFTWARE (fc1) Technical Support: http://www.cisco.com/techsupport Copyright (c) 1986-2011 by Cisco Systems, Inc. Compiled Wed 26-Jan-11 06:54 by prod_rel_team The generated fact will result as: "descr": "cisco CISCO7609-S running on" This patch fixes the line wrapping to return the full description handling line breaks: "descr": "cisco CISCO7609-S running on\nCisco IOS Software, c7600s72033_rp Software (c7600s72033_rp-IPSERVICESK9-M), Version 12.2(33)SRE3, RELEASE SOFTWARE (fc1)\nTechnical Support: http://www.cisco.com/techsupport\nCopyright (c) 1986-2011 by Cisco Systems, Inc.\nCompiled Wed 26-Jan-11 06:54 by prod_rel_team" --- network/lldp.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/network/lldp.py b/network/lldp.py index d30fa5d9a60..ea6dc78d7bc 100755 --- a/network/lldp.py +++ b/network/lldp.py @@ -58,6 +58,8 @@ def gather_lldp(): path, value = entry.strip().split("=", 1) path = path.split(".") path_components, final = path[:-1], path[-1] + else: + value = current_dict[final] + '\n' + entry current_dict = output_dict for path_component in path_components: From 1f5e243acf2534c112c0f3fd01f67110635264ad Mon Sep 17 00:00:00 2001 From: mcameron Date: Wed, 8 Apr 2015 17:33:04 +0100 Subject: [PATCH 177/224] Revert "system/lvol: Suppress prompts from lvcreate" This reverts commit f8d04bec1bbdfb0e61e6d3255b16b5bfe23b42f1. 
--- system/lvol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/lvol.py b/system/lvol.py index b14fd33c8e4..d9be9e7dc70 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -187,7 +187,7 @@ def main(): changed = True else: lvcreate_cmd = module.get_bin_path("lvcreate", required=True) - rc, _, err = module.run_command("%s --yes -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg)) + rc, _, err = module.run_command("%s -n %s -%s %s%s %s" % (lvcreate_cmd, lv, size_opt, size, size_unit, vg)) if rc == 0: changed = True else: From 8e24529970c13738d9f397dc7da8204ab3eec97c Mon Sep 17 00:00:00 2001 From: Simon Olofsson Date: Wed, 11 Feb 2015 23:24:36 +0100 Subject: [PATCH 178/224] homebrew: Package name is not required. e.g. `- homebrew: update_homebrew=yes upgrade_all=yes' is a valid task. --- packaging/os/homebrew.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py index 2ecac0c4ace..aac4efd827e 100644 --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -31,7 +31,8 @@ options: name: description: - name of package to install/remove - required: true + required: false + default: None state: description: - state of the package @@ -48,7 +49,7 @@ options: description: - upgrade all homebrew packages required: false - default: no + default: "no" choices: [ "yes", "no" ] install_options: description: From 14bb55e67e92593ce3498f1cbd016847c2344ad0 Mon Sep 17 00:00:00 2001 From: rhorer Date: Fri, 10 Apr 2015 15:27:34 -0500 Subject: [PATCH 179/224] Update twilio.py module name in Examples --- notification/twilio.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/notification/twilio.py b/notification/twilio.py index 8969c28aa50..e50879cd62d 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -57,14 +57,14 @@ author: Matt Makai EXAMPLES = ''' # send a text message from the local server about the build status to (555) 
303 5681 # note: you have to have purchased the 'from_number' on your Twilio account -- local_action: text msg="All servers with webserver role are now configured." +- local_action: twilio msg="All servers with webserver role are now configured." account_sid={{ twilio_account_sid }} auth_token={{ twilio_auth_token }} from_number=+15552014545 to_number=+15553035681 # send a text message from a server to (555) 111 3232 # note: you have to have purchased the 'from_number' on your Twilio account -- text: msg="This server's configuration is now complete." +- twilio: msg="This server's configuration is now complete." account_sid={{ twilio_account_sid }} auth_token={{ twilio_auth_token }} from_number=+15553258899 to_number=+15551113232 From eaa136cb083697824e71e4dd0401f1dee9091cc2 Mon Sep 17 00:00:00 2001 From: Jeferson Daniel Date: Sat, 11 Apr 2015 16:00:43 -0300 Subject: [PATCH 180/224] Fixes #335 --- packaging/language/bower.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/bower.py b/packaging/language/bower.py index 3fccf51056b..085f454e639 100644 --- a/packaging/language/bower.py +++ b/packaging/language/bower.py @@ -108,7 +108,7 @@ class Bower(object): return '' def list(self): - cmd = ['list', '--json'] + cmd = ['list', '--json', '--config.interactive=false', '--allow-root'] installed = list() missing = list() From e19b53532b4750002dfb52aa930b77e378cf2f68 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 4 Apr 2015 00:03:24 +0200 Subject: [PATCH 181/224] cloudstack: add new module cs_affinitygroup This module depends on ansible.module_utils.cloudstack. 
--- cloud/cloudstack/cs_affinitygroup.py | 230 +++++++++++++++++++++++++++ 1 file changed, 230 insertions(+) create mode 100644 cloud/cloudstack/cs_affinitygroup.py diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py new file mode 100644 index 00000000000..59c21ee46f6 --- /dev/null +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -0,0 +1,230 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_affinitygroup +short_description: Manages affinity groups on Apache CloudStack based clouds. +description: Create and remove affinity groups. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Name of the affinity group. + required: true + affinty_type: + description: + - Type of the affinity group. If not specified, first found affinity type is used. + required: false + default: null + description: + description: + - Description of the affinity group. + required: false + default: null + state: + description: + - State of the affinity group. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + poll_async: + description: + - Poll async jobs until job has finished. 
+ required: false + default: true +''' + +EXAMPLES = ''' +--- +# Create a affinity group +- local_action: + module: cs_affinitygroup + name: haproxy + affinty_type: host anti-affinity + + +# Remove a affinity group +- local_action: + module: cs_affinitygroup + name: haproxy + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of affinity group. + returned: success + type: string + sample: app +description: + description: Description of affinity group. + returned: success + type: string + sample: application affinity group +affinity_type: + description: Type of affinity group. + returned: success + type: string + sample: host anti-affinity +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackAffinityGroup(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.affinity_group = None + + + def get_affinity_group(self): + if not self.affinity_group: + affinity_group_name = self.module.params.get('name') + + affinity_groups = self.cs.listAffinityGroups() + if affinity_groups: + for a in affinity_groups['affinitygroup']: + if a['name'] == affinity_group_name: + self.affinity_group = a + break + return self.affinity_group + + + def get_affinity_type(self): + affinity_type = self.module.params.get('affinty_type') + + affinity_types = self.cs.listAffinityGroupTypes() + if affinity_types: + if not affinity_type: + return affinity_types['affinityGroupType'][0]['type'] + + for a in affinity_types['affinityGroupType']: + if a['type'] == affinity_type: + return a['type'] + self.module.fail_json(msg="affinity group type '%s' not found" % affinity_type) + + + def create_affinity_group(self): + affinity_group = self.get_affinity_group() + if not affinity_group: + self.result['changed'] = True + 
+ args = {} + args['name'] = self.module.params.get('name') + args['type'] = self.get_affinity_type() + args['description'] = self.module.params.get('description') + + if not self.module.check_mode: + res = self.cs.createAffinityGroup(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + affinity_group = self._poll_job(res, 'affinitygroup') + + return affinity_group + + + def remove_affinity_group(self): + affinity_group = self.get_affinity_group() + if affinity_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + + if not self.module.check_mode: + res = self.cs.deleteAffinityGroup(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'affinitygroup') + + return affinity_group + + + def get_result(self, affinity_group): + if affinity_group: + if 'name' in affinity_group: + self.result['name'] = affinity_group['name'] + if 'description' in affinity_group: + self.result['description'] = affinity_group['description'] + if 'type' in affinity_group: + self.result['affinity_type'] = affinity_group['type'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + affinty_type = dict(default=None), + description = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_ag = AnsibleCloudStackAffinityGroup(module) + + state = 
module.params.get('state') + if state in ['absent']: + affinity_group = acs_ag.remove_affinity_group() + else: + affinity_group = acs_ag.create_affinity_group() + + result = acs_ag.get_result(affinity_group) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From df23b4d17b0c7de5567b14f5dcfd9d46643b3032 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 31 Mar 2015 21:31:42 +0200 Subject: [PATCH 182/224] cloudstack: add new module cs_securitygroup This module depends on ansible.module_utils.cloudstack. --- cloud/cloudstack/cs_securitygroup.py | 195 +++++++++++++++++++++++++++ 1 file changed, 195 insertions(+) create mode 100644 cloud/cloudstack/cs_securitygroup.py diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py new file mode 100644 index 00000000000..4e2856d5a90 --- /dev/null +++ b/cloud/cloudstack/cs_securitygroup.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +module: cs_securitygroup +short_description: Manages security groups on Apache CloudStack based clouds. +description: Create and remove security groups. 
+version_added: '2.0' +author: René Moser +options: + name: + description: + - Name of the security group. + required: true + description: + description: + - Description of the security group. + required: false + default: null + state: + description: + - State of the security group. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + project: + description: + - Name of the project the security group to be created in. + required: false + default: null +''' + +EXAMPLES = ''' +--- +# Create a security group +- local_action: + module: cs_securitygroup + name: default + description: default security group + + +# Remove a security group +- local_action: + module: cs_securitygroup + name: default + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of security group. + returned: success + type: string + sample: app +description: + description: Description of security group. + returned: success + type: string + sample: application security group +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackSecurityGroup(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + self.security_group = None + + + def get_security_group(self): + if not self.security_group: + sg_name = self.module.params.get('name') + args = {} + args['projectid'] = self.get_project_id() + sgs = self.cs.listSecurityGroups(**args) + if sgs: + for s in sgs['securitygroup']: + if s['name'] == sg_name: + self.security_group = s + break + return self.security_group + + + def create_security_group(self): + security_group = self.get_security_group() + if not security_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['projectid'] = 
self.get_project_id() + args['description'] = self.module.params.get('description') + + if not self.module.check_mode: + res = self.cs.createSecurityGroup(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + security_group = res['securitygroup'] + + return security_group + + + def remove_security_group(self): + security_group = self.get_security_group() + if security_group: + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['projectid'] = self.get_project_id() + + if not self.module.check_mode: + res = self.cs.deleteSecurityGroup(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + return security_group + + + def get_result(self, security_group): + if security_group: + if 'name' in security_group: + self.result['name'] = security_group['name'] + if 'description' in security_group: + self.result['description'] = security_group['description'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + description = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + project = dict(default=None), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_sg = AnsibleCloudStackSecurityGroup(module) + + state = module.params.get('state') + if state in ['absent']: + sg = acs_sg.remove_security_group() + else: + sg = acs_sg.create_security_group() + + result = acs_sg.get_result(sg) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 
b8056e8f6f71ef4270910e57e38e0fc98ad52412 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 3 Apr 2015 22:27:42 +0200 Subject: [PATCH 183/224] cloudstack: add new module cs_securitygroup_rule This module depends on ansible.module_utils.cloudstack. --- cloud/cloudstack/cs_securitygroup_rule.py | 437 ++++++++++++++++++++++ 1 file changed, 437 insertions(+) create mode 100644 cloud/cloudstack/cs_securitygroup_rule.py diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py new file mode 100644 index 00000000000..a170230acac --- /dev/null +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -0,0 +1,437 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_securitygroup_rule +short_description: Manages security group rules on Apache CloudStack based clouds. +description: Add and remove security group rules. +version_added: '2.0' +author: René Moser +options: + security_group: + description: + - Name of the security group the rule is related to. The security group must be existing. + required: true + state: + description: + - State of the security group rule. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + protocol: + description: + - Protocol of the security group rule. 
+ required: false
+ default: 'tcp'
+ choices: [ 'tcp', 'udp', 'icmp', 'ah', 'esp', 'gre' ]
+ type:
+ description:
+ - Ingress or egress security group rule.
+ required: false
+ default: 'ingress'
+ choices: [ 'ingress', 'egress' ]
+ cidr:
+ description:
+ - CIDR (full notation) to be used for security group rule.
+ required: false
+ default: '0.0.0.0/0'
+ user_security_group:
+ description:
+ - Security group this rule is based on.
+ required: false
+ default: null
+ start_port:
+ description:
+ - Start port for this rule. Required if C(protocol=tcp) or C(protocol=udp).
+ required: false
+ default: null
+ aliases: [ 'port' ]
+ end_port:
+ description:
+ - End port for this rule. Required if C(protocol=tcp) or C(protocol=udp), but C(start_port) will be used if not set.
+ required: false
+ default: null
+ icmp_type:
+ description:
+ - Type of the icmp message being sent. Required if C(protocol=icmp).
+ required: false
+ default: null
+ icmp_code:
+ description:
+ - Error code for this icmp message. Required if C(protocol=icmp).
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the security group to be created in.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished. 
+ required: false + default: true +''' + +EXAMPLES = ''' +--- +# Allow inbound port 80/tcp from 1.2.3.4 added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + port: 80 + cidr: 1.2.3.4/32 + + +# Allow tcp/udp outbound added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + type: egress + start_port: 1 + end_port: 65535 + protocol: '{{ item }}' + with_items: + - tcp + - udp + + +# Allow inbound icmp from 0.0.0.0/0 added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + protocol: icmp + icmp_code: -1 + icmp_type: -1 + + +# Remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + port: 80 + state: absent + + +# Allow inbound port 80/tcp from security group web added to security group 'default' +- local_action: + module: cs_securitygroup_rule + security_group: default + port: 80 + user_security_group: web +''' + +RETURN = ''' +--- +security_group: + description: security group of the rule. + returned: success + type: string + sample: default +type: + description: type of the rule. + returned: success + type: string + sample: ingress +cidr: + description: CIDR of the rule. + returned: success and cidr is defined + type: string + sample: 0.0.0.0/0 +user_security_group: + description: user security group of the rule. + returned: success and user_security_group is defined + type: string + sample: default +protocol: + description: protocol of the rule. + returned: success + type: string + sample: tcp +start_port: + description: start port of the rule. + returned: success + type: int + sample: 80 +end_port: + description: end port of the rule. 
+ returned: success + type: int + sample: 80 +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + + + def _tcp_udp_match(self, rule, protocol, start_port, end_port): + return protocol in ['tcp', 'udp'] \ + and protocol == rule['protocol'] \ + and start_port == int(rule['startport']) \ + and end_port == int(rule['endport']) + + + def _icmp_match(self, rule, protocol, icmp_code, icmp_type): + return protocol == 'icmp' \ + and protocol == rule['protocol'] \ + and icmp_code == int(rule['icmpcode']) \ + and icmp_type == int(rule['icmptype']) + + + def _ah_esp_gre_match(self, rule, protocol): + return protocol in ['ah', 'esp', 'gre'] \ + and protocol == rule['protocol'] + + + def _type_security_group_match(self, rule, security_group_name): + return security_group_name \ + and 'securitygroupname' in rule \ + and security_group_name == rule['securitygroupname'] + + + def _type_cidr_match(self, rule, cidr): + return 'cidr' in rule \ + and cidr == rule['cidr'] + + + def _get_rule(self, rules): + user_security_group_name = self.module.params.get('user_security_group') + cidr = self.module.params.get('cidr') + protocol = self.module.params.get('protocol') + start_port = self.module.params.get('start_port') + end_port = self.module.params.get('end_port') + icmp_code = self.module.params.get('icmp_code') + icmp_type = self.module.params.get('icmp_type') + + if not end_port: + end_port = start_port + + if protocol in ['tcp', 'udp'] and not (start_port and end_port): + self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol) + + if protocol == 'icmp' and not (icmp_type and icmp_code): + 
self.module.fail_json(msg="no icmp_type or icmp_code set for protocol '%s'" % protocol) + + for rule in rules: + if user_security_group_name: + type_match = self._type_security_group_match(rule, user_security_group_name) + else: + type_match = self._type_cidr_match(rule, cidr) + + protocol_match = ( self._tcp_udp_match(rule, protocol, start_port, end_port) \ + or self._icmp_match(rule, protocol, icmp_code, icmp_type) \ + or self._ah_esp_gre_match(rule, protocol) + ) + + if type_match and protocol_match: + return rule + return None + + + def get_security_group(self, security_group_name=None): + if not security_group_name: + security_group_name = self.module.params.get('security_group') + args = {} + args['securitygroupname'] = security_group_name + args['projectid'] = self.get_project_id() + sgs = self.cs.listSecurityGroups(**args) + if not sgs or 'securitygroup' not in sgs: + self.module.fail_json(msg="security group '%s' not found" % security_group_name) + return sgs['securitygroup'][0] + + + def add_rule(self): + security_group = self.get_security_group() + + args = {} + user_security_group_name = self.module.params.get('user_security_group') + + # the user_security_group and cidr are mutually_exclusive, but cidr is defaulted to 0.0.0.0/0. + # that is why we ignore if we have a user_security_group. 
+ if user_security_group_name: + args['usersecuritygrouplist'] = [] + user_security_group = self.get_security_group(user_security_group_name) + args['usersecuritygrouplist'].append({ + 'group': user_security_group['name'], + 'account': user_security_group['account'], + }) + else: + args['cidrlist'] = self.module.params.get('cidr') + + args['protocol'] = self.module.params.get('protocol') + args['startport'] = self.module.params.get('start_port') + args['endport'] = self.module.params.get('end_port') + args['icmptype'] = self.module.params.get('icmp_type') + args['icmpcode'] = self.module.params.get('icmp_code') + args['projectid'] = self.get_project_id() + args['securitygroupid'] = security_group['id'] + + if not args['endport']: + args['endport'] = args['startport'] + + rule = None + res = None + type = self.module.params.get('type') + if type == 'ingress': + rule = self._get_rule(security_group['ingressrule']) + if not rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.authorizeSecurityGroupIngress(**args) + + elif type == 'egress': + rule = self._get_rule(security_group['egressrule']) + if not rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.authorizeSecurityGroupEgress(**args) + + if res and 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + security_group = self._poll_job(res, 'securitygroup') + return security_group + + + def remove_rule(self): + security_group = self.get_security_group() + rule = None + res = None + type = self.module.params.get('type') + if type == 'ingress': + rule = self._get_rule(security_group['ingressrule']) + if rule: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.revokeSecurityGroupIngress(id=rule['ruleid']) + + elif type == 'egress': + rule = self._get_rule(security_group['egressrule']) + if rule: + 
self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.revokeSecurityGroupEgress(id=rule['ruleid']) + + if res and 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'securitygroup') + return security_group + + + def get_result(self, security_group_rule): + type = self.module.params.get('type') + + key = 'ingressrule' + if type == 'egress': + key = 'egressrule' + + self.result['type'] = type + self.result['security_group'] = self.module.params.get('security_group') + + if key in security_group_rule and security_group_rule[key]: + if 'securitygroupname' in security_group_rule[key][0]: + self.result['user_security_group'] = security_group_rule[key][0]['securitygroupname'] + if 'cidr' in security_group_rule[key][0]: + self.result['cidr'] = security_group_rule[key][0]['cidr'] + if 'protocol' in security_group_rule[key][0]: + self.result['protocol'] = security_group_rule[key][0]['protocol'] + if 'startport' in security_group_rule[key][0]: + self.result['start_port'] = security_group_rule[key][0]['startport'] + if 'endport' in security_group_rule[key][0]: + self.result['end_port'] = security_group_rule[key][0]['endport'] + if 'icmpcode' in security_group_rule[key][0]: + self.result['icmp_code'] = security_group_rule[key][0]['icmpcode'] + if 'icmptype' in security_group_rule[key][0]: + self.result['icmp_type'] = security_group_rule[key][0]['icmptype'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + security_group = dict(required=True), + type = dict(choices=['ingress', 'egress'], default='ingress'), + cidr = dict(default='0.0.0.0/0'), + user_security_group = dict(default=None), + protocol = dict(choices=['tcp', 'udp', 'icmp', 'ah', 'esp', 'gre'], default='tcp'), + icmp_type = dict(type='int', default=None), + icmp_code = dict(type='int', default=None), + start_port = 
dict(type='int', default=None, aliases=['port']), + end_port = dict(type='int', default=None), + state = dict(choices=['present', 'absent'], default='present'), + project = dict(default=None), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + mutually_exclusive = ( + ['icmp_type', 'start_port'], + ['icmp_type', 'end_port'], + ['icmp_code', 'start_port'], + ['icmp_code', 'end_port'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_sg_rule = AnsibleCloudStackSecurityGroupRule(module) + + state = module.params.get('state') + if state in ['absent']: + sg_rule = acs_sg_rule.remove_rule() + else: + sg_rule = acs_sg_rule.add_rule() + + result = acs_sg_rule.get_result(sg_rule) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 39d30168a59bd1ca296abd338f422e212b62e77f Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 12 Apr 2015 12:59:03 +0200 Subject: [PATCH 184/224] cloudstack: rename modules to more meaningful name schema --- cloud/cloudstack/{cloudstack_fw.py => cs_firewall.py} | 8 ++++---- cloud/cloudstack/{cloudstack_iso.py => cs_iso.py} | 10 +++++----- .../{cloudstack_sshkey.py => cs_sshkeypair.py} | 8 ++++---- 3 files changed, 13 insertions(+), 13 deletions(-) rename cloud/cloudstack/{cloudstack_fw.py => cs_firewall.py} (98%) rename cloud/cloudstack/{cloudstack_iso.py => cs_iso.py} (98%) rename cloud/cloudstack/{cloudstack_sshkey.py => cs_sshkeypair.py} (96%) diff --git a/cloud/cloudstack/cloudstack_fw.py b/cloud/cloudstack/cs_firewall.py similarity index 98% rename from cloud/cloudstack/cloudstack_fw.py rename to cloud/cloudstack/cs_firewall.py index 
cb60c1cde64..9049f40f7c4 100644 --- a/cloud/cloudstack/cloudstack_fw.py +++ b/cloud/cloudstack/cs_firewall.py @@ -19,7 +19,7 @@ # along with Ansible. If not, see . DOCUMENTATION = ''' -module: cloudstack_fw +module: cs_firewall short_description: Manages firewall rules on Apache CloudStack based clouds. description: Creates and removes firewall rules. version_added: '2.0' @@ -77,7 +77,7 @@ EXAMPLES = ''' --- # Allow inbound port 80/tcp from 1.2.3.4 to 4.3.2.1 - local_action: - module: cloudstack_fw + module: cs_firewall ip_address: 4.3.2.1 start_port: 80 end_port: 80 @@ -86,7 +86,7 @@ EXAMPLES = ''' # Allow inbound tcp/udp port 53 to 4.3.2.1 - local_action: - module: cloudstack_fw + module: cs_firewall ip_address: 4.3.2.1 start_port: 53 end_port: 53 @@ -98,7 +98,7 @@ EXAMPLES = ''' # Ensure firewall rule is removed - local_action: - module: cloudstack_fw + module: cs_firewall ip_address: 4.3.2.1 start_port: 8000 end_port: 8888 diff --git a/cloud/cloudstack/cloudstack_iso.py b/cloud/cloudstack/cs_iso.py similarity index 98% rename from cloud/cloudstack/cloudstack_iso.py rename to cloud/cloudstack/cs_iso.py index bd90c427ea4..42f00fb1f00 100644 --- a/cloud/cloudstack/cloudstack_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- -module: cloudstack_iso +module: cs_iso short_description: Manages ISOs images on Apache CloudStack based clouds. description: Register and remove ISO images. version_added: '2.0' @@ -100,7 +100,7 @@ EXAMPLES = ''' --- # Register an ISO if ISO name does not already exist. - local_action: - module: cloudstack_iso + module: cs_iso name: Debian 7 64-bit url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso os_type: Debian GNU/Linux 7(64-bit) @@ -108,7 +108,7 @@ EXAMPLES = ''' # Register an ISO with given name if ISO md5 checksum does not already exist. 
- local_action: - module: cloudstack_iso + module: cs_iso name: Debian 7 64-bit url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso os_type: @@ -117,14 +117,14 @@ EXAMPLES = ''' # Remove an ISO by name - local_action: - module: cloudstack_iso + module: cs_iso name: Debian 7 64-bit state: absent # Remove an ISO by checksum - local_action: - module: cloudstack_iso + module: cs_iso name: Debian 7 64-bit checksum: 0b31bccccb048d20b551f70830bb7ad0 state: absent diff --git a/cloud/cloudstack/cloudstack_sshkey.py b/cloud/cloudstack/cs_sshkeypair.py similarity index 96% rename from cloud/cloudstack/cloudstack_sshkey.py rename to cloud/cloudstack/cs_sshkeypair.py index 657e367fefe..9cc514c05ea 100644 --- a/cloud/cloudstack/cloudstack_sshkey.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- -module: cloudstack_sshkey +module: cs_sshkeypair short_description: Manages SSH keys on Apache CloudStack based clouds. description: - If no key was found and no public key was provided and a new SSH @@ -53,15 +53,15 @@ options: EXAMPLES = ''' --- # create a new private / public key pair: -- local_action: cloudstack_sshkey name=linus@example.com +- local_action: cs_sshkeypair name=linus@example.com register: key - debug: msg='private key is {{ key.private_key }}' # remove a public key by its name: -- local_action: cloudstack_sshkey name=linus@example.com state=absent +- local_action: cs_sshkeypair name=linus@example.com state=absent # register your existing local public key: -- local_action: cloudstack_sshkey name=linus@example.com public_key='{{ lookup('file', '~/.ssh/id_rsa.pub') }}' +- local_action: cs_sshkeypair name=linus@example.com public_key='{{ lookup('file', '~/.ssh/id_rsa.pub') }}' ''' RETURN = ''' From c899e2d9a7fe000442363112d308af59f26ed054 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 4 Apr 2015 01:30:17 +0200 Subject: [PATCH 185/224] cloudstack: add new module cs_vmsnapshot This 
module depends on ansible.module_utils.cloudstack. --- cloud/cloudstack/cs_vmsnapshot.py | 278 ++++++++++++++++++++++++++++++ 1 file changed, 278 insertions(+) create mode 100644 cloud/cloudstack/cs_vmsnapshot.py diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py new file mode 100644 index 00000000000..d53a33ac72e --- /dev/null +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -0,0 +1,278 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_vmsnapshot +short_description: Manages VM snapshots on Apache CloudStack based clouds. +description: Create, remove and revert VM from snapshots. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Unique Name of the snapshot. In CloudStack terms C(displayname). + required: true + aliases: ['displayname'] + description: + description: + - Description of the snapshot. + required: false + default: null + snapshot_memory: + description: + - Snapshot memory if set to true. + required: false + default: false + project: + description: + - Name of the project the VM is assigned to. + required: false + default: null + state: + description: + - State of the snapshot. 
+ required: false + default: 'present' + choices: [ 'present', 'absent', 'revert' ] + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +''' + +EXAMPLES = ''' +--- +# Create a VM snapshot of disk and memory before an upgrade +- local_action: + module: cs_vmsnapshot + name: Snapshot before upgrade + vm: web-01 + snapshot_memory: yes + + +# Revert a VM to a snapshot after a failed upgrade +- local_action: + module: cs_vmsnapshot + name: Snapshot before upgrade + vm: web-01 + state: revert + + +# Remove a VM snapshot after successful upgrade +- local_action: + module: cs_vmsnapshot + name: Snapshot before upgrade + vm: web-01 + state: absent +''' + +RETURN = ''' +--- +name: + description: Name of the snapshot. + returned: success + type: string + sample: snapshot before update +displayname: + description: displayname of the snapshot. + returned: success + type: string + sample: snapshot before update +created: + description: date of the snapshot. 
+ returned: success + type: string + sample: 2015-03-29T14:57:06+0200 +current: + description: true if snapshot is current + returned: success + type: boolean + sample: True +state: + description: state of the vm snapshot + returned: success + type: string + sample: Allocated +type: + description: type of vm snapshot + returned: success + type: string + sample: DiskAndMemory +description: + description: + description: description of vm snapshot + returned: success + type: string + sample: snapshot brought to you by Ansible +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackVmSnapshot(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.result = { + 'changed': False, + } + + + def get_snapshot(self): + args = {} + args['virtualmachineid'] = self.get_vm_id() + args['projectid'] = self.get_project_id() + args['name'] = self.module.params.get('name') + + snapshots = self.cs.listVMSnapshot(**args) + if snapshots: + return snapshots['vmSnapshot'][0] + return None + + + def create_snapshot(self): + snapshot = self.get_snapshot() + if not snapshot: + self.result['changed'] = True + + args = {} + args['virtualmachineid'] = self.get_vm_id() + args['name'] = self.module.params.get('name') + args['description'] = self.module.params.get('description') + args['snapshotmemory'] = self.module.params.get('snapshot_memory') + + if not self.module.check_mode: + res = self.cs.createVMSnapshot(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + snapshot = self._poll_job(res, 'vmsnapshot') + + return snapshot + + + def remove_snapshot(self): + snapshot = self.get_snapshot() + if snapshot: + self.result['changed'] = True + if 
not self.module.check_mode: + res = self.cs.deleteVMSnapshot(vmsnapshotid=snapshot['id']) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'vmsnapshot') + return snapshot + + + def revert_vm_to_snapshot(self): + snapshot = self.get_snapshot() + if snapshot: + self.result['changed'] = True + + if snapshot['state'] != "Ready": + self.module.fail_json(msg="snapshot state is '%s', not ready, could not revert VM" % snapshot['state']) + + if not self.module.check_mode: + res = self.cs.revertToVMSnapshot(vmsnapshotid=snapshot['id']) + + poll_async = self.module.params.get('poll_async') + if res and poll_async: + res = self._poll_job(res, 'vmsnapshot') + return snapshot + + self.module.fail_json(msg="snapshot not found, could not revert VM") + + + def get_result(self, snapshot): + if snapshot: + if 'displayname' in snapshot: + self.result['displayname'] = snapshot['displayname'] + if 'created' in snapshot: + self.result['created'] = snapshot['created'] + if 'current' in snapshot: + self.result['current'] = snapshot['current'] + if 'state' in snapshot: + self.result['state'] = snapshot['state'] + if 'type' in snapshot: + self.result['type'] = snapshot['type'] + if 'name' in snapshot: + self.result['name'] = snapshot['name'] + if 'description' in snapshot: + self.result['description'] = snapshot['description'] + return self.result + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, aliases=['displayname']), + vm = dict(required=True), + description = dict(default=None), + project = dict(default=None), + snapshot_memory = dict(choices=BOOLEANS, default=False), + state = dict(choices=['present', 'absent', 'revert'], default='present'), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + 
api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_vmsnapshot = AnsibleCloudStackVmSnapshot(module) + + state = module.params.get('state') + if state in ['revert']: + snapshot = acs_vmsnapshot.revert_vm_to_snapshot() + elif state in ['absent']: + snapshot = acs_vmsnapshot.remove_snapshot() + else: + snapshot = acs_vmsnapshot.create_snapshot() + + result = acs_vmsnapshot.get_result(snapshot) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 757a047a793cdb337a3465517dfbd97774fdcd87 Mon Sep 17 00:00:00 2001 From: Matthew Makai Date: Mon, 13 Apr 2015 10:04:10 -0400 Subject: [PATCH 186/224] adding sendgrid module to extras --- notification/sendgrid.py | 143 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100644 notification/sendgrid.py diff --git a/notification/sendgrid.py b/notification/sendgrid.py new file mode 100644 index 00000000000..6c5264521c2 --- /dev/null +++ b/notification/sendgrid.py @@ -0,0 +1,143 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Matt Makai +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +version_added: "2.0" +module: sendgrid +short_description: Sends an email with the SendGrid API +description: + - Sends an email with a SendGrid account through their API, not through + the SMTP service. +notes: + - Like the other notification modules, this one requires an external + dependency to work. In this case, you'll need an active SendGrid + account. +options: + username: + description: + username for logging into the SendGrid account + required: true + password: + description: password that corresponds to the username + required: true + from_address: + description: + the address in the "from" field for the email + required: true + to_addresses: + description: + a list with one or more recipient email addresses + required: true + subject: + description: + the desired subject for the email + required: true + +requirements: [ urllib, urllib2 ] +author: Matt Makai +''' + +EXAMPLES = ''' +# send an email to a single recipient that the deployment was successful +- local_action: sendgrid + username={{ sendgrid_username }} + password={{ sendgrid_password }} + from_address="ansible@mycompany.com" + to_addresses: + - "ops@mycompany.com" + subject="Deployment success." + body="The most recent Ansible deployment was successful." + +# send an email to more than one recipient that the build failed +- local_action: sendgrid + username={{ sendgrid_username }} + password={{ sendgrid_password }} + from_address="build@mycompany.com" + to_addresses: + - "ops@mycompany.com" + - "devteam@mycompany.com" + subject="Build failure!." + body="Unable to pull source repository from Git server." 
+''' + +# ======================================= +# sendgrid module support methods +# +try: + import urllib, urllib2 +except ImportError: + module.fail_json(msg="urllib and urllib2 are required") + +import base64 + +def post_sendgrid_api(module, username, password, from_address, to_addresses, + subject, body): + SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json" + AGENT = "Ansible/1.7" + data = {'api_user':username, 'api_key':password, + 'from':from_address, 'subject': subject, 'text': body} + encoded_data = urllib.urlencode(data) + to_addresses_api = '' + for recipient in to_addresses: + to_addresses_api += '&to[]=%s' % str(recipient) + encoded_data += to_addresses_api + request = urllib2.Request(SENDGRID_URI) + request.add_header('User-Agent', AGENT) + request.add_header('Content-type', 'application/x-www-form-urlencoded') + request.add_header('Accept', 'application/json') + return urllib2.urlopen(request, encoded_data) + + +# ======================================= +# Main +# + +def main(): + module = AnsibleModule( + argument_spec=dict( + username=dict(required=True), + password=dict(required=True, no_log=True), + from_address=dict(required=True), + to_addresses=dict(required=True, type='list'), + subject=dict(required=True), + body=dict(required=True), + ), + supports_check_mode=True + ) + + username = module.params['username'] + password = module.params['password'] + from_address = module.params['from_address'] + to_addresses = module.params['to_addresses'] + subject = module.params['subject'] + body = module.params['body'] + + try: + response = post_sendgrid_api(module, username, password, + from_address, to_addresses, subject, body) + except Exception, e: + module.fail_json(msg="unable to send email through SendGrid API") + + module.exit_json(msg=subject, changed=False) + +# import module snippets +from ansible.module_utils.basic import * +main() From 2f2a69ad8832446f2992286c8ab5822c480c695d Mon Sep 17 00:00:00 2001 From: Matthew Makai Date: 
Mon, 13 Apr 2015 10:25:24 -0400 Subject: [PATCH 187/224] updating sendgrid module based on code review by @abadger --- notification/sendgrid.py | 47 ++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/notification/sendgrid.py b/notification/sendgrid.py index 6c5264521c2..d8bfb7d6a2e 100644 --- a/notification/sendgrid.py +++ b/notification/sendgrid.py @@ -27,8 +27,10 @@ description: - Sends an email with a SendGrid account through their API, not through the SMTP service. notes: - - Like the other notification modules, this one requires an external - dependency to work. In this case, you'll need an active SendGrid + - This module is non-idempotent because it sends an email through the + external API. It is idempotent only in the case that the module fails. + - Like the other notification modules, this one requires an external + dependency to work. In this case, you'll need an active SendGrid account. options: username: @@ -51,31 +53,32 @@ options: the desired subject for the email required: true -requirements: [ urllib, urllib2 ] author: Matt Makai ''' EXAMPLES = ''' # send an email to a single recipient that the deployment was successful -- local_action: sendgrid - username={{ sendgrid_username }} - password={{ sendgrid_password }} - from_address="ansible@mycompany.com" - to_addresses: - - "ops@mycompany.com" - subject="Deployment success." - body="The most recent Ansible deployment was successful." +- sendgrid: + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "ansible@mycompany.com" + to_addresses: + - "ops@mycompany.com" + subject: "Deployment success." + body: "The most recent Ansible deployment was successful." 
+ delegate_to: localhost # send an email to more than one recipient that the build failed -- local_action: sendgrid - username={{ sendgrid_username }} - password={{ sendgrid_password }} - from_address="build@mycompany.com" +- sendgrid + username: "{{ sendgrid_username }}" + password: "{{ sendgrid_password }}" + from_address: "build@mycompany.com" to_addresses: - "ops@mycompany.com" - "devteam@mycompany.com" - subject="Build failure!." - body="Unable to pull source repository from Git server." + subject: "Build failure!." + body: "Unable to pull source repository from Git server." + delegate_to: localhost ''' # ======================================= @@ -91,13 +94,15 @@ import base64 def post_sendgrid_api(module, username, password, from_address, to_addresses, subject, body): SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json" - AGENT = "Ansible/1.7" - data = {'api_user':username, 'api_key':password, + AGENT = "Ansible" + data = {'api_user': username, 'api_key':password, 'from':from_address, 'subject': subject, 'text': body} encoded_data = urllib.urlencode(data) to_addresses_api = '' for recipient in to_addresses: - to_addresses_api += '&to[]=%s' % str(recipient) + if isinstance(recipient, unicode): + recipient = recipient.encode('utf-8') + to_addresses_api += '&to[]=%s' % recipient encoded_data += to_addresses_api request = urllib2.Request(SENDGRID_URI) request.add_header('User-Agent', AGENT) @@ -133,7 +138,7 @@ def main(): try: response = post_sendgrid_api(module, username, password, from_address, to_addresses, subject, body) - except Exception, e: + except Exception: module.fail_json(msg="unable to send email through SendGrid API") module.exit_json(msg=subject, changed=False) From 3e1ffd12c725080979c6e7625c6119cd378033f2 Mon Sep 17 00:00:00 2001 From: Matthew Makai Date: Mon, 13 Apr 2015 11:22:09 -0400 Subject: [PATCH 188/224] updating twilio module docs and making it possible to send to a list of phone numbers --- notification/twilio.py | 82 
+++++++++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 32 deletions(-) diff --git a/notification/twilio.py b/notification/twilio.py index e50879cd62d..a95f21bde1f 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2014, Matt Makai +# (c) 2015, Matt Makai # # This file is part of Ansible # @@ -24,18 +24,20 @@ version_added: "1.6" module: twilio short_description: Sends a text message to a mobile phone through Twilio. description: - - Sends a text message to a phone number through an the Twilio SMS service. + - Sends a text message to a phone number through the Twilio SMS API. notes: - - Like the other notification modules, this one requires an external + - This module is non-idempotent because it sends an email through the + external API. It is idempotent only in the case that the module fails. + - Like the other notification modules, this one requires an external dependency to work. In this case, you'll need a Twilio account with a purchased or verified phone number to send the text message. 
options: account_sid: description: - user's account id for Twilio found on the account page + user's Twilio account token found on the account page required: true auth_token: - description: user's authentication token for Twilio found on the account page + description: user's Twilio authentication token required: true msg: description: @@ -43,36 +45,45 @@ options: required: true to_number: description: - what phone number to send the text message to, format +15551112222 + one or more phone numbers to send the text message to, + format +15551112222 required: true from_number: description: - what phone number to send the text message from, format +15551112222 + the Twilio number to send the text message from, format +15551112222 required: true - -requirements: [ urllib, urllib2 ] + author: Matt Makai ''' EXAMPLES = ''' -# send a text message from the local server about the build status to (555) 303 5681 -# note: you have to have purchased the 'from_number' on your Twilio account -- local_action: twilio msg="All servers with webserver role are now configured." - account_sid={{ twilio_account_sid }} - auth_token={{ twilio_auth_token }} - from_number=+15552014545 to_number=+15553035681 - -# send a text message from a server to (555) 111 3232 -# note: you have to have purchased the 'from_number' on your Twilio account -- twilio: msg="This server's configuration is now complete." - account_sid={{ twilio_account_sid }} - auth_token={{ twilio_auth_token }} - from_number=+15553258899 to_number=+15551113232 - +# send an SMS about the build status to (555) 303 5681 +# note: you have to have the 'from_number' on your Twilio account +- twilio: + msg: "All servers with webserver role are now configured." 
+ account_sid: "{{ twilio_account_sid }}" + auth_token: "{{ twilio_auth_token }}" + from_number: "+15552014545" + to_number: "+15553035681" + delegate_to: localhost + +# send an SMS to multiple phone numbers about the deployment +# note: you must have the 'from_number' on your Twilio account +- twilio: + msg: "This server's configuration is now complete." + account_sid: "{{ twilio_account_sid }}" + auth_token: "{{ twilio_auth_token }}" + from_number: "+15553258899" + to_number: + - "+15551113232" + - "+12025551235" + - "+19735559010" + delegate_to: localhost + ''' # ======================================= -# text module support methods +# twilio module support methods # try: import urllib, urllib2 @@ -82,10 +93,11 @@ except ImportError: import base64 -def post_text(module, account_sid, auth_token, msg, from_number, to_number): +def post_twilio_api(module, account_sid, auth_token, msg, from_number, + to_number): URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ % (account_sid,) - AGENT = "Ansible/1.5" + AGENT = "Ansible" data = {'From':from_number, 'To':to_number, 'Body':msg} encoded_data = urllib.urlencode(data) @@ -94,7 +106,7 @@ def post_text(module, account_sid, auth_token, msg, from_number, to_number): (account_sid, auth_token)).replace('\n', '') request.add_header('User-Agent', AGENT) request.add_header('Content-type', 'application/x-www-form-urlencoded') - request.add_header('Accept', 'application/ansible') + request.add_header('Accept', 'application/json') request.add_header('Authorization', 'Basic %s' % base64string) return urllib2.urlopen(request, encoded_data) @@ -115,7 +127,7 @@ def main(): ), supports_check_mode=True ) - + account_sid = module.params['account_sid'] auth_token = module.params['auth_token'] msg = module.params['msg'] @@ -123,12 +135,18 @@ def main(): to_number = module.params['to_number'] try: - response = post_text(module, account_sid, auth_token, msg, - from_number, to_number) - except Exception, e: + if 
isinstance(to_number, list): + for number in to_number: + post_twilio_api(module, account_sid, auth_token, msg, + from_number, number) + else: + post_twilio_api(module, account_sid, auth_token, msg, + from_number, to_number) + pass + except Exception: module.fail_json(msg="unable to send text message to %s" % to_number) - module.exit_json(msg=msg, changed=False) + module.exit_json(msg=msg, changed=False) # import module snippets from ansible.module_utils.basic import * From 9f4ad0246946c4e21aaf2547f7d311de99879720 Mon Sep 17 00:00:00 2001 From: Matthew Makai Date: Mon, 13 Apr 2015 11:42:50 -0400 Subject: [PATCH 189/224] updating twilio module to optionally support MMS --- notification/twilio.py | 43 ++++++++++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/notification/twilio.py b/notification/twilio.py index a95f21bde1f..00bde6cc8f2 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -24,7 +24,7 @@ version_added: "1.6" module: twilio short_description: Sends a text message to a mobile phone through Twilio. description: - - Sends a text message to a phone number through the Twilio SMS API. + - Sends a text message to a phone number through the Twilio messaging API. notes: - This module is non-idempotent because it sends an email through the external API. It is idempotent only in the case that the module fails. 
@@ -52,27 +52,34 @@ options: description: the Twilio number to send the text message from, format +15551112222 required: true + media_url: + description: + a URL with a picture, video or sound clip to send with an MMS + (multimedia message) instead of a plain SMS + required: false author: Matt Makai ''' EXAMPLES = ''' # send an SMS about the build status to (555) 303 5681 -# note: you have to have the 'from_number' on your Twilio account +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account - twilio: msg: "All servers with webserver role are now configured." - account_sid: "{{ twilio_account_sid }}" - auth_token: "{{ twilio_auth_token }}" + account_sid: "ACXXXXXXXXXXXXXXXXX" + auth_token: "ACXXXXXXXXXXXXXXXXX" from_number: "+15552014545" to_number: "+15553035681" delegate_to: localhost # send an SMS to multiple phone numbers about the deployment -# note: you must have the 'from_number' on your Twilio account +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account - twilio: msg: "This server's configuration is now complete." - account_sid: "{{ twilio_account_sid }}" - auth_token: "{{ twilio_auth_token }}" + account_sid: "ACXXXXXXXXXXXXXXXXX" + auth_token: "ACXXXXXXXXXXXXXXXXX" from_number: "+15553258899" to_number: - "+15551113232" @@ -80,6 +87,18 @@ EXAMPLES = ''' - "+19735559010" delegate_to: localhost +# send an MMS to multiple phone numbers with an update on the +# deployment and a screenshot of the results +# note: replace account_sid and auth_token values with your credentials +# and you have to have the 'from_number' on your Twilio account +- twilio: + msg: "Deployment complete!" 
+ account_sid: "ACXXXXXXXXXXXXXXXXX" + auth_token: "ACXXXXXXXXXXXXXXXXX" + from_number: "+15552014545" + to_number: "+15553035681" + media_url: "https://demo.twilio.com/logo.png" + delegate_to: localhost ''' # ======================================= @@ -94,12 +113,14 @@ import base64 def post_twilio_api(module, account_sid, auth_token, msg, from_number, - to_number): + to_number, media_url=None): URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ % (account_sid,) AGENT = "Ansible" data = {'From':from_number, 'To':to_number, 'Body':msg} + if media_url: + data['MediaUrl'] = media_url encoded_data = urllib.urlencode(data) request = urllib2.Request(URI) base64string = base64.encodestring('%s:%s' % \ @@ -124,6 +145,7 @@ def main(): msg=dict(required=True), from_number=dict(required=True), to_number=dict(required=True), + media_url=dict(default=None, required=False), ), supports_check_mode=True ) @@ -133,15 +155,16 @@ def main(): msg = module.params['msg'] from_number = module.params['from_number'] to_number = module.params['to_number'] + media_url = module.params['media_url'] try: if isinstance(to_number, list): for number in to_number: post_twilio_api(module, account_sid, auth_token, msg, - from_number, number) + from_number, number, media_url) else: post_twilio_api(module, account_sid, auth_token, msg, - from_number, to_number) + from_number, to_number, media_url) pass except Exception: module.fail_json(msg="unable to send text message to %s" % to_number) From c72be32b94b6fd2a19acad113c6d2339d0b97169 Mon Sep 17 00:00:00 2001 From: Matthew Makai Date: Mon, 13 Apr 2015 11:55:20 -0400 Subject: [PATCH 190/224] fixing typo in documentation --- notification/twilio.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/notification/twilio.py b/notification/twilio.py index 00bde6cc8f2..a45dc022988 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -87,8 +87,8 @@ EXAMPLES = ''' - "+19735559010" delegate_to: localhost -# 
send an MMS to multiple phone numbers with an update on the -# deployment and a screenshot of the results +# send an MMS to a single recipient with an update on the deployment +# and an image of the results # note: replace account_sid and auth_token values with your credentials # and you have to have the 'from_number' on your Twilio account - twilio: From 6522e4e016c958b09c2b0a21db671ac670f8f942 Mon Sep 17 00:00:00 2001 From: Matthew Makai Date: Mon, 13 Apr 2015 12:06:27 -0400 Subject: [PATCH 191/224] updating main for loop based on @abadger code review --- notification/twilio.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/notification/twilio.py b/notification/twilio.py index a45dc022988..faae7b6f58f 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -157,17 +157,15 @@ def main(): to_number = module.params['to_number'] media_url = module.params['media_url'] - try: - if isinstance(to_number, list): - for number in to_number: - post_twilio_api(module, account_sid, auth_token, msg, - from_number, number, media_url) - else: + if not isinstance(to_number, list): + to_number = [to_number] + + for number in to_number: + try: post_twilio_api(module, account_sid, auth_token, msg, - from_number, to_number, media_url) - pass - except Exception: - module.fail_json(msg="unable to send text message to %s" % to_number) + from_number, number, media_url) + except Exception: + module.fail_json(msg="unable to send message to %s" % number) module.exit_json(msg=msg, changed=False) From 582da5b911212b37fee1272696daeb0b1c1cae97 Mon Sep 17 00:00:00 2001 From: RJ Nowling Date: Tue, 14 Apr 2015 12:02:32 -0500 Subject: [PATCH 192/224] Make 'module' global. Small whitespace formatting fixes. Closes Issue #397. 
--- system/gluster_volume.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index e04df48d5f4..c1607f627c3 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -134,6 +134,7 @@ glusterbin = '' def run_gluster(gargs, **kwargs): global glusterbin + global module args = [glusterbin] args.extend(gargs) try: @@ -146,6 +147,7 @@ def run_gluster(gargs, **kwargs): def run_gluster_nofail(gargs, **kwargs): global glusterbin + global module args = [glusterbin] args.extend(gargs) rc, out, err = module.run_command(args, **kwargs) @@ -155,6 +157,7 @@ def run_gluster_nofail(gargs, **kwargs): def run_gluster_yes(gargs): global glusterbin + global module args = [glusterbin] args.extend(gargs) rc, out, err = module.run_command(args, data='y\n') @@ -240,6 +243,7 @@ def wait_for_peer(host): return False def probe(host): + global module run_gluster([ 'peer', 'probe', host ]) if not wait_for_peer(host): module.fail_json(msg='failed to probe peer %s' % host) @@ -285,18 +289,19 @@ def add_brick(name, brick, force): run_gluster(args) def do_rebalance(name): - run_gluster(['volume', 'rebalance', name, 'start']) + run_gluster([ 'volume', 'rebalance', name, 'start' ]) def enable_quota(name): run_gluster([ 'volume', 'quota', name, 'enable' ]) def set_quota(name, directory, value): - run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ]) + run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ]) def main(): ### MAIN ### + global module module = AnsibleModule( argument_spec=dict( name=dict(required=True, default=None, aliases=['volume']), From 9ba0f9f57280911cc49e53ee948e972776f9fc2e Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 18:48:36 -0400 Subject: [PATCH 193/224] Expand user in path, fix #385 --- files/patch.py | 1 + 1 file changed, 1 insertion(+) diff --git a/files/patch.py b/files/patch.py index 2f2894a6508..ec3a3b02c00 100755 --- 
a/files/patch.py +++ b/files/patch.py @@ -130,6 +130,7 @@ def main(): # Create type object as namespace for module params p = type('Params', (), module.params) + p.src = os.path.expanduser(p.src) if not os.access(p.src, R_OK): module.fail_json(msg="src %s doesn't exist or not readable" % (p.src)) From b4ad53224882fc18d332f9c181ebec8997ca3122 Mon Sep 17 00:00:00 2001 From: Niall Donegan Date: Wed, 15 Apr 2015 16:15:31 +0100 Subject: [PATCH 194/224] sys.exit used, but not imported. --- network/dnsimple.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/network/dnsimple.py b/network/dnsimple.py index 363a2ca24c1..4e6ae4ec57e 100755 --- a/network/dnsimple.py +++ b/network/dnsimple.py @@ -32,7 +32,7 @@ options: description: - Account API token. See I(account_email) for info. required: false - default: null + default: null domain: description: @@ -67,7 +67,7 @@ options: default: 3600 (one hour) value: - description: + description: - Record value - "Must be specified when trying to ensure a record exists" required: false @@ -130,12 +130,13 @@ EXAMPLES = ''' ''' import os +from sys import exit try: from dnsimple import DNSimple from dnsimple.dnsimple import DNSimpleException except ImportError: print "failed=True msg='dnsimple required for this module'" - sys.exit(1) + exit(1) def main(): module = AnsibleModule( @@ -148,7 +149,7 @@ def main(): type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']), ttl = dict(required=False, default=3600, type='int'), value = dict(required=False), - priority = dict(required=False, type='int'), + priority = dict(required=False, type='int'), state = dict(required=False, choices=['present', 'absent']), solo = dict(required=False, type='bool'), ), From b125d2685a0619435530a3267ae800ff7dcf6ebc Mon Sep 17 00:00:00 2001 From: Niall Donegan Date: Wed, 15 Apr 2015 17:07:18 +0100 Subject: [PATCH 195/224] Handle missing module 
with fail_json --- network/dnsimple.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/network/dnsimple.py b/network/dnsimple.py index 4e6ae4ec57e..9aa52172f19 100755 --- a/network/dnsimple.py +++ b/network/dnsimple.py @@ -130,13 +130,12 @@ EXAMPLES = ''' ''' import os -from sys import exit try: from dnsimple import DNSimple from dnsimple.dnsimple import DNSimpleException + HAS_DNSIMPLE = True except ImportError: - print "failed=True msg='dnsimple required for this module'" - exit(1) + HAS_DNSIMPLE = False def main(): module = AnsibleModule( @@ -159,6 +158,9 @@ def main(): supports_check_mode = True, ) + if not HAS_DNSIMPLE: + module.fail_json("dnsimple required for this module") + account_email = module.params.get('account_email') account_api_token = module.params.get('account_api_token') domain = module.params.get('domain') From 698098ae5720bf940a64643dc39c0faa37a88781 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Apr 2015 09:45:23 -0400 Subject: [PATCH 196/224] doc update for zypper --- packaging/os/zypper.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index 5daec8d1429..ccf901d4fa1 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -56,6 +56,7 @@ options: required: false choices: [ package, patch, pattern, product, srcpackage ] default: "package" + version_added: "2.0" disable_gpg_check: description: - Whether to disable to GPG signature checking of the package From c7853c9f0891a048486f68dfd4c1fc2d107da8bd Mon Sep 17 00:00:00 2001 From: RJ Nowling Date: Fri, 17 Apr 2015 14:48:53 -0500 Subject: [PATCH 197/224] Issue #403: Add support for multiple bricks to gluster_volume --- system/gluster_volume.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index c1607f627c3..b27d41ca342 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -66,7 +66,7 
@@ options: required: false default: null description: - - Brick path on servers + - Brick path on servers. Multiple bricks can be specified by commas start_on_create: choices: [ 'yes', 'no'] required: false @@ -256,7 +256,7 @@ def probe_all_peers(hosts, peers, myhostname): if myhostname != host: probe(host) -def create_volume(name, stripe, replica, transport, hosts, brick, force): +def create_volume(name, stripe, replica, transport, hosts, bricks, force): args = [ 'volume', 'create' ] args.append(name) if stripe: @@ -267,8 +267,9 @@ def create_volume(name, stripe, replica, transport, hosts, brick, force): args.append(str(replica)) args.append('transport') args.append(transport) - for host in hosts: - args.append(('%s:%s' % (host, brick))) + for brick in bricks: + for host in hosts: + args.append(('%s:%s' % (host, brick))) if force: args.append('force') run_gluster(args) @@ -329,7 +330,7 @@ def main(): action = module.params['state'] volume_name = module.params['name'] cluster= module.params['cluster'] - brick_path = module.params['brick'] + brick_paths = module.params['brick'] stripes = module.params['stripes'] replicas = module.params['replicas'] transport = module.params['transport'] @@ -341,6 +342,11 @@ def main(): if not myhostname: myhostname = socket.gethostname() + if brick_paths != None and "," in brick_paths: + brick_paths = brick_paths.split(",") + else: + brick_paths = [brick_paths] + options = module.params['options'] quota = module.params['quota'] directory = module.params['directory'] @@ -364,7 +370,7 @@ def main(): # create if it doesn't exist if volume_name not in volumes: - create_volume(volume_name, stripes, replicas, transport, cluster, brick_path, force) + create_volume(volume_name, stripes, replicas, transport, cluster, brick_paths, force) volumes = get_volumes() changed = True @@ -378,10 +384,11 @@ def main(): removed_bricks = [] all_bricks = [] for node in cluster: - brick = '%s:%s' % (node, brick_path) - all_bricks.append(brick) - if brick 
not in volumes[volume_name]['bricks']: - new_bricks.append(brick) + for brick_path in brick_paths: + brick = '%s:%s' % (node, brick_path) + all_bricks.append(brick) + if brick not in volumes[volume_name]['bricks']: + new_bricks.append(brick) # this module does not yet remove bricks, but we check those anyways for brick in volumes[volume_name]['bricks']: From 20d998a6d680d94fa9146ee590fd33d0dd5609d3 Mon Sep 17 00:00:00 2001 From: RJ Nowling Date: Fri, 17 Apr 2015 14:59:10 -0500 Subject: [PATCH 198/224] Improve documentation for gluster_volume brick parameter --- system/gluster_volume.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index b27d41ca342..3af54e8d551 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -66,7 +66,7 @@ options: required: false default: null description: - - Brick path on servers. Multiple bricks can be specified by commas + - Brick path on servers. Multiple brick paths can be separated by commas start_on_create: choices: [ 'yes', 'no'] required: false From 2535a4928423b783ee1a8df939253e18abd545ea Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 18 Apr 2015 21:50:06 -0400 Subject: [PATCH 199/224] fixed 2.4 compatibility --- system/known_hosts.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/system/known_hosts.py b/system/known_hosts.py index b332528ed19..30ea7755553 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -128,18 +128,23 @@ def enforce_state(module, params): module.fail_json(msg="Failed to read %s: %s" % \ (path,str(e))) try: - outf=tempfile.NamedTemporaryFile(dir=os.path.dirname(path), - delete=False) + outf=tempfile.NamedTemporaryFile(dir=os.path.dirname(path)) if inf is not None: for line in inf: outf.write(line) inf.close() outf.write(key) - outf.close() + outf.flush() module.atomic_move(outf.name,path) except (IOError,OSError),e: module.fail_json(msg="Failed to write to file 
%s: %s" % \ (path,str(e))) + + try: + outf.close() + except: + pass + params['changed'] = True return params @@ -162,16 +167,20 @@ def sanity_check(module,host,key,sshkeygen): #The approach is to write the key to a temporary file, #and then attempt to look up the specified host in that file. try: - outf=tempfile.NamedTemporaryFile(delete=False) + outf=tempfile.NamedTemporaryFile() outf.write(key) - outf.close() + outf.flush() except IOError,e: module.fail_json(msg="Failed to write to temporary file %s: %s" % \ (outf.name,str(e))) rc,stdout,stderr=module.run_command([sshkeygen,'-F',host, '-f',outf.name], check_rc=True) - os.remove(outf.name) + try: + outf.close() + except: + pass + if stdout=='': #host not found module.fail_json(msg="Host parameter does not match hashed host field in supplied key") From 9d720f52c805e3843fca49926127de7f94afff31 Mon Sep 17 00:00:00 2001 From: Peter Oliver Date: Sat, 4 Apr 2015 19:27:37 +0100 Subject: [PATCH 200/224] Add an accept_licenses option to pkg5 module This accepts any software licences required by the package. --- packaging/os/pkg5.py | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index eea860e7be2..0e4c565b90e 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -39,6 +39,13 @@ options: required: false default: present choices: [ present, latest, absent ] + accept_licenses: + description: + - Accept any licences. 
+ required: false + default: false + choices: [ true, false ] + aliases: [ accept_licences, accept ] ''' EXAMPLES = ''' # Install Vim: @@ -70,6 +77,11 @@ def main(): 'removed', ] ), + accept_licenses=dict( + choices=BOOLEANS, + default=False + aliases=['accept_licences', 'accept'] + ), ) ) @@ -89,14 +101,14 @@ def main(): packages.append(fragment) if params['state'] in ['present', 'installed']: - ensure(module, 'present', packages) + ensure(module, 'present', packages, params) elif params['state'] in ['latest']: - ensure(module, 'latest', packages) + ensure(module, 'latest', packages, params) elif params['state'] in ['absent', 'uninstalled', 'removed']: - ensure(module, 'absent', packages) + ensure(module, 'absent', packages, params) -def ensure(module, state, packages): +def ensure(module, state, packages, params): response = { 'results': [], 'msg': '', @@ -119,7 +131,13 @@ def ensure(module, state, packages): to_modify = filter(behaviour[state]['filter'], packages) if to_modify: rc, out, err = module.run_command( - ['pkg', behaviour[state]['subcommand'], '-q', '--'] + to_modify + [ + 'pkg', behaviour[state]['subcommand'] + ] + + (['--accept'] if params['accept_licenses'] else []) + + [ + '-q', '--' + ] + to_modify ) response['rc'] = rc response['results'].append(out) From ae34395f01fa3ef5df1ad38895a260c849f65074 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Sun, 19 Apr 2015 15:29:30 -0400 Subject: [PATCH 201/224] Fixes bug where state=absent did not work --- system/gluster_volume.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index c1607f627c3..2e45a58cc8a 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -356,7 +356,9 @@ def main(): # do the work! 
if action == 'absent': if volume_name in volumes: - run_gluster([ 'volume', 'delete', name ]) + if volumes[volume_name]['status'].lower() != 'stopped': + stop_volume(volume_name) + run_gluster_yes([ 'volume', 'delete', volume_name ]) changed = True if action == 'present': From 6d66beb7936fc13b4b873d88504fdda067bab87b Mon Sep 17 00:00:00 2001 From: RJ Nowling Date: Mon, 20 Apr 2015 09:13:52 -0500 Subject: [PATCH 202/224] Change 'brick' to 'bricks' and add 'brick' as an alias. Add example with multiple bricks. --- system/gluster_volume.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index 3af54e8d551..37dc6bbc27d 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -62,11 +62,11 @@ options: default: 'tcp' description: - Transport type for volume - brick: + bricks: required: false default: null description: - - Brick path on servers. Multiple brick paths can be separated by commas + - Brick paths on servers. 
Multiple brick paths can be separated by commas start_on_create: choices: [ 'yes', 'no'] required: false @@ -107,7 +107,7 @@ author: Taneli Leppä EXAMPLES = """ - name: create gluster volume - gluster_volume: state=present name=test1 brick=/bricks/brick1/g1 rebalance=yes cluster:"{{ play_hosts }}" + gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster:"{{ play_hosts }}" run_once: true - name: tune @@ -124,6 +124,10 @@ EXAMPLES = """ - name: remove gluster volume gluster_volume: state=absent name=test1 + +- name: create gluster volume with multiple bricks + gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster:"{{ play_hosts }}" + run_once: true """ import shutil @@ -312,7 +316,7 @@ def main(): stripes=dict(required=False, default=None, type='int'), replicas=dict(required=False, default=None, type='int'), transport=dict(required=False, default='tcp', choices=[ 'tcp', 'rdma', 'tcp,rdma' ]), - brick=dict(required=False, default=None), + bricks=dict(required=False, default=None, aliases=['brick']), start_on_create=dict(required=False, default=True, type='bool'), rebalance=dict(required=False, default=False, type='bool'), options=dict(required=False, default={}, type='dict'), From 3c9131b09c3ca16d25a9340b965bdd4df70b8325 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Apr 2015 10:20:50 -0400 Subject: [PATCH 203/224] added aliases docs --- system/gluster_volume.py | 1 + 1 file changed, 1 insertion(+) diff --git a/system/gluster_volume.py b/system/gluster_volume.py index af4a70b7094..2a8bc74df72 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -67,6 +67,7 @@ options: default: null description: - Brick paths on servers. 
Multiple brick paths can be separated by commas + aliases: ['brick'] start_on_create: choices: [ 'yes', 'no'] required: false From cb05f0834b522c02745007fd08c4d184b8ce2e95 Mon Sep 17 00:00:00 2001 From: Peter Oliver Date: Mon, 20 Apr 2015 19:52:31 +0100 Subject: [PATCH 204/224] Fix typo. --- packaging/os/pkg5.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index 0e4c565b90e..b250a02850c 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -79,8 +79,8 @@ def main(): ), accept_licenses=dict( choices=BOOLEANS, - default=False - aliases=['accept_licences', 'accept'] + default=False, + aliases=['accept_licences', 'accept'], ), ) ) From 7458cdd72253b5dcdbd8fa892b722deb7ee1b69b Mon Sep 17 00:00:00 2001 From: Joseph Callen Date: Tue, 21 Apr 2015 10:38:40 -0400 Subject: [PATCH 205/224] New VMware Module to support adding a datacenter --- cloud/vmware_datacenter.py | 175 +++++++++++++++++++++++++++++++++++++ 1 file changed, 175 insertions(+) create mode 100644 cloud/vmware_datacenter.py diff --git a/cloud/vmware_datacenter.py b/cloud/vmware_datacenter.py new file mode 100644 index 00000000000..c3125760484 --- /dev/null +++ b/cloud/vmware_datacenter.py @@ -0,0 +1,175 @@ +#!/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: vmware_datacenter +short_description: Create VMware vSphere Datacenter +description: + - Create VMware vSphere Datacenter +version_added: 2.0 +author: Joseph Callen +notes: +requirements: + - Tested on vSphere 5.5 + - PyVmomi installed +options: + hostname: + description: + - The hostname or IP address of the vSphere vCenter + required: True + version_added: 2.0 + username: + description: + - The username of the vSphere vCenter + required: True + aliases: ['user', 'admin'] + version_added: 2.0 + password: + description: + - The password of the vSphere vCenter + required: True + aliases: ['pass', 'pwd'] + version_added: 2.0 + datacenter_name: + description: + - The name of the datacenter the cluster will be created in. + required: True + version_added: 2.0 +''' + +EXAMPLES = ''' +# Example vmware_datacenter command from Ansible Playbooks +- name: Create Datacenter + local_action: > + vmware_datacenter + hostname="{{ ansible_ssh_host }}" username=root password=vmware + datacenter_name="datacenter" +''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def state_create_datacenter(module): + datacenter_name = module.params['datacenter_name'] + content = module.params['content'] + changed = True + datacenter = None + + folder = content.rootFolder + + try: + if not module.check_mode: + datacenter = folder.CreateDatacenter(name=datacenter_name) + module.exit_json(changed=changed, result=str(datacenter)) + except vim.fault.DuplicateName: + module.fail_json(msg="A datacenter with the name %s already exists" % datacenter_name) + except vim.fault.InvalidName: + module.fail_json(msg="%s is an invalid name for a cluster" % datacenter_name) + except vmodl.fault.NotSupported: + # This should never happen + module.fail_json(msg="Trying to create a datacenter on an incorrect folder object") + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except 
vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + + +def check_datacenter_state(module): + datacenter_name = module.params['datacenter_name'] + + try: + content = connect_to_api(module) + datacenter = find_datacenter_by_name(content, datacenter_name) + module.params['content'] = content + + if datacenter is None: + return 'absent' + else: + module.params['datacenter'] = datacenter + return 'present' + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + + +def state_destroy_datacenter(module): + datacenter = module.params['datacenter'] + changed = True + result = None + + try: + if not module.check_mode: + task = datacenter.Destroy_Task() + changed, result = wait_for_task(task) + module.exit_json(changed=changed, result=result) + except vim.fault.VimFault as vim_fault: + module.fail_json(msg=vim_fault.msg) + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + + +def state_update_datacenter(module): + module.exit_json(changed=False, msg="Currently Not Implemented") + + +def state_exit_unchanged(module): + module.exit_json(changed=False) + + +def main(): + + argument_spec = vmware_argument_spec() + argument_spec.update(dict(datacenter_name=dict(required=True, type='str'), + state=dict(default='present', choices=['present', 'absent'], type='str'))) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi is required for this module') + + datacenter_states = { + 'absent': { + 'present': state_destroy_datacenter, + 'absent': state_exit_unchanged, + }, + 'present': { + 'update': state_update_datacenter, + 'present': state_exit_unchanged, + 'absent': state_create_datacenter, + } + } + desired_state = module.params['state'] + 
current_state = check_datacenter_state(module) + + datacenter_states[desired_state][current_state](module) + + +from ansible.module_utils.basic import * +from ansible.module_utils.vmware import * + +if __name__ == '__main__': + main() From 1fa73cd0a3ce99cefbc4712e2a562cf19296d51a Mon Sep 17 00:00:00 2001 From: Doug Luce Date: Tue, 10 Mar 2015 18:03:20 -0700 Subject: [PATCH 206/224] Add the cronvar module. This manages environment variables in Vixie crontabs. It includes addition/removal/replacement of variables and ordering via the insertbefore/insertafter parameters. --- system/cronvar.py | 430 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 430 insertions(+) create mode 100755 system/cronvar.py diff --git a/system/cronvar.py b/system/cronvar.py new file mode 100755 index 00000000000..23a626472c3 --- /dev/null +++ b/system/cronvar.py @@ -0,0 +1,430 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# Cronvar Plugin: The goal of this plugin is to provide an indempotent +# method for set cron variable values. It should play well with the +# existing cron module as well as allow for manually added variables. +# Each variable entered will be preceded with a comment describing the +# variable so that it can be found later. 
This is required to be +# present in order for this plugin to find/modify the variable +# +# This module is based on the crontab module. +# + +DOCUMENTATION = """ +--- +module: cronvar +short_description: Manage variables in crontabs +description: + - Use this module to manage crontab variables. This module allows + you to create, update, or delete cron variable definitions. +version_added: "2.0" +options: + name: + description: + - Name of the crontab variable. + default: null + required: true + value: + description: + - The value to set this variable to. Required if state=present. + required: false + default: null + insertafter: + required: false + default: null + description: + - Used with C(state=present). If specified, the variable will be inserted + after the variable specified. + insertbefore: + required: false + default: null + description: + - Used with C(state=present). If specified, the variable will be inserted + just before the variable specified. + state: + description: + - Whether to ensure that the variable is present or absent. + required: false + default: present + choices: [ "present", "absent" ] + user: + description: + - The specific user whose crontab should be modified. + required: false + default: root + cron_file: + description: + - If specified, uses this file in cron.d instead of an individual user's crontab. + required: false + default: null + backup: + description: + - If set, create a backup of the crontab before it is modified. + The location of the backup is returned in the C(backup) variable by this module. + required: false + default: false +requirements: + - cron +author: Doug Luce +""" + +EXAMPLES = ''' +# Ensure a variable exists. +# Creates an entry like "EMAIL=doug@ansibmod.con.com" +- cronvar: name="EMAIL" value="doug@ansibmod.con.com" + +# Make sure a variable is gone. 
This will remove any variable named +# "LEGACY" +- cronvar: name="LEGACY" state=absent + +# Adds a variable to a file under /etc/cron.d +- cronvar: name="LOGFILE" value="/var/log/yum-autoupdate.log" + user="root" cron_file=ansible_yum-autoupdate +''' + +import os +import re +import tempfile +import platform +import pipes +import shlex + +CRONCMD = "/usr/bin/crontab" + +class CronVarError(Exception): + pass + +class CronVar(object): + """ + CronVar object to write variables to crontabs. + + user - the user of the crontab (defaults to root) + cron_file - a cron file under /etc/cron.d + """ + def __init__(self, module, user=None, cron_file=None): + self.module = module + self.user = user + if self.user is None: + self.user = 'root' + self.lines = None + self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"', )) + # select whether we dump additional debug info through syslog + self.syslogging = False + + if cron_file: + self.cron_file = '/etc/cron.d/%s' % cron_file + else: + self.cron_file = None + + self.read() + + def read(self): + # Read in the crontab from the system + self.lines = [] + if self.cron_file: + # read the cronfile + try: + f = open(self.cron_file, 'r') + self.lines = f.read().splitlines() + f.close() + except IOError, e: + # cron file does not exist + return + except: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + else: + # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME + (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) + + if rc != 0 and rc != 1: # 1 can mean that there are no jobs. 
+ raise CronVarError("Unable to read crontab") + + lines = out.splitlines() + count = 0 + for l in lines: + if count > 2 or (not re.match( r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and + not re.match( r'# \(/tmp/.*installed on.*\)', l) and + not re.match( r'# \(.*version.*\)', l)): + self.lines.append(l) + count += 1 + + def log_message(self, message): + if self.syslogging: + syslog.syslog(syslog.LOG_NOTICE, 'ansible: "%s"' % message) + + def write(self, backup_file=None): + """ + Write the crontab to the system. Saves all information. + """ + if backup_file: + fileh = open(backup_file, 'w') + elif self.cron_file: + fileh = open(self.cron_file, 'w') + else: + filed, path = tempfile.mkstemp(prefix='crontab') + fileh = os.fdopen(filed, 'w') + + fileh.write(self.render()) + fileh.close() + + # return if making a backup + if backup_file: + return + + # Add the entire crontab back to the user crontab + if not self.cron_file: + # quoting shell args for now but really this should be two non-shell calls. 
FIXME + (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) + os.unlink(path) + + if rc != 0: + self.module.fail_json(msg=err) + + def remove_variable_file(self): + try: + os.unlink(self.cron_file) + return True + except OSError, e: + # cron file does not exist + return False + except: + raise CronVarError("Unexpected error:", sys.exc_info()[0]) + + def parse_for_var(self, line): + lexer = shlex.shlex(line) + lexer.wordchars = self.wordchars + varname = lexer.get_token() + is_env_var = lexer.get_token() == '=' + value = ''.join(lexer) + if is_env_var: + return (varname, value) + raise CronVarError("Not a variable.") + + def find_variable(self, name): + comment = None + for l in self.lines: + try: + (varname, value) = self.parse_for_var(l) + if varname == name: + return value + except CronVarError: + pass + return None + + def get_var_names(self): + var_names = [] + for l in self.lines: + try: + (var_name, _) = self.parse_for_var(l) + var_names.append(var_name) + except CronVarError: + pass + return var_names + + def add_variable(self, name, value, insertbefore, insertafter): + if insertbefore is None and insertafter is None: + # Add the variable to the top of the file. + self.lines.insert(0, "%s=%s" % (name, value)) + else: + newlines = [] + for l in self.lines: + try: + (varname, _) = self.parse_for_var(l) # Throws if not a var line + if varname == insertbefore: + newlines.append("%s=%s" % (name, value)) + newlines.append(l) + elif varname == insertafter: + newlines.append(l) + newlines.append("%s=%s" % (name, value)) + else: + raise CronVarError # Append. 
+ except CronVarError: + newlines.append(l) + + self.lines = newlines + + def remove_variable(self, name): + self.update_variable(name, None, remove=True) + + def update_variable(self, name, value, remove=False): + newlines = [] + for l in self.lines: + try: + (varname, _) = self.parse_for_var(l) # Throws if not a var line + if varname != name: + raise CronVarError # Append. + if not remove: + newlines.append("%s=%s" % (name, value)) + except CronVarError: + newlines.append(l) + + self.lines = newlines + + def render(self): + """ + Render a proper crontab + """ + result = '\n'.join(self.lines) + if result and result[-1] not in ['\n', '\r']: + result += '\n' + return result + + def _read_user_execute(self): + """ + Returns the command line for reading a crontab + """ + user = '' + + if self.user: + if platform.system() == 'SunOS': + return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD)) + elif platform.system() == 'AIX': + return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user)) + elif platform.system() == 'HP-UX': + return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user)) + else: + user = '-u %s' % pipes.quote(self.user) + return "%s %s %s" % (CRONCMD , user, '-l') + + def _write_execute(self, path): + """ + Return the command line for writing a crontab + """ + user = '' + if self.user: + if platform.system() in ['SunOS', 'HP-UX', 'AIX']: + return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path)) + else: + user = '-u %s' % pipes.quote(self.user) + return "%s %s %s" % (CRONCMD , user, pipes.quote(path)) + +#================================================== + +def main(): + # The following example playbooks: + # + # - cronvar: name="SHELL" value="/bin/bash" + # + # - name: Set the email + # cronvar: name="EMAILTO" value="doug@ansibmod.con.com" + # + # - name: Get rid of the old new host variable + # cronvar: name="NEW_HOST" state=absent + # + # Would 
produce:
+    #     SHELL = /bin/bash
+    #     EMAILTO = doug@ansibmod.con.com
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            value=dict(required=False),
+            user=dict(required=False),
+            cron_file=dict(required=False),
+            insertafter=dict(default=None),
+            insertbefore=dict(default=None),
+            state=dict(default='present', choices=['present', 'absent']),
+            backup=dict(default=False, type='bool'),
+        ),
+        mutually_exclusive=[['insertbefore', 'insertafter']],
+        supports_check_mode=False,
+    )
+
+    name = module.params['name']
+    value = module.params['value']
+    user = module.params['user']
+    cron_file = module.params['cron_file']
+    insertafter = module.params['insertafter']
+    insertbefore = module.params['insertbefore']
+    state = module.params['state']
+    backup = module.params['backup']
+    ensure_present = state == 'present'
+
+    changed = False
+    res_args = dict()
+
+    # Ensure all files generated are only writable by the owning user.  Primarily relevant for the cron_file option.
+    os.umask(022)
+    cronvar = CronVar(module, user, cron_file)
+
+    if cronvar.syslogging:
+        syslog.openlog('ansible-%s' % os.path.basename(__file__))
+        syslog.syslog(syslog.LOG_NOTICE, 'cronvar instantiated - name: "%s"' % name)
+
+    # --- user input validation ---
+
+    if name is None and ensure_present:
+        module.fail_json(msg="You must specify 'name' to insert a new cron variable")
+
+    if value is None and ensure_present:
+        module.fail_json(msg="You must specify 'value' to insert a new cron variable")
+
+    if name is None and not ensure_present:
+        module.fail_json(msg="You must specify 'name' to remove a cron variable")
+
+    # if requested make a backup before making a change
+    if backup:
+        (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
+        cronvar.write(backup_file)
+
+    if cronvar.cron_file and not name and not ensure_present:
+        changed = cronvar.remove_variable_file()
+        module.exit_json(changed=changed, cron_file=cron_file, state=state)
+
+    old_value = cronvar.find_variable(name)
+
+    
if ensure_present: + if old_value is None: + cronvar.add_variable(name, value, insertbefore, insertafter) + changed = True + elif old_value != value: + cronvar.update_variable(name, value) + changed = True + else: + if old_value is not None: + cronvar.remove_variable(name) + changed = True + + res_args = { + "vars": cronvar.get_var_names(), + "changed": changed + } + + if changed: + cronvar.write() + + # retain the backup only if crontab or cron file have changed + if backup: + if changed: + res_args['backup_file'] = backup_file + else: + os.unlink(backup_file) + + if cron_file: + res_args['cron_file'] = cron_file + + module.exit_json(**res_args) + + # --- should never get here + module.exit_json(msg="Unable to execute cronvar task.") + +# import module snippets +from ansible.module_utils.basic import * + +main() From 3148aafa17c64b002693dc3f41216a87b4456b83 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 22 Apr 2015 10:18:34 -0400 Subject: [PATCH 207/224] moved new module to proper subdir fixed doc issues minor code adjustments --- cloud/vmware/__init__.py | 0 cloud/{ => vmware}/vmware_datacenter.py | 36 ++++++++++++------------- 2 files changed, 18 insertions(+), 18 deletions(-) create mode 100644 cloud/vmware/__init__.py rename cloud/{ => vmware}/vmware_datacenter.py (88%) diff --git a/cloud/vmware/__init__.py b/cloud/vmware/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/vmware_datacenter.py b/cloud/vmware/vmware_datacenter.py similarity index 88% rename from cloud/vmware_datacenter.py rename to cloud/vmware/vmware_datacenter.py index c3125760484..35cf7fa4692 100644 --- a/cloud/vmware_datacenter.py +++ b/cloud/vmware/vmware_datacenter.py @@ -1,4 +1,4 @@ -#!/bin/python +#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Joseph Callen @@ -21,38 +21,39 @@ DOCUMENTATION = ''' --- module: vmware_datacenter -short_description: Create VMware vSphere Datacenter +short_description: Manage VMware vSphere Datacenters description: 
- - Create VMware vSphere Datacenter + - Manage VMware vSphere Datacenters version_added: 2.0 author: Joseph Callen notes: -requirements: - Tested on vSphere 5.5 - - PyVmomi installed +requirements: + - PyVmomi options: - hostname: + hostname: description: - - The hostname or IP address of the vSphere vCenter + - The hostname or IP address of the vSphere vCenter API server required: True - version_added: 2.0 username: description: - The username of the vSphere vCenter required: True aliases: ['user', 'admin'] - version_added: 2.0 password: description: - The password of the vSphere vCenter required: True aliases: ['pass', 'pwd'] - version_added: 2.0 datacenter_name: description: - The name of the datacenter the cluster will be created in. required: True - version_added: 2.0 + state: + description: + - If the datacenter should be present or absent + choices: ['present', 'absent'] + required: True ''' EXAMPLES = ''' @@ -133,10 +134,6 @@ def state_destroy_datacenter(module): module.fail_json(msg=method_fault.msg) -def state_update_datacenter(module): - module.exit_json(changed=False, msg="Currently Not Implemented") - - def state_exit_unchanged(module): module.exit_json(changed=False) @@ -144,8 +141,12 @@ def state_exit_unchanged(module): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict(datacenter_name=dict(required=True, type='str'), - state=dict(default='present', choices=['present', 'absent'], type='str'))) + argument_spec.update( + dict( + datacenter_name=dict(required=True, type='str'), + state=dict(required=True, choices=['present', 'absent'], type='str'), + ) + ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if not HAS_PYVMOMI: @@ -157,7 +158,6 @@ def main(): 'absent': state_exit_unchanged, }, 'present': { - 'update': state_update_datacenter, 'present': state_exit_unchanged, 'absent': state_create_datacenter, } From a56c8ebff19a1c6ccc1d801e04e344cae6322b0a Mon Sep 17 00:00:00 2001 From: Matt Jaynes 
Date: Wed, 22 Apr 2015 21:14:47 +0200 Subject: [PATCH 208/224] Document 'msg' param and fix examples The 'msg' alias for 'subject' isn't in the documentation, so adding it. In the gmail example, it uses both the 'subject' and 'msg' params, but 'msg' is an alias of 'subject', so you are essentially declaring the same param twice. If you use this example, then no subject is sent (I tested with gmail). Documentation example is updated to use 'body' as intended. Also, updated the simple example to use 'subject' instead of the 'msg' alias since it is more explicit. --- notification/mail.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/notification/mail.py b/notification/mail.py index a1ec44087dd..10ef61ef2be 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -61,7 +61,7 @@ options: required: false subject: description: - - The subject of the email being sent. + - The subject of the email being sent. Alias: I(msg) aliases: [ msg ] required: true body: @@ -115,7 +115,7 @@ options: EXAMPLES = ''' # Example playbook sending mail to root -- local_action: mail msg='System {{ ansible_hostname }} has been successfully provisioned.' +- local_action: mail subject='System {{ ansible_hostname }} has been successfully provisioned.' # Sending an e-mail using Gmail SMTP servers - local_action: mail @@ -125,7 +125,7 @@ EXAMPLES = ''' password='mysecret' to="John Smith " subject='Ansible-report' - msg='System {{ ansible_hostname }} has been successfully provisioned.' + body='System {{ ansible_hostname }} has been successfully provisioned.' 
# Send e-mail to a bunch of users, attaching files - local_action: mail From 39028d6cefc9617da0585c93f7d11f3968e09647 Mon Sep 17 00:00:00 2001 From: Matt Jaynes Date: Wed, 22 Apr 2015 22:23:07 +0200 Subject: [PATCH 209/224] Remove mentions of 'msg' alias --- notification/mail.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/notification/mail.py b/notification/mail.py index 10ef61ef2be..ae33c5ca4ca 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -61,8 +61,7 @@ options: required: false subject: description: - - The subject of the email being sent. Alias: I(msg) - aliases: [ msg ] + - The subject of the email being sent. required: true body: description: From 3900643352b1da677847f00eb4e0b9f9d5cee9b6 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Sun, 26 Apr 2015 22:27:53 +0100 Subject: [PATCH 210/224] documentation tweaks to fix missing arguments and specification of defaults --- clustering/consul | 99 ++++++++++++++++++++++++++------------- clustering/consul_acl | 13 ++++- clustering/consul_kv | 30 ++++++++---- clustering/consul_session | 38 +++++++++++++++ 4 files changed, 138 insertions(+), 42 deletions(-) diff --git a/clustering/consul b/clustering/consul index 15a68f068a2..fec55726539 100644 --- a/clustering/consul +++ b/clustering/consul @@ -24,19 +24,19 @@ short_description: "Add, modify & delete services within a consul cluster. description: - registers services and checks for an agent with a consul cluster. A service is some process running on the agent node that should be advertised by - consul's discovery mechanism. It may optionally supply a check definition - that will be used to notify the consul cluster of the health of the service. + consul's discovery mechanism. It may optionally supply a check definition, + a periodic service test to notify the consul cluster of service's health. Checks may also be registered per node e.g. disk usage, or cpu usage and notify the health of the entire node to the cluster. 
Service level checks do not require a check name or id as these are derived by Consul from the Service name and id respectively by appending 'service:'. - Node level checks require a check_name and optionally a check_id Currently, - there is no complete way to retrieve the script, interval or ttl metadata for - a registered check. Without this metadata it is not possible to tell if - the data supplied with ansible represents a change to a check. As a result - this does not attempt to determine changes and will always report a changed - occurred. An api method is planned to supply this metadata so at that stage - change management will be added. + Node level checks require a check_name and optionally a check_id. + Currently, there is no complete way to retrieve the script, interval or ttl + metadata for a registered check. Without this metadata it is not possible to + tell if the data supplied with ansible represents a change to a check. As a + result this does not attempt to determine changes and will always report a + changed occurred. An api method is planned to supply this metadata so at that + stage change management will be added. version_added: "1.9" author: Steve Gargan (steve.gargan@gmail.com) options: @@ -45,71 +45,105 @@ options: - register or deregister the consul service, defaults to present required: true choices: ['present', 'absent'] + service_name: + desciption: + - Unique name for the service on a node, must be unique per node, + required if registering a service. 
May be omitted if registering
+              a node level check
+        required: false
     service_id:
         description:
             - the ID for the service, must be unique per node, defaults to the
-              service name
+              service name if the service name is supplied
         required: false
+        default: service_name if supplied
     host:
         description:
-            - host of the consul agent with which to register the service,
-              defaults to localhost
+            - host of the consul agent defaults to localhost
+        required: false
+        default: localhost
+    port:
+        description:
+            - the port on which the consul agent is running
         required: false
+        default: 8500
     notes:
         description:
             - Notes to attach to check when registering it.
-    service_name:
-        desciption:
-            - Unique name for the service on a node, must be unique per node,
-              required if registering a service. May be ommitted if registering
-              a node level check
         required: false
+        default: None
     service_port:
         description:
             - the port on which the service is listening required for
-              registration of a service.
-        required: true
+              registration of a service, i.e. if service_name or service_id is set
+        required: false
     tags:
         description:
             - a list of tags that will be attached to the service registration.
         required: false
+        default: None
     script:
         description:
             - the script/command that will be run periodically to check the health
-              of the service
+              of the service. Scripts require an interval and vice versa
         required: false
+        default: None
     interval:
         description:
-            - the interval at which the service check will be run. This is by
-              convention a number with a s or m to signify the units of seconds
-              or minutes. if none is supplied, m will be appended
+            - the interval at which the service check will be run. This is a number
+              with a s or m suffix to signify the units of seconds or minutes e.g
+              15s or 1m. If no suffix is supplied, m will be used by default e.g.
+              1 will be 1m. Required if the script param is specified. 
+        required: false
+        default: None
     check_id:
         description:
             - an ID for the service check, defaults to the check name, ignored if
-              part of service definition.
+              part of a service definition.
+        required: false
+        default: None
     check_name:
         description:
             - a name for the service check, defaults to the check id. required if
               standalone, ignored if part of service definition.
+        required: false
+        default: None
+    ttl:
+        description:
+            - checks can be registered with a ttl instead of a script and interval
+              this means that the service will check in with the agent before the
+              ttl expires. If it doesn't the check will be considered failed.
+              Required if registering a check and the script and interval are missing
+              Similar to the interval this is a number with a s or m suffix to
+              signify the units of seconds or minutes e.g 15s or 1m. If no suffix
+              is supplied, m will be used by default e.g. 1 will be 1m
+        required: false
+        default: None
+    token:
+        description:
+            - the token key identifying an ACL rule set. May be required to
+              register services. 
+ required: false + default: None """ EXAMPLES = ''' - name: register nginx service with the local consul agent consul: name: nginx - port: 80 + service_port: 80 - name: register nginx service with curl check consul: name: nginx - port: 80 + service_port: 80 script: "curl http://localhost" interval: 60s - name: register nginx with some service tags consul: name: nginx - port: 80 + service_port: 80 tags: - prod - webservers @@ -432,23 +466,22 @@ class ConsulCheck(): def main(): module = AnsibleModule( argument_spec=dict( + host=dict(default='localhost'), + port=dict(default=8500, type='int'), check_id=dict(required=False), check_name=dict(required=False), - host=dict(default='localhost'), - interval=dict(required=False, type='str'), - ttl=dict(required=False, type='str'), check_node=dict(required=False), check_host=dict(required=False), notes=dict(required=False), - port=dict(default=8500, type='int'), script=dict(required=False), service_id=dict(required=False), service_name=dict(required=False), service_port=dict(required=False, type='int'), state=dict(default='present', choices=['present', 'absent']), + interval=dict(required=False, type='str'), + ttl=dict(required=False, type='str'), tags=dict(required=False, type='list'), - token=dict(required=False), - url=dict(default='http://localhost:8500') + token=dict(required=False) ), supports_check_mode=False, ) diff --git a/clustering/consul_acl b/clustering/consul_acl index cd5466c53b1..5e50c54431e 100644 --- a/clustering/consul_acl +++ b/clustering/consul_acl @@ -22,7 +22,8 @@ module: consul_acl short_description: "manipulate consul acl keys and rules" description: - allows the addition, modification and deletion of ACL keys and associated - rules in a consul cluster via the agent. + rules in a consul cluster via the agent. 
For more details on using and + configuring ACLs, see https://www.consul.io/docs/internals/acl.html version_added: "1.9" author: Steve Gargan (steve.gargan@gmail.com) options: @@ -53,6 +54,16 @@ options: description: - an list of the rules that should be associated with a given key/token. required: false + host: + description: + - host of the consul agent defaults to localhost + required: false + default: localhost + port: + description: + - the port on which the consul agent is running + required: false + default: 8500 """ EXAMPLES = ''' diff --git a/clustering/consul_kv b/clustering/consul_kv index 8999a43319f..a9132a3d1c2 100644 --- a/clustering/consul_kv +++ b/clustering/consul_kv @@ -42,8 +42,9 @@ options: 'release' respectively. a valid session must be supplied to make the attempt changed will be true if the attempt is successful, false otherwise. - required: true + required: false choices: ['present', 'absent', 'acquire', 'release'] + default: present key: description: - the key at which the value should be stored. @@ -57,30 +58,43 @@ options: description: - if the key represents a prefix, each entry with the prefix can be retrieved by setting this to true. - required: true + required: false + default: false session: description: - the session that should be used to acquire or release a lock associated with a key/value pair + required: false + default: None token: description: - the token key indentifying an ACL rule set that controls access to the key value pair required: false - url: - description: - - location of the consul agent with which access the keay/value store, - defaults to http://localhost:8500 - required: false + default: None cas: description: - used when acquiring a lock with a session. If the cas is 0, then Consul will only put the key if it does not already exist. If the cas value is non-zero, then the key is only set if the index matches the ModifyIndex of that key. 
+ required: false + default: None flags: description: - opaque integer value that can be passed when setting a value. + required: false + default: None + host: + description: + - host of the consul agent defaults to localhost + required: false + default: localhost + port: + description: + - the port on which the consul agent is running + required: false + default: 8500 """ @@ -214,8 +228,8 @@ def main(): argument_spec = dict( cas=dict(required=False), flags=dict(required=False), - host=dict(default='localhost'), key=dict(required=True), + host=dict(default='localhost'), port=dict(default=8500, type='int'), recurse=dict(required=False, type='bool'), retrieve=dict(required=False, default=True), diff --git a/clustering/consul_session b/clustering/consul_session index 00f4cae7344..7088dc275ba 100644 --- a/clustering/consul_session +++ b/clustering/consul_session @@ -39,35 +39,73 @@ options: node name or session id is required as parameter. required: false choices: ['present', 'absent', 'info', 'node', 'list'] + default: present name: description: - the name that should be associated with the session. This is opaque to Consul and not required. required: false + default: None delay: description: - the optional lock delay that can be attached to the session when it is created. Locks for invalidated sessions ar blocked from being acquired until this delay has expired. default: 15s + required: false node: description: - the name of the node that with which the session will be associated. by default this is the name of the agent. + required: false + default: None datacenter: description: - name of the datacenter in which the session exists or should be created. + required: false + default: None checks: description: - a list of checks that will be used to verify the session health. If all the checks fail, the session will be invalidated and any locks associated with the session will be release and can be acquired once the associated lock delay has expired. 
+ required: false + default: None + host: + description: + - host of the consul agent defaults to localhost + required: false + default: localhost + port: + description: + - the port on which the consul agent is running + required: false + default: 8500 """ EXAMPLES = ''' +- name: register basic session with consul + consul_session: + name: session1 + +- name: register a session with an existing check + consul_session: + name: session_with_check + checks: + - existing_check_name + +- name: register a session with lock_delay + consul_session: + name: session_with_delay + delay: 20 + +- name: retrieve info about session by id + consul_session: id=session_id state=info +- name: retrieve active sessions + consul_session: state=list ''' import sys From ef019e61631396637d7fd61668d97a213c99823c Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 27 Apr 2015 00:05:41 +0200 Subject: [PATCH 211/224] cloudstack: fix missing zone param used in get_vm() in utils --- cloud/cloudstack/cs_vmsnapshot.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index d53a33ac72e..7d9b47b56d8 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -41,6 +41,11 @@ options: - Snapshot memory if set to true. required: false default: false + zone: + description: + - Name of the zone in which the VM is in. If not set, default zone is used. + required: false + default: null project: description: - Name of the project the VM is assigned to. 
@@ -241,6 +246,7 @@ def main():
 vm = dict(required=True),
 description = dict(default=None),
 project = dict(default=None),
+ zone = dict(default=None),
 snapshot_memory = dict(choices=BOOLEANS, default=False),
 state = dict(choices=['present', 'absent', 'revert'], default='present'),
 poll_async = dict(choices=BOOLEANS, default=True),

From b0fb6b08a98665115ca4634d3351d67653cda4c4 Mon Sep 17 00:00:00 2001
From: Rene Moser
Date: Mon, 27 Apr 2015 00:06:42 +0200
Subject: [PATCH 212/224] cloudstack: fix missing doc about vm param

---
 cloud/cloudstack/cs_vmsnapshot.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py
index 7d9b47b56d8..89c0ec081d6 100644
--- a/cloud/cloudstack/cs_vmsnapshot.py
+++ b/cloud/cloudstack/cs_vmsnapshot.py
@@ -31,6 +31,10 @@ options:
 - Unique Name of the snapshot. In CloudStack terms C(displayname).
 required: true
 aliases: ['displayname']
+ vm:
+ description:
+ - Name of the virtual machine.
+ required: true
 description:
 description:
 - Description of the snapshot.

From 3a6f57cbc0d1f307e87cdb8cf256452bb30d6cf6 Mon Sep 17 00:00:00 2001
From: Steve Gargan
Date: Mon, 27 Apr 2015 13:40:21 +0100
Subject: [PATCH 213/224] use module.fail_json to report import errors.
 document valid duration units

---
 clustering/consul | 20 ++++++++++++++------
 clustering/consul_acl | 31 +++++++++++++++++++++---------
 clustering/consul_kv | 19 ++++++++++++++-----
 clustering/consul_session | 40 +++++++++++++++++++++++++++------------
 4 files changed, 78 insertions(+), 32 deletions(-)

diff --git a/clustering/consul b/clustering/consul
index fec55726539..5db79e20c40 100644
--- a/clustering/consul
+++ b/clustering/consul
@@ -37,6 +37,9 @@ description:
 result this does not attempt to determine changes and will always report a
 changed occurred. An api method is planned to supply this metadata so at
 that stage change management will be added.
+requirements: + - python-consul + - requests version_added: "1.9" author: Steve Gargan (steve.gargan@gmail.com) options: @@ -172,13 +175,11 @@ except ImportError: try: import consul + from requests.exceptions import ConnectionError + python_consul_installed = True except ImportError, e: - print "failed=True msg='python-consul required for this module. "\ - "see http://python-consul.readthedocs.org/en/latest/#installation'" - sys.exit(1) - -from requests.exceptions import ConnectionError - + python_consul_installed = False + def register_with_consul(module): state = module.params.get('state') @@ -462,6 +463,10 @@ class ConsulCheck(): except: pass +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. "\ + "see http://python-consul.readthedocs.org/en/latest/#installation") def main(): module = AnsibleModule( @@ -485,6 +490,9 @@ def main(): ), supports_check_mode=False, ) + + test_dependencies(module) + try: register_with_consul(module) except ConnectionError, e: diff --git a/clustering/consul_acl b/clustering/consul_acl index 5e50c54431e..c481b780a64 100644 --- a/clustering/consul_acl +++ b/clustering/consul_acl @@ -23,7 +23,11 @@ short_description: "manipulate consul acl keys and rules" description: - allows the addition, modification and deletion of ACL keys and associated rules in a consul cluster via the agent. For more details on using and - configuring ACLs, see https://www.consul.io/docs/internals/acl.html + configuring ACLs, see https://www.consul.io/docs/internals/acl.html. +requirements: + - python-consul + - pyhcl + - requests version_added: "1.9" author: Steve Gargan (steve.gargan@gmail.com) options: @@ -91,17 +95,16 @@ import urllib2 try: import consul + from requests.exceptions import ConnectionError + python_consul_installed = True except ImportError, e: - print "failed=True msg='python-consul required for this module. 
"\ - "see http://python-consul.readthedocs.org/en/latest/#installation'" - sys.exit(1) + python_consul_installed = False try: import hcl + pyhcl_installed = True except ImportError: - print "failed=True msg='pyhcl required for this module."\ - " see https://pypi.python.org/pypi/pyhcl'" - sys.exit(1) + pyhcl_installed = False from requests.exceptions import ConnectionError @@ -271,6 +274,7 @@ class Rule: def __str__(self): return '%s %s' % (self.key, self.policy) + def get_consul_api(module, token=None): if not token: token = token = module.params.get('token') @@ -278,6 +282,14 @@ def get_consul_api(module, token=None): port=module.params.get('port'), token=token) +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. "\ + "see http://python-consul.readthedocs.org/en/latest/#installation") + + if not pyhcl_installed: + module.fail_json( msg="pyhcl required for this module."\ + " see https://pypi.python.org/pypi/pyhcl") def main(): argument_spec = dict( @@ -291,9 +303,10 @@ def main(): token_type=dict( required=False, choices=['client', 'management'], default='client') ) + module = AnsibleModule(argument_spec, supports_check_mode=False) - module = AnsibleModule(argument_spec, supports_check_mode=True) - + test_dependencies(module) + try: execute(module) except ConnectionError, e: diff --git a/clustering/consul_kv b/clustering/consul_kv index a9132a3d1c2..e5a010a8c18 100644 --- a/clustering/consul_kv +++ b/clustering/consul_kv @@ -27,6 +27,9 @@ description: the indices, flags and session are returned as 'value'. If the key represents a prefix then Note that when a value is removed, the existing value if any is returned as part of the results. 
+requirements: + - python-consul + - requests version_added: "1.9" author: Steve Gargan (steve.gargan@gmail.com) options: @@ -126,10 +129,10 @@ except ImportError: try: import consul + from requests.exceptions import ConnectionError + python_consul_installed = True except ImportError, e: - print """failed=True msg='python-consul required for this module. \ - see http://python-consul.readthedocs.org/en/latest/#installation'""" - sys.exit(1) + python_consul_installed = False from requests.exceptions import ConnectionError @@ -222,7 +225,11 @@ def get_consul_api(module, token=None): port=module.params.get('port'), token=module.params.get('token')) - +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. "\ + "see http://python-consul.readthedocs.org/en/latest/#installation") + def main(): argument_spec = dict( @@ -238,8 +245,10 @@ def main(): value=dict(required=False) ) - module = AnsibleModule(argument_spec, supports_check_mode=True) + module = AnsibleModule(argument_spec, supports_check_mode=False) + test_dependencies(module) + try: execute(module) except ConnectionError, e: diff --git a/clustering/consul_session b/clustering/consul_session index 7088dc275ba..8e6516891d2 100644 --- a/clustering/consul_session +++ b/clustering/consul_session @@ -25,6 +25,9 @@ description: cluster. These sessions can then be used in conjunction with key value pairs to implement distributed locks. In depth documentation for working with sessions can be found here http://www.consul.io/docs/internals/sessions.html +requirements: + - python-consul + - requests version_added: "1.9" author: Steve Gargan (steve.gargan@gmail.com) options: @@ -50,7 +53,8 @@ options: description: - the optional lock delay that can be attached to the session when it is created. Locks for invalidated sessions ar blocked from being - acquired until this delay has expired. + acquired until this delay has expired. 
Valid units for delays + include 'ns', 'us', 'ms', 's', 'm', 'h' default: 15s required: false node: @@ -99,7 +103,7 @@ EXAMPLES = ''' - name: register a session with lock_delay consul_session: name: session_with_delay - delay: 20 + delay: 20s - name: retrieve info about session by id consul_session: id=session_id state=info @@ -113,12 +117,10 @@ import urllib2 try: import consul + from requests.exceptions import ConnectionError + python_consul_installed = True except ImportError, e: - print "failed=True msg='python-consul required for this module. see "\ - "http://python-consul.readthedocs.org/en/latest/#installation'" - sys.exit(1) - -from requests.errors import ConnectionError + python_consul_installed = False def execute(module): @@ -182,11 +184,11 @@ def update_session(module): changed = True try: - + session = consul.session.create( name=name, node=node, - lock_delay=delay, + lock_delay=validate_duration('delay', delay), dc=datacenter, checks=checks ) @@ -219,15 +221,27 @@ def remove_session(module): module.fail_json(msg="Could not remove session with id '%s' %s" % ( session_id, e)) +def validate_duration(name, duration): + if duration: + duration_units = ['ns', 'us', 'ms', 's', 'm', 'h'] + if not any((duration.endswith(suffix) for suffix in duration_units)): + raise Exception('Invalid %s %s you must specify units (%s)' % + (name, duration, ', '.join(duration_units))) + return duration def get_consul_api(module): return consul.Consul(host=module.params.get('host'), port=module.params.get('port')) + +def test_dependencies(module): + if not python_consul_installed: + module.fail_json(msg="python-consul required for this module. 
"\ + "see http://python-consul.readthedocs.org/en/latest/#installation") def main(): argument_spec = dict( checks=dict(default=None, required=False, type='list'), - delay=dict(required=False,type='int', default=15), + delay=dict(required=False,type='str', default='15s'), host=dict(default='localhost'), port=dict(default=8500, type='int'), id=dict(required=False), @@ -237,8 +251,10 @@ def main(): choices=['present', 'absent', 'info', 'node', 'list']) ) - module = AnsibleModule(argument_spec, supports_check_mode=True) - + module = AnsibleModule(argument_spec, supports_check_mode=False) + + test_dependencies(module) + try: execute(module) except ConnectionError, e: From 764a0e26b6df02cf2924254589a065918b6ca5d6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Apr 2015 11:12:34 -0700 Subject: [PATCH 214/224] doc formatting --- cloud/cloudstack/cs_securitygroup_rule.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index a170230acac..709a9b562b3 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -53,28 +53,28 @@ options: - CIDR (full notation) to be used for security group rule. required: false default: '0.0.0.0/0' - user_security_group + user_security_group: description: - Security group this rule is based of. required: false default: null - start_port + start_port: description: - Start port for this rule. Required if C(protocol=tcp) or C(protocol=udp). required: false default: null aliases: [ 'port' ] - end_port + end_port: description: - End port for this rule. Required if C(protocol=tcp) or C(protocol=udp), but C(start_port) will be used if not set. required: false default: null - icmp_type + icmp_type: description: - Type of the icmp message being sent. Required if C(protocol=icmp). required: false default: null - icmp_code + icmp_code: description: - Error code for this icmp message. 
Required if C(protocol=icmp). required: false From 4ffb5f065ba66c36d06c1b7da5767e1cb5679bdc Mon Sep 17 00:00:00 2001 From: Robert Osowiecki Date: Thu, 30 Apr 2015 17:50:07 +0200 Subject: [PATCH 215/224] Using get_bin_path to find rmmod and modprobe --- system/modprobe.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/modprobe.py b/system/modprobe.py index 50c8f72fb2a..af845ae8cf5 100644 --- a/system/modprobe.py +++ b/system/modprobe.py @@ -97,13 +97,13 @@ def main(): # Add/remove module as needed if args['state'] == 'present': if not present: - rc, _, err = module.run_command(['modprobe', args['name'], args['params']]) + rc, _, err = module.run_command([module.get_bin_path('modprobe', True), args['name'], args['params']]) if rc != 0: module.fail_json(msg=err, **args) args['changed'] = True elif args['state'] == 'absent': if present: - rc, _, err = module.run_command(['rmmod', args['name']]) + rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']]) if rc != 0: module.fail_json(msg=err, **args) args['changed'] = True From 69e27f40396736e444f50c9c0339da25db465285 Mon Sep 17 00:00:00 2001 From: Roland Ramthun Date: Fri, 1 May 2015 10:01:56 +0200 Subject: [PATCH 216/224] disable ask on sync action --- packaging/os/portage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/portage.py b/packaging/os/portage.py index ab96cb22e60..eb77baa14f6 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -231,7 +231,7 @@ def sync_repositories(module, webrsync=False): webrsync_path = module.get_bin_path('emerge-webrsync', required=True) cmd = '%s --quiet' % webrsync_path else: - cmd = '%s --sync --quiet' % module.emerge_path + cmd = '%s --sync --quiet --ask=n' % module.emerge_path rc, out, err = module.run_command(cmd) if rc != 0: From 0b18bdc57fbc119406522a78042c46a5170ce5b7 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 1 May 2015 17:32:29 +0200 Subject: [PATCH 217/224] 
cloudstack: add new module cs_instance Manages instances and virtual machines --- cloud/cloudstack/cs_instance.py | 787 ++++++++++++++++++++++++++++++++ 1 file changed, 787 insertions(+) create mode 100644 cloud/cloudstack/cs_instance.py diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py new file mode 100644 index 00000000000..62856c6d177 --- /dev/null +++ b/cloud/cloudstack/cs_instance.py @@ -0,0 +1,787 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_instance +short_description: Manages instances and virtual machines on Apache CloudStack based clouds. +description: + - Deploy, start, restart, stop and destroy instances on Apache CloudStack, Citrix CloudPlatform and Exoscale. +version_added: '2.0' +author: René Moser +options: + name: + description: + - Host name of the instance. C(name) can only contain ASCII letters. + required: true + display_name: + description: + - Custom display name of the instances. + required: false + default: null + group: + description: + - Group in where the new instance should be in. + required: false + default: null + state: + description: + - State of the instance. 
+ required: false + default: 'present' + choices: [ 'deployed', 'started', 'stopped', 'restarted', 'destroyed', 'expunged', 'present', 'absent' ] + service_offering: + description: + - Name or id of the service offering of the new instance. If not set, first found service offering is used. + required: false + default: null + template: + description: + - Name or id of the template to be used for creating the new instance. Required when using C(state=present). Mutually exclusive with C(ISO) option. + required: false + default: null + iso: + description: + - Name or id of the ISO to be used for creating the new instance. Required when using C(state=present). Mutually exclusive with C(template) option. + required: false + default: null + hypervisor: + description: + - Name the hypervisor to be used for creating the new instance. Relevant when using C(state=present) and option C(ISO) is used. If not set, first found hypervisor will be used. + required: false + default: null + choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ] + keyboard: + description: + - Keyboard device type for the instance. + required: false + default: null + choices: [ 'de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us' ] + networks: + description: + - List of networks to use for the new instance. + required: false + default: [] + aliases: [ 'network' ] + ip_address: + description: + - IPv4 address for default instance's network during creation + required: false + default: null + ip6_address: + description: + - IPv6 address for default instance's network. + required: false + default: null + disk_offering: + description: + - Name of the disk offering to be used. + required: false + default: null + disk_size: + description: + - Disk size in GByte required if deploying instance from ISO. + required: false + default: null + security_groups: + description: + - List of security groups the instance to be applied to. 
+ required: false
+ default: []
+ aliases: [ 'security_group' ]
+ project:
+ description:
+ - Name of the project the instance to be deployed in.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the instance should be deployed. If not set, default zone is used.
+ required: false
+ default: null
+ ssh_key:
+ description:
+ - Name of the SSH key to be deployed on the new instance.
+ required: false
+ default: null
+ affinity_groups:
+ description:
+ - Affinity group names to be applied to the new instance.
+ required: false
+ default: []
+ aliases: [ 'affinity_group' ]
+ user_data:
+ description:
+ - Optional data (ASCII) that can be sent to the instance upon a successful deployment.
+ - The data will be automatically base64 encoded.
+ - Consider switching to HTTP_POST by using C(CLOUDSTACK_METHOD=post) to increase the HTTP_GET size limit of 2KB to 32 KB.
+ required: false
+ default: null
+ force:
+ description:
+ - Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed.
+ required: false
+ default: true
+ tags:
+ description:
+ - List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
+ - If you want to delete all tags, set an empty list e.g. C(tags: []).
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+'''
+
+EXAMPLES = '''
+---
+# Create an instance on CloudStack from an ISO
+# NOTE: Names of offerings and ISOs depend on the CloudStack configuration.
+- local_action: + module: cs_instance + name: web-vm-1 + iso: Linux Debian 7 64-bit + hypervisor: VMware + project: Integration + zone: ch-zrh-ix-01 + service_offering: 1cpu_1gb + disk_offering: PerfPlus Storage + disk_size: 20 + networks: + - Server Integration + - Sync Integration + - Storage Integration + + +# For changing a running instance, use the 'force' parameter +- local_action: + module: cs_instance + name: web-vm-1 + display_name: web-vm-01.example.com + iso: Linux Debian 7 64-bit + service_offering: 2cpu_2gb + force: yes + + +# Create or update a instance on Exoscale's public cloud +- local_action: + module: cs_instance + name: web-vm-1 + template: Linux Debian 7 64-bit + service_offering: Tiny + ssh_key: john@example.com + tags: + - { key: admin, value: john } + - { key: foo, value: bar } + register: vm + +- debug: msg='default ip {{ vm.default_ip }} and is in state {{ vm.state }}' + + +# Ensure a instance has stopped +- local_action: cs_instance name=web-vm-1 state=stopped + + +# Ensure a instance is running +- local_action: cs_instance name=web-vm-1 state=started + + +# Remove a instance +- local_action: cs_instance name=web-vm-1 state=absent +''' + +RETURN = ''' +--- +id: + description: ID of the instance. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the instance. + returned: success + type: string + sample: web-01 +display_name: + description: Display name of the instance. + returned: success + type: string + sample: web-01 +group: + description: Group name of the instance is related. + returned: success + type: string + sample: web +created: + description: Date of the instance was created. + returned: success + type: string + sample: 2014-12-01T14:57:57+0100 +password_enabled: + description: True if password setting is enabled. + returned: success + type: boolean + sample: true +password: + description: The password of the instance if exists. 
+ returned: success + type: string + sample: Ge2oe7Do +ssh_key: + description: Name of ssh key deployed to instance. + returned: success + type: string + sample: key@work +project: + description: Name of project the instance is related to. + returned: success + type: string + sample: Production +default_ip: + description: Default IP address of the instance. + returned: success + type: string + sample: 10.23.37.42 +public_ip: + description: Public IP address with instance via static nat rule. + returned: success + type: string + sample: 1.2.3.4 +iso: + description: Name of ISO the instance was deployed with. + returned: success + type: string + sample: Debian-8-64bit +template: + description: Name of template the instance was deployed with. + returned: success + type: string + sample: Debian-8-64bit +service_offering: + description: Name of the service offering the instance has. + returned: success + type: string + sample: 2cpu_2gb +zone: + description: Name of zone the instance is in. + returned: success + type: string + sample: ch-gva-2 +state: + description: State of the instance. + returned: success + type: string + sample: Running +security_groups: + description: Security groups the instance is in. + returned: success + type: list + sample: '[ "default" ]' +affinity_groups: + description: Affinity groups the instance is in. + returned: success + type: list + sample: '[ "webservers" ]' +tags: + description: List of resource tags associated with the instance. 
+ returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +''' + +import base64 + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackInstance(AnsibleCloudStack): + + def __init__(self, module): + AnsibleCloudStack.__init__(self, module) + self.instance = None + + + def get_service_offering_id(self): + service_offering = self.module.params.get('service_offering') + + service_offerings = self.cs.listServiceOfferings() + if service_offerings: + if not service_offering: + return service_offerings['serviceoffering'][0]['id'] + + for s in service_offerings['serviceoffering']: + if service_offering in [ s['name'], s['id'] ]: + return s['id'] + self.module.fail_json(msg="Service offering '%s' not found" % service_offering) + + + def get_template_or_iso_id(self): + template = self.module.params.get('template') + iso = self.module.params.get('iso') + + if not template and not iso: + self.module.fail_json(msg="Template or ISO is required.") + + if template and iso: + self.module.fail_json(msg="Template are ISO are mutually exclusive.") + + if template: + templates = self.cs.listTemplates(templatefilter='executable') + if templates: + for t in templates['template']: + if template in [ t['displaytext'], t['name'], t['id'] ]: + return t['id'] + self.module.fail_json(msg="Template '%s' not found" % template) + + elif iso: + isos = self.cs.listIsos() + if isos: + for i in isos['iso']: + if iso in [ i['displaytext'], i['name'], i['id'] ]: + return i['id'] + self.module.fail_json(msg="ISO '%s' not found" % iso) + + + def get_disk_offering_id(self): + disk_offering = self.module.params.get('disk_offering') + + if not disk_offering: + return None + + disk_offerings = self.cs.listDiskOfferings() + if disk_offerings: + for d in disk_offerings['diskoffering']: + if disk_offering in [ 
d['displaytext'], d['name'], d['id'] ]: + return d['id'] + self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering) + + + def get_instance(self): + instance = self.instance + if not instance: + instance_name = self.module.params.get('name') + + args = {} + args['projectid'] = self.get_project_id() + args['zoneid'] = self.get_zone_id() + instances = self.cs.listVirtualMachines(**args) + if instances: + for v in instances['virtualmachine']: + if instance_name in [ v['name'], v['displayname'], v['id'] ]: + self.instance = v + break + return self.instance + + + def get_network_ids(self): + network_names = self.module.params.get('networks') + if not network_names: + return None + + args = {} + args['zoneid'] = self.get_zone_id() + args['projectid'] = self.get_project_id() + networks = self.cs.listNetworks(**args) + if not networks: + self.module.fail_json(msg="No networks available") + + network_ids = [] + network_displaytexts = [] + for network_name in network_names: + for n in networks['network']: + if network_name in [ n['displaytext'], n['name'], n['id'] ]: + network_ids.append(n['id']) + network_displaytexts.append(n['name']) + break + + if len(network_ids) != len(network_names): + self.module.fail_json(msg="Could not find all networks, networks list found: %s" % network_displaytexts) + + return ','.join(network_ids) + + + def present_instance(self): + instance = self.get_instance() + if not instance: + instance = self.deploy_instance() + else: + instance = self.update_instance(instance) + + instance = self.ensure_tags(resource=instance, resource_type='UserVm') + + return instance + + + def get_user_data(self): + user_data = self.module.params.get('user_data') + if user_data: + user_data = base64.b64encode(user_data) + return user_data + + + def get_display_name(self): + display_name = self.module.params.get('display_name') + if not display_name: + display_name = self.module.params.get('name') + return display_name + + + def deploy_instance(self): 
+ self.result['changed'] = True + + args = {} + args['templateid'] = self.get_template_or_iso_id() + args['zoneid'] = self.get_zone_id() + args['serviceofferingid'] = self.get_service_offering_id() + args['projectid'] = self.get_project_id() + args['diskofferingid'] = self.get_disk_offering_id() + args['networkids'] = self.get_network_ids() + args['hypervisor'] = self.get_hypervisor() + args['userdata'] = self.get_user_data() + args['keyboard'] = self.module.params.get('keyboard') + args['ipaddress'] = self.module.params.get('ip_address') + args['ip6address'] = self.module.params.get('ip6_address') + args['name'] = self.module.params.get('name') + args['group'] = self.module.params.get('group') + args['keypair'] = self.module.params.get('ssh_key') + args['size'] = self.module.params.get('disk_size') + args['securitygroupnames'] = ','.join(self.module.params.get('security_groups')) + args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups')) + + instance = None + if not self.module.check_mode: + instance = self.cs.deployVirtualMachine(**args) + + if 'errortext' in instance: + self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self._poll_job(instance, 'virtualmachine') + return instance + + + def update_instance(self, instance): + args_service_offering = {} + args_service_offering['id'] = instance['id'] + args_service_offering['serviceofferingid'] = self.get_service_offering_id() + + args_instance_update = {} + args_instance_update['id'] = instance['id'] + args_instance_update['group'] = self.module.params.get('group') + args_instance_update['displayname'] = self.get_display_name() + args_instance_update['userdata'] = self.get_user_data() + args_instance_update['ostypeid'] = self.get_os_type_id() + + args_ssh_key = {} + args_ssh_key['id'] = instance['id'] + args_ssh_key['keypair'] = self.module.params.get('ssh_key') + args_ssh_key['projectid'] = 
self.get_project_id() + + if self._has_changed(args_service_offering, instance) or \ + self._has_changed(args_instance_update, instance) or \ + self._has_changed(args_ssh_key, instance): + + force = self.module.params.get('force') + instance_state = instance['state'].lower() + + if instance_state == 'stopped' or force: + self.result['changed'] = True + if not self.module.check_mode: + + # Ensure VM has stopped + instance = self.stop_instance() + instance = self._poll_job(instance, 'virtualmachine') + self.instance = instance + + # Change service offering + if self._has_changed(args_service_offering, instance): + res = self.cs.changeServiceForVirtualMachine(**args_service_offering) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + instance = res['virtualmachine'] + self.instance = instance + + # Update VM + if self._has_changed(args_instance_update, instance): + res = self.cs.updateVirtualMachine(**args_instance_update) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + instance = res['virtualmachine'] + self.instance = instance + + # Reset SSH key + if self._has_changed(args_ssh_key, instance): + instance = self.cs.resetSSHKeyForVirtualMachine(**args_ssh_key) + if 'errortext' in instance: + self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) + + instance = self._poll_job(instance, 'virtualmachine') + self.instance = instance + + # Start VM again if it was running before + if instance_state == 'running': + instance = self.start_instance() + return instance + + + def absent_instance(self): + instance = self.get_instance() + if instance: + if instance['state'].lower() not in ['expunging', 'destroying', 'destroyed']: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.destroyVirtualMachine(id=instance['id']) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') 
+ if poll_async: + instance = self._poll_job(res, 'virtualmachine') + return instance + + + def expunge_instance(self): + instance = self.get_instance() + if instance: + res = {} + if instance['state'].lower() in [ 'destroying', 'destroyed' ]: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.expungeVirtualMachine(id=instance['id']) + + elif instance['state'].lower() not in [ 'expunging' ]: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True) + + if res and 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self._poll_job(res, 'virtualmachine') + return instance + + + def stop_instance(self): + instance = self.get_instance() + if not instance: + self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name')) + + if instance['state'].lower() in ['stopping', 'stopped']: + return instance + + if instance['state'].lower() in ['starting', 'running']: + self.result['changed'] = True + if not self.module.check_mode: + instance = self.cs.stopVirtualMachine(id=instance['id']) + + if 'errortext' in instance: + self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self._poll_job(instance, 'virtualmachine') + return instance + + + def start_instance(self): + instance = self.get_instance() + if not instance: + self.module.fail_json(msg="Instance named '%s' not found" % module.params.get('name')) + + if instance['state'].lower() in ['starting', 'running']: + return instance + + if instance['state'].lower() in ['stopped', 'stopping']: + self.result['changed'] = True + if not self.module.check_mode: + instance = self.cs.startVirtualMachine(id=instance['id']) + + if 'errortext' in instance: + self.module.fail_json(msg="Failed: '%s'" % 
instance['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self._poll_job(instance, 'virtualmachine') + return instance + + + def restart_instance(self): + instance = self.get_instance() + if not instance: + self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name')) + + if instance['state'].lower() in [ 'running', 'starting' ]: + self.result['changed'] = True + if not self.module.check_mode: + instance = self.cs.rebootVirtualMachine(id=instance['id']) + + if 'errortext' in instance: + self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self._poll_job(instance, 'virtualmachine') + + elif instance['state'].lower() in [ 'stopping', 'stopped' ]: + instance = self.start_instance() + return instance + + + def get_result(self, instance): + if instance: + if 'id' in instance: + self.result['id'] = instance['id'] + if 'name' in instance: + self.result['name'] = instance['name'] + if 'displayname' in instance: + self.result['display_name'] = instance['displayname'] + if 'group' in instance: + self.result['group'] = instance['group'] + if 'project' in instance: + self.result['project'] = instance['project'] + if 'publicip' in instance: + self.result['public_ip'] = instance['publicip'] + if 'passwordenabled' in instance: + self.result['password_enabled'] = instance['passwordenabled'] + if 'password' in instance: + self.result['password'] = instance['password'] + if 'serviceofferingname' in instance: + self.result['service_offering'] = instance['serviceofferingname'] + if 'zonename' in instance: + self.result['zone'] = instance['zonename'] + if 'templatename' in instance: + self.result['template'] = instance['templatename'] + if 'isoname' in instance: + self.result['iso'] = instance['isoname'] + if 'keypair' in instance: + self.result['ssh_key'] = instance['keypair'] + if 'created' in instance: + 
self.result['created'] = instance['created'] + if 'state' in instance: + self.result['state'] = instance['state'] + if 'tags' in instance: + self.result['tags'] = [] + for tag in instance['tags']: + result_tag = {} + result_tag['key'] = tag['key'] + result_tag['value'] = tag['value'] + self.result['tags'].append(result_tag) + if 'securitygroup' in instance: + security_groups = [] + for securitygroup in instance['securitygroup']: + security_groups.append(securitygroup['name']) + self.result['security_groups'] = security_groups + if 'affinitygroup' in instance: + affinity_groups = [] + for affinitygroup in instance['affinitygroup']: + affinity_groups.append(affinitygroup['name']) + self.result['affinity_groups'] = affinity_groups + if 'nic' in instance: + for nic in instance['nic']: + if nic['isdefault']: + self.result['default_ip'] = nic['ipaddress'] + return self.result + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + display_name = dict(default=None), + group = dict(default=None), + state = dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'absent', 'destroyed', 'expunged'], default='present'), + service_offering = dict(default=None), + template = dict(default=None), + iso = dict(default=None), + networks = dict(type='list', aliases=[ 'network' ], default=None), + ip_address = dict(default=None), + ip6_address = dict(default=None), + disk_offering = dict(default=None), + disk_size = dict(type='int', default=None), + keyboard = dict(choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us'], default=None), + hypervisor = dict(default=None), + security_groups = dict(type='list', aliases=[ 'security_group' ], default=[]), + affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]), + project = dict(default=None), + user_data = dict(default=None), + zone = dict(default=None), + ssh_key = dict(default=None), + force = 
dict(choices=BOOLEANS, default=False), + tags = dict(type='list', aliases=[ 'tag' ], default=None), + poll_async = dict(choices=BOOLEANS, default=True), + api_key = dict(default=None), + api_secret = dict(default=None), + api_url = dict(default=None), + api_http_method = dict(default='get'), + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_instance = AnsibleCloudStackInstance(module) + + state = module.params.get('state') + + if state in ['absent', 'destroyed']: + instance = acs_instance.absent_instance() + + elif state in ['expunged']: + instance = acs_instance.expunge_instance() + + elif state in ['present', 'deployed']: + instance = acs_instance.present_instance() + + elif state in ['stopped']: + instance = acs_instance.stop_instance() + + elif state in ['started']: + instance = acs_instance.start_instance() + + elif state in ['restarted']: + instance = acs_instance.restart_instance() + + if instance and 'state' in instance and instance['state'].lower() == 'error': + module.fail_json(msg="Instance named '%s' in error state." % module.params.get('name')) + + result = acs_instance.get_result(instance) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() From 7c675705f32ae8fcd26942bfc7e9b2c26b63dba5 Mon Sep 17 00:00:00 2001 From: Sterling Windmill Date: Mon, 4 May 2015 15:57:40 -0400 Subject: [PATCH 218/224] Allow for specifying name instead of host as per the documentation at http://docs.ansible.com/known_hosts_module.html --- system/known_hosts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/known_hosts.py b/system/known_hosts.py index 30ea7755553..86876cd4931 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -82,7 +82,7 @@ def enforce_state(module, params): Add or remove key. 
""" - host = params["host"] + host = params["name"] key = params.get("key",None) port = params.get("port",None) #expand the path parameter; otherwise module.add_path_info From 28b0f3ce132dd78e0407d5f95838d97fd69824b6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 4 May 2015 13:24:21 -0700 Subject: [PATCH 219/224] Fix documentation formatting --- cloud/cloudstack/cs_instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 62856c6d177..9b14f1a9834 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -142,7 +142,7 @@ options: tags: description: - List of tags. Tags are a list of dictionaries having keys C(key) and C(value). - - If you want to delete all tags, set a empty list e.g. C(tags: []). + - "If you want to delete all tags, set a empty list e.g. C(tags: [])." required: false default: null poll_async: From 1e744a885a6c0f890165846397d8eecbfb175cd2 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 5 May 2015 15:32:06 +0200 Subject: [PATCH 220/224] cloudstack: doc fixes --- cloud/cloudstack/cs_affinitygroup.py | 3 ++- cloud/cloudstack/cs_firewall.py | 3 ++- cloud/cloudstack/cs_iso.py | 3 ++- cloud/cloudstack/cs_securitygroup.py | 4 +++- cloud/cloudstack/cs_securitygroup_rule.py | 3 ++- cloud/cloudstack/cs_sshkeypair.py | 4 ++-- cloud/cloudstack/cs_vmsnapshot.py | 3 ++- 7 files changed, 15 insertions(+), 8 deletions(-) diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 59c21ee46f6..593f0840bae 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -22,7 +22,8 @@ DOCUMENTATION = ''' --- module: cs_affinitygroup short_description: Manages affinity groups on Apache CloudStack based clouds. -description: Create and remove affinity groups. +description: + - Create and remove affinity groups. 
version_added: '2.0' author: René Moser options: diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 9049f40f7c4..91e9e0405c6 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -21,7 +21,8 @@ DOCUMENTATION = ''' module: cs_firewall short_description: Manages firewall rules on Apache CloudStack based clouds. -description: Creates and removes firewall rules. +description: + - Creates and removes firewall rules. version_added: '2.0' author: René Moser options: diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 42f00fb1f00..b416fbb3356 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -22,7 +22,8 @@ DOCUMENTATION = ''' --- module: cs_iso short_description: Manages ISOs images on Apache CloudStack based clouds. -description: Register and remove ISO images. +description: + - Register and remove ISO images. version_added: '2.0' author: René Moser options: diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 4e2856d5a90..8b8659cdc94 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -19,9 +19,11 @@ # along with Ansible. If not, see . DOCUMENTATION = ''' +--- module: cs_securitygroup short_description: Manages security groups on Apache CloudStack based clouds. -description: Create and remove security groups. +description: + - Create and remove security groups. version_added: '2.0' author: René Moser options: diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index 709a9b562b3..80d271d90c0 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -22,7 +22,8 @@ DOCUMENTATION = ''' --- module: cs_securitygroup_rule short_description: Manages security group rules on Apache CloudStack based clouds. -description: Add and remove security group rules. 
+description: + - Add and remove security group rules. version_added: '2.0' author: René Moser options: diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index 9cc514c05ea..34ace0aa1f2 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -23,8 +23,8 @@ DOCUMENTATION = ''' module: cs_sshkeypair short_description: Manages SSH keys on Apache CloudStack based clouds. description: - - If no key was found and no public key was provided and a new SSH - private/public key pair will be created and the private key will be returned. + - If no key was found and no public key was provided and a new SSH + private/public key pair will be created and the private key will be returned. version_added: '2.0' author: René Moser options: diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index 89c0ec081d6..bb27b2de978 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -22,7 +22,8 @@ DOCUMENTATION = ''' --- module: cs_vmsnapshot short_description: Manages VM snapshots on Apache CloudStack based clouds. -description: Create, remove and revert VM from snapshots. +description: + - Create, remove and revert VM from snapshots. 
version_added: '2.0' author: René Moser options: From dfa9037091cdd5414f653b6045ad900af3006f6f Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 5 May 2015 15:53:55 +0200 Subject: [PATCH 221/224] cloudstack: fix missing doc fragments --- cloud/cloudstack/cs_affinitygroup.py | 1 + cloud/cloudstack/cs_firewall.py | 1 + cloud/cloudstack/cs_instance.py | 1 + cloud/cloudstack/cs_iso.py | 1 + cloud/cloudstack/cs_securitygroup.py | 1 + cloud/cloudstack/cs_sshkeypair.py | 1 + cloud/cloudstack/cs_vmsnapshot.py | 1 + 7 files changed, 7 insertions(+) diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 593f0840bae..07b9cf42d6a 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -52,6 +52,7 @@ options: - Poll async jobs until job has finished. required: false default: true +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index 91e9e0405c6..13f114c1b35 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -72,6 +72,7 @@ options: - Name of the project. required: false default: null +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 9b14f1a9834..8680f20ada5 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -150,6 +150,7 @@ options: - Poll async jobs until job has finished. 
required: false default: true +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index b416fbb3356..83af1e1783e 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -95,6 +95,7 @@ options: required: false default: 'present' choices: [ 'present', 'absent' ] +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index 8b8659cdc94..50556da5bb3 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -47,6 +47,7 @@ options: - Name of the project the security group to be created in. required: false default: null +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index 34ace0aa1f2..8dd02dcd1f1 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -48,6 +48,7 @@ options: - String of the public key. required: false default: null +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index bb27b2de978..dad660cd77c 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -67,6 +67,7 @@ options: - Poll async jobs until job has finished. 
required: false default: true +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' From 8438ef995e206c230be3a06e4c016a0c1142c151 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 5 May 2015 16:17:05 +0200 Subject: [PATCH 222/224] cloudstack: fix missing doc fragment in cs_securitygroup_rule --- cloud/cloudstack/cs_securitygroup_rule.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index 80d271d90c0..1f2dac6f267 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -90,6 +90,7 @@ options: - Poll async jobs until job has finished. required: false default: true +extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' From 4e21eb09db44406a065dbfabc83fa94869acf849 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Fievet?= <_@sebastien-fievet.fr> Date: Tue, 5 May 2015 16:58:25 +0200 Subject: [PATCH 223/224] Typo --- monitoring/pingdom.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/pingdom.py b/monitoring/pingdom.py index 6f658cd9505..0ae1af357e0 100644 --- a/monitoring/pingdom.py +++ b/monitoring/pingdom.py @@ -111,7 +111,7 @@ def main(): ) if not HAS_PINGDOM: - module.fail_json(msg="Missing requried pingdom module (check docs)") + module.fail_json(msg="Missing required pingdom module (check docs)") checkid = module.params['checkid'] state = module.params['state'] From 34b81a875691bb111aa5062bcadc3802ec4d4813 Mon Sep 17 00:00:00 2001 From: Julien Vey Date: Wed, 6 May 2015 10:48:28 +0200 Subject: [PATCH 224/224] [homebrew_cask] Be consistent in the documentation The documentation for the `state` field is not very clear. It says possible values are "installed, uninstalled" and default value is "present" The examples below also use `present` and `absent`. 
This patch uses "absent" and "present" instead of "installed" and "uninstalled" Moreover, this is consistent with other packaging modules, like homebrew itself --- packaging/os/homebrew_cask.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/homebrew_cask.py b/packaging/os/homebrew_cask.py index dede8d4bb36..75acead517b 100644 --- a/packaging/os/homebrew_cask.py +++ b/packaging/os/homebrew_cask.py @@ -32,7 +32,7 @@ options: state: description: - state of the cask - choices: [ 'installed', 'uninstalled' ] + choices: [ 'present', 'absent' ] required: false default: present '''