From cc484b629e6e3ee6480a296204a965fdc2f4e55a Mon Sep 17 00:00:00 2001
From: Ivan Vanderbyl
Date: Fri, 30 Aug 2013 12:20:57 +1000
Subject: [PATCH 001/772] Added LogEntries module

---
 library/monitoring/logentries | 130 ++++++++++++++++++++++++++++++++++
 1 file changed, 130 insertions(+)
 create mode 100644 library/monitoring/logentries

diff --git a/library/monitoring/logentries b/library/monitoring/logentries
new file mode 100644
index 00000000000..480ab9954df
--- /dev/null
+++ b/library/monitoring/logentries
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Ivan Vanderbyl
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: logentries
+author: Ivan Vanderbyl
+short_description: Follow log files with the LogEntries agent
+description:
+    - Sends logs to LogEntries in real time
+version_added: "1.0"
+options:
+    path:
+        description:
+            - path to a log file
+        required: true
+    state:
+        description:
+            - following state of the log
+        choices: [ 'present', 'followed', 'absent', 'unfollowed' ]
+        required: false
+        default: present
+notes:
+    - Requires the LogEntries agent, which can be installed following the instructions at logentries.com
+'''
+EXAMPLES = '''
+- logentries: path=/var/log/nginx/access.log state=present
+- logentries: path=/var/log/nginx/error.log state=absent
+'''
+
+def query_log_status(module, le_path, path, state="present"):
+    """ Returns whether a log is followed or not. """
+
+    if state == "present":
+        rc, out, err = module.run_command("%s followed %s" % (le_path, path))
+        if rc == 0:
+            return True
+
+        return False
+
+def follow_log(module, le_path, logs):
+    """ Follows one or more logs if not already followed. """
+
+    followed_count = 0
+
+    for log in logs:
+        if query_log_status(module, le_path, log):
+            continue
+
+        if module.check_mode:
+            module.exit_json(changed=True)
+        rc, out, err = module.run_command([le_path, 'follow', log])
+
+        if not query_log_status(module, le_path, log):
+            module.fail_json(msg="failed to follow '%s': %s" % (log, out.strip()))
+
+        followed_count += 1
+
+    if followed_count > 0:
+        module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,))
+
+    module.exit_json(changed=False, msg="log(s) already followed")
+
+def unfollow_log(module, le_path, logs):
+    """ Unfollows one or more logs if followed. """
+
+    removed_count = 0
+
+    # Using a for loop in case of error, so we can report the log that failed
+    for log in logs:
+        # Query the log first, to see if we even need to remove.
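+        # (a log that is not currently followed is skipped outright: no removal
+        #  command is run and no change is reported for it)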
+        if not query_log_status(module, le_path, log):
+            continue
+
+        if module.check_mode:
+            module.exit_json(changed=True)
+        rc, out, err = module.run_command([le_path, 'unfollow', log])
+
+        if query_log_status(module, le_path, log):
+            module.fail_json(msg="failed to remove '%s': %s" % (log, out.strip()))
+
+        removed_count += 1
+
+    if removed_count > 0:
+        module.exit_json(changed=True, msg="removed %d log(s)" % removed_count)
+
+    module.exit_json(changed=False, msg="log(s) already unfollowed")
+
+def main():
+    module = AnsibleModule(
+        argument_spec = dict(
+            path = dict(aliases=["name"], required=True),
+            state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"])
+        ),
+        supports_check_mode=True
+    )
+
+    le_path = module.get_bin_path('le', True, ['/usr/local/bin'])
+
+    p = module.params
+
+    # Handle multiple log files
+    logs = p["path"].split(",")
+    logs = filter(None, logs)
+
+    if p["state"] in ["present", "followed"]:
+        follow_log(module, le_path, logs)
+
+    elif p["state"] in ["absent", "unfollowed"]:
+        unfollow_log(module, le_path, logs)
+
+# this is magic, see lib/ansible/module_common.py
+#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+
+main()

From b214f4a7088e941dfe3afca0cb1d37165629f256 Mon Sep 17 00:00:00 2001
From: Ivan Vanderbyl
Date: Fri, 30 Aug 2013 12:43:27 +1000
Subject: [PATCH 002/772] Fixed unfollow logic

---
 library/monitoring/logentries | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/library/monitoring/logentries b/library/monitoring/logentries
index 480ab9954df..08a2d18264c 100644
--- a/library/monitoring/logentries
+++ b/library/monitoring/logentries
@@ -67,7 +67,7 @@ def follow_log(module, le_path, logs):
         rc, out, err = module.run_command([le_path, 'follow', log])
 
         if not query_log_status(module, le_path, log):
-            module.fail_json(msg="failed to follow '%s': %s" % (log, out.strip()))
+            module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))
 
         followed_count += 1
 
@@ -89,10 +89,10 @@ def unfollow_log(module, le_path, logs):
 
         if module.check_mode:
             module.exit_json(changed=True)
-        rc, out, err = module.run_command([le_path, 'unfollow', log])
+        rc, out, err = module.run_command([le_path, 'rm', log])
 
         if query_log_status(module, le_path, log):
-            module.fail_json(msg="failed to remove '%s': %s" % (log, out.strip()))
+            module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip()))
 
         removed_count += 1
 

From dfb9d8c2fb9a1d56ea8bfa155f33748ad7a78bf6 Mon Sep 17 00:00:00 2001
From: Michael Scherer
Date: Sun, 8 Sep 2013 15:03:28 +0200
Subject: [PATCH 003/772] Add a plugin that permits using Ansible with LXC
 containers via libvirt

Using -c lxc and the domain name as hostname, this permits managing
a whole range of containers, provided they are managed and created
by libvirtd.
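For example, a play can opt into this plugin through the play-level
connection keyword (hypothetical container name 'mycontainer', assuming
that domain appears in `virsh -q -c lxc:/// list --all`):

    - hosts: mycontainer
      connection: lxc
      tasks:
        - action: ping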
---
 lib/ansible/runner/connection_plugins/lxc.py | 121 +++++++++++++++++++
 1 file changed, 121 insertions(+)
 create mode 100644 lib/ansible/runner/connection_plugins/lxc.py

diff --git a/lib/ansible/runner/connection_plugins/lxc.py b/lib/ansible/runner/connection_plugins/lxc.py
new file mode 100644
index 00000000000..b1de672ddd8
--- /dev/null
+++ b/lib/ansible/runner/connection_plugins/lxc.py
@@ -0,0 +1,121 @@
+# Based on local.py (c) 2012, Michael DeHaan
+# Based on chroot.py (c) 2013, Maykel Moya
+# (c) 2013, Michael Scherer
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

+import distutils.spawn
+import os
+import subprocess
+from ansible import errors
+from ansible.callbacks import vvv
+
+class Connection(object):
+    ''' Local lxc based connections '''
+
+    def _search_executable(self, executable):
+        cmd = distutils.spawn.find_executable(executable)
+        if not cmd:
+            raise errors.AnsibleError("%s command not found in PATH" % executable)
+        return cmd
+
+    def _check_domain(self, domain):
+        p = subprocess.Popen([self.cmd, '-q', '-c', 'lxc:///', 'dominfo', domain],
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        p.communicate()
+        if p.returncode:
+            raise errors.AnsibleError("%s is not a lxc defined in libvirt" % domain)
+
+    def __init__(self, runner, host, port, *args, **kwargs):
+        self.lxc = host
+
+        self.cmd = self._search_executable('virsh')
+
+        self._check_domain(host)
+
+        self.runner = runner
+        self.host = host
+        # port is unused, since this is local
+        self.port = port
+
+    def connect(self, port=None):
+        ''' connect to the lxc; nothing to do here '''
+
+        vvv("THIS IS A LOCAL LXC DIR", host=self.lxc)
+
+        return self
+
+    def _generate_cmd(self, executable, cmd):
+        if executable:
+            local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable, '-c', cmd]
+        else:
+            local_cmd = '%s -q -c lxc:/// lxc-enter-namespace %s -- %s' % (self.cmd, self.lxc, cmd)
+        return local_cmd
+
+    def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'):
+        ''' run a command in the lxc container '''
+
+        # We enter lxc as root so sudo stuff can be ignored
+        local_cmd = self._generate_cmd(executable, cmd)
+
+        vvv("EXEC %s" % (local_cmd), host=self.lxc)
+        p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
+                             cwd=self.runner.basedir,
+                             stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        stdout, stderr = p.communicate()
+        return (p.returncode, '', stdout, stderr)
+
+    def _normalize_path(self, path, prefix):
+        if not path.startswith(os.path.sep):
+            path = os.path.join(os.path.sep, path)
+        normpath = os.path.normpath(path)
+        return os.path.join(prefix, normpath[1:])
+
+    def put_file(self, in_path, out_path):
+        ''' transfer a file from local to lxc '''
+
+        out_path = self._normalize_path(out_path, '/')
+        vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc)
+
+        local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc,
+                     '--', '/bin/tee', out_path]
+        vvv("EXEC %s" % (local_cmd), host=self.lxc)
+
+        p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
+                             stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate(open(in_path,'rb').read())
+
+    def fetch_file(self, in_path, out_path):
+        ''' fetch a file from lxc to local '''
+
+        in_path = self._normalize_path(in_path, '/')
+        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc)
+
+        local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/cat', in_path]
+        vvv("EXEC %s" % (local_cmd), host=self.lxc)
+
+        p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
+                             stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        open(out_path,'wb').write(stdout)
+
+
+    def close(self):
+        ''' terminate the connection; nothing to do here '''
+        pass

From 5bf1c776143923618357fddd3c031d452e0ba435 Mon Sep 17 00:00:00 2001
From: Michael Scherer
Date: Sun, 8 Sep 2013 22:59:38 +0100
Subject: [PATCH 004/772] add an inventory script that goes with the lxc
 connection module

---
 plugins/inventory/lxc.py | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)
 create mode 100755 plugins/inventory/lxc.py

diff --git a/plugins/inventory/lxc.py b/plugins/inventory/lxc.py
new file mode 100755
index 00000000000..f588a671faf
--- /dev/null
+++ b/plugins/inventory/lxc.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# (c) 2013, Michael Scherer
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

+from subprocess import Popen,PIPE
+import sys
+import json
+
+result = {}
+result['all'] = {}
+
+pipe = Popen(['virsh', '-q', '-c', 'lxc:///', 'list', '--name', '--all'], stdout=PIPE, universal_newlines=True)
+result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
+result['all']['vars'] = {}
+result['all']['vars']['ansible_connection'] = 'lxc'
+
+if len(sys.argv) == 2 and sys.argv[1] == '--list':
+    print json.dumps(result)
+elif len(sys.argv) == 3 and sys.argv[1] == '--host':
+    print json.dumps({'ansible_connection': 'lxc'})
+else:
+    print "Need an argument, either --list or --host <host>"

From 8276face94207cbbb6a803a3be38a36f46e3a9f6 Mon Sep 17 00:00:00 2001
From: Michael Gregson
Date: Mon, 30 Sep 2013 19:22:07 -0600
Subject: [PATCH 005/772] [digital_ocean] Don't die when the id parameter is
 missing

It's okay for this to happen now, because we might move on to the name
parameter if unique_name is enabled.

---
 library/cloud/digital_ocean | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/library/cloud/digital_ocean b/library/cloud/digital_ocean
index cd485aedf91..e5d24608fa7 100644
--- a/library/cloud/digital_ocean
+++ b/library/cloud/digital_ocean
@@ -328,13 +328,15 @@ def core(module):
 
     elif state in ('absent', 'deleted'):
         # First, try to find a droplet by id.
- droplet = Droplet.find(id=getkeyordie('id')) + droplet = None + if 'id' in module.params: + droplet = Droplet.find(id=module.params['id']) # If we couldn't find the droplet and the user is allowing unique # hostnames, then check to see if a droplet with the specified # hostname already exists. - if not droplet and module.params['unique_name']: - droplet = Droplet.find(name=getkeyordie('name')) + if not droplet and module.params['unique_name'] and 'name' in module.params: + droplet = Droplet.find(name=module.params['name']) if not droplet: module.exit_json(changed=False, msg='The droplet is not found.') From b47df3e1f16edf04f69067440e4c24f09dd02619 Mon Sep 17 00:00:00 2001 From: Michael Gregson Date: Mon, 30 Sep 2013 20:50:22 -0600 Subject: [PATCH 006/772] Add basic domain handling to digital_ocean cloud action --- library/cloud/digital_ocean | 140 +++++++++++++++++++++++++++++++++++- 1 file changed, 137 insertions(+), 3 deletions(-) diff --git a/library/cloud/digital_ocean b/library/cloud/digital_ocean index e5d24608fa7..fe304e0151e 100644 --- a/library/cloud/digital_ocean +++ b/library/cloud/digital_ocean @@ -27,7 +27,7 @@ options: description: - Which target you want to operate on. default: droplet - choices: ['droplet', 'ssh'] + choices: ['droplet', 'ssh', 'domain'] state: description: - Indicate desired state of the target. @@ -44,7 +44,7 @@ options: - Numeric, the droplet id you want to operate on. name: description: - - String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key. + - String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key, or the name of a domain. unique_name: description: - Bool, require unique hostnames. By default, digital ocean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence. @@ -75,6 +75,9 @@ options: ssh_pub_key: description: - The public SSH key you want to add to your account. + ip: + description: + - The IP address to point a domain at. notes: - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. 
@@ -141,6 +144,31 @@ EXAMPLES = ''' size_id=1 region_id=2 image_id=3 + +# Create a domain record + +- digital_ocean: > + state=present + command=domain + name=my.digitalocean.domain + ip=127.0.0.1 + +# Create a droplet and a corresponding domain record + +- digital_cean: > + state=present + command=droplet + name=test_droplet + size_id=1 + region_id=2 + image_id=3 + register: test_droplet + +- digital_ocean: > + state=present + command=domain + name={{ test_droplet.name }}.my.domain + ip={{ test_droplet.ip_address }} ''' import sys @@ -275,6 +303,72 @@ class SSH(JsonfyMixIn): json = cls.manager.new_ssh_key(name, key_pub) return cls(json) +class DomainRecord(JsonfyMixIn): + manager = None + + def __init__(self, json): + self.__dict__.update(json) + update_attr = __init__ + + def update(self, data = None, record_type = None): + json = self.manager.edit_domain_record(self.domain_id, + self.id, + record_type if record_type is not None else self.record_type, + data if data is not None else self.data) + self.__dict__.update(json) + return self + + def destroy(self): + json = self.manager.destroy_domain_record(self.domain_id, self.id) + return json + +class Domain(JsonfyMixIn): + manager = None + + def __init__(self, domain_json): + self.__dict__.update(domain_json) + + def destroy(self): + self.manager.destroy_domain(self.id) + + def records(self): + json = self.manager.all_domain_records(self.id) + return map(DomainRecord, json) + + @classmethod + def add(cls, name, ip): + json = cls.manager.new_domain(name, ip) + return cls(json) + + @classmethod + def setup(cls, client_id, api_key): + cls.manager = DoManager(client_id, api_key) + DomainRecord.manager = cls.manager + + @classmethod + def list_all(cls): + domains = cls.manager.all_domains() + return map(cls, domains) + + @classmethod + def find(cls, name=None, id=None): + if name is None and id is None: + return False + + domains = Domain.list_all() + + if id is not None: + for domain in domains: + if domain.id == id: + return domain + + if name is not None: + for domain in domains: + if domain.name == name: + return domain + + return False + def core(module): def getkeyordie(k): v = module.params[k] @@ -361,11 +455,50 @@ def core(module): key.destroy() module.exit_json(changed=True) + elif command == 'domain': + Domain.setup(client_id, api_key) + if state in ('present', 'active'): + domain = Domain.find(id=module.params["id"]) + + if not domain: + domain = Domain.find(name=getkeyordie("name")) + + if not domain: + domain = Domain.add(getkeyordie("name"), + getkeyordie("ip")) + module.exit_json(changed=True, domain=domain.to_json()) + else: + records = domain.records() + at_record = None + for record in records: + if record.name == "@": + at_record = record + + if not at_record.data == getkeyordie("ip"): + record.update(data=getkeyordie("ip"), record_type='A') + module.exit_json(changed=True, domain=Domain.find(id=record.domain_id).to_json()) + + module.exit_json(changed=False, domain=domain.to_json()) + + elif state in ('absent', 'deleted'): + domain = None + if "id" in module.params: + domain = Domain.find(id=module.params["id"]) + + if not domain and "name" in module.params: + domain = Domain.find(name=module.params["name"]) + + if not domain: + module.exit_json(changed=False, msg="Domain not found.") + + event_json = domain.destroy() + module.exit_json(changed=True, event=event_json) + def main(): module = AnsibleModule( argument_spec = dict( - command = dict(choices=['droplet', 'ssh'], default='droplet'), + command = 
dict(choices=['droplet', 'ssh', 'domain'], default='droplet'), state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'), client_id = dict(aliases=['CLIENT_ID'], no_log=True), api_key = dict(aliases=['API_KEY'], no_log=True), @@ -379,6 +512,7 @@ def main(): wait = dict(type='bool', choices=BOOLEANS, default='yes'), wait_timeout = dict(default=300, type='int'), ssh_pub_key = dict(type='str'), + ip = dict(type='str'), ), required_together = ( ['size_id', 'image_id', 'region_id'], From b4fdb4c86bd7a1b69c45d212143417b462d18d28 Mon Sep 17 00:00:00 2001 From: Michael Gregson Date: Tue, 1 Oct 2013 15:12:41 -0600 Subject: [PATCH 007/772] [digital_ocean] revert original module and add one new module for each command --- library/cloud/digital_ocean | 148 +------------ library/cloud/digital_ocean_domain | 242 +++++++++++++++++++++ library/cloud/digital_ocean_droplet | 320 ++++++++++++++++++++++++++++ library/cloud/digital_ocean_ssh | 179 ++++++++++++++++ 4 files changed, 747 insertions(+), 142 deletions(-) create mode 100644 library/cloud/digital_ocean_domain create mode 100644 library/cloud/digital_ocean_droplet create mode 100644 library/cloud/digital_ocean_ssh diff --git a/library/cloud/digital_ocean b/library/cloud/digital_ocean index fe304e0151e..cd485aedf91 100644 --- a/library/cloud/digital_ocean +++ b/library/cloud/digital_ocean @@ -27,7 +27,7 @@ options: description: - Which target you want to operate on. default: droplet - choices: ['droplet', 'ssh', 'domain'] + choices: ['droplet', 'ssh'] state: description: - Indicate desired state of the target. @@ -44,7 +44,7 @@ options: - Numeric, the droplet id you want to operate on. name: description: - - String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key, or the name of a domain. + - String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key. unique_name: description: - Bool, require unique hostnames. By default, digital ocean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence. @@ -75,9 +75,6 @@ options: ssh_pub_key: description: - The public SSH key you want to add to your account. - ip: - description: - - The IP address to point a domain at. notes: - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. 
@@ -144,31 +141,6 @@ EXAMPLES = ''' size_id=1 region_id=2 image_id=3 - -# Create a domain record - -- digital_ocean: > - state=present - command=domain - name=my.digitalocean.domain - ip=127.0.0.1 - -# Create a droplet and a corresponding domain record - -- digital_cean: > - state=present - command=droplet - name=test_droplet - size_id=1 - region_id=2 - image_id=3 - register: test_droplet - -- digital_ocean: > - state=present - command=domain - name={{ test_droplet.name }}.my.domain - ip={{ test_droplet.ip_address }} ''' import sys @@ -303,72 +275,6 @@ class SSH(JsonfyMixIn): json = cls.manager.new_ssh_key(name, key_pub) return cls(json) -class DomainRecord(JsonfyMixIn): - manager = None - - def __init__(self, json): - self.__dict__.update(json) - update_attr = __init__ - - def update(self, data = None, record_type = None): - json = self.manager.edit_domain_record(self.domain_id, - self.id, - record_type if record_type is not None else self.record_type, - data if data is not None else self.data) - self.__dict__.update(json) - return self - - def destroy(self): - json = self.manager.destroy_domain_record(self.domain_id, self.id) - return json - -class Domain(JsonfyMixIn): - manager = None - - def __init__(self, domain_json): - self.__dict__.update(domain_json) - - def destroy(self): - self.manager.destroy_domain(self.id) - - def records(self): - json = self.manager.all_domain_records(self.id) - return map(DomainRecord, json) - - @classmethod - def add(cls, name, ip): - json = cls.manager.new_domain(name, ip) - return cls(json) - - @classmethod - def setup(cls, client_id, api_key): - cls.manager = DoManager(client_id, api_key) - DomainRecord.manager = cls.manager - - @classmethod - def list_all(cls): - domains = cls.manager.all_domains() - return map(cls, domains) - - @classmethod - def find(cls, name=None, id=None): - if name is None and id is None: - return False - - domains = Domain.list_all() - - if id is not None: - for domain in domains: - if domain.id == id: - return domain - - if name is not None: - for domain in domains: - if domain.name == name: - return domain - - return False - def core(module): def getkeyordie(k): v = module.params[k] @@ -422,15 +328,13 @@ def core(module): elif state in ('absent', 'deleted'): # First, try to find a droplet by id. - droplet = None - if 'id' in module.params: - droplet = Droplet.find(id=module.params['id']) + droplet = Droplet.find(id=getkeyordie('id')) # If we couldn't find the droplet and the user is allowing unique # hostnames, then check to see if a droplet with the specified # hostname already exists. 
- if not droplet and module.params['unique_name'] and 'name' in module.params: - droplet = Droplet.find(name=module.params['name']) + if not droplet and module.params['unique_name']: + droplet = Droplet.find(name=getkeyordie('name')) if not droplet: module.exit_json(changed=False, msg='The droplet is not found.') @@ -455,50 +359,11 @@ def core(module): key.destroy() module.exit_json(changed=True) - elif command == 'domain': - Domain.setup(client_id, api_key) - if state in ('present', 'active'): - domain = Domain.find(id=module.params["id"]) - - if not domain: - domain = Domain.find(name=getkeyordie("name")) - - if not domain: - domain = Domain.add(getkeyordie("name"), - getkeyordie("ip")) - module.exit_json(changed=True, domain=domain.to_json()) - else: - records = domain.records() - at_record = None - for record in records: - if record.name == "@": - at_record = record - - if not at_record.data == getkeyordie("ip"): - record.update(data=getkeyordie("ip"), record_type='A') - module.exit_json(changed=True, domain=Domain.find(id=record.domain_id).to_json()) - - module.exit_json(changed=False, domain=domain.to_json()) - - elif state in ('absent', 'deleted'): - domain = None - if "id" in module.params: - domain = Domain.find(id=module.params["id"]) - - if not domain and "name" in module.params: - domain = Domain.find(name=module.params["name"]) - - if not domain: - module.exit_json(changed=False, msg="Domain not found.") - - event_json = domain.destroy() - module.exit_json(changed=True, event=event_json) - def main(): module = AnsibleModule( argument_spec = dict( - command = dict(choices=['droplet', 'ssh', 'domain'], default='droplet'), + command = dict(choices=['droplet', 'ssh'], default='droplet'), state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'), client_id = dict(aliases=['CLIENT_ID'], no_log=True), api_key = dict(aliases=['API_KEY'], no_log=True), @@ -512,7 +377,6 @@ def main(): wait = dict(type='bool', choices=BOOLEANS, default='yes'), wait_timeout = dict(default=300, type='int'), ssh_pub_key = dict(type='str'), - ip = dict(type='str'), ), required_together = ( ['size_id', 'image_id', 'region_id'], diff --git a/library/cloud/digital_ocean_domain b/library/cloud/digital_ocean_domain new file mode 100644 index 00000000000..21a9132381d --- /dev/null +++ b/library/cloud/digital_ocean_domain @@ -0,0 +1,242 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +DOCUMENTATION = ''' +--- +module: digital_ocean_domain +short_description: Create/delete a DNS record in DigitalOcean +description: + - Create/delete a DNS record in DigitalOcean. +version_added: "1.4" +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'active', 'absent', 'deleted'] + client_id: + description: + - Digital Ocean manager id. + api_key: + description: + - Digital Ocean api key. 
+ id: + description: + - Numeric, the droplet id you want to operate on. + name: + description: + - String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key, or the name of a domain. + ip: + description: + - The IP address to point a domain at. + +notes: + - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. +''' + + +EXAMPLES = ''' +# Create a domain record + +- digital_ocean_domain: > + state=present + name=my.digitalocean.domain + ip=127.0.0.1 + +# Create a droplet and a corresponding domain record + +- digital_cean_droplet: > + state=present + name=test_droplet + size_id=1 + region_id=2 + image_id=3 + register: test_droplet + +- digital_ocean_domain: > + state=present + name={{ test_droplet.name }}.my.domain + ip={{ test_droplet.ip_address }} +''' + +import sys +import os +import time + +try: + from dopy.manager import DoError, DoManager +except ImportError as e: + print "failed=True msg='dopy required for this module'" + sys.exit(1) + +class TimeoutError(DoError): + def __init__(self, msg, id): + super(TimeoutError, self).__init__(msg) + self.id = id + +class JsonfyMixIn(object): + def to_json(self): + return self.__dict__ + +class DomainRecord(JsonfyMixIn): + manager = None + + def __init__(self, json): + self.__dict__.update(json) + update_attr = __init__ + + def update(self, data = None, record_type = None): + json = self.manager.edit_domain_record(self.domain_id, + self.id, + record_type if record_type is not None else self.record_type, + data if data is not None else self.data) + self.__dict__.update(json) + return self + + def destroy(self): + json = self.manager.destroy_domain_record(self.domain_id, self.id) + return json + +class Domain(JsonfyMixIn): + manager = None + + def __init__(self, domain_json): + self.__dict__.update(domain_json) + + def destroy(self): + self.manager.destroy_domain(self.id) + + def records(self): + json = self.manager.all_domain_records(self.id) + return map(DomainRecord, json) + + @classmethod + def add(cls, name, ip): + json = cls.manager.new_domain(name, ip) + return cls(json) + + @classmethod + def setup(cls, client_id, api_key): + cls.manager = DoManager(client_id, api_key) + DomainRecord.manager = cls.manager + + @classmethod + def list_all(cls): + domains = cls.manager.all_domains() + return map(cls, domains) + + @classmethod + def find(cls, name=None, id=None): + if name is None and id is None: + return False + + domains = Domain.list_all() + + if id is not None: + for domain in domains: + if domain.id == id: + return domain + + if name is not None: + for domain in domains: + if domain.name == name: + return domain + + return False + +def core(module): + def getkeyordie(k): + v = module.params[k] + if v is None: + module.fail_json(msg='Unable to load %s' % k) + return v + + try: + # params['client_id'] will be None even if client_id is not passed in + client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID'] + api_key = module.params['api_key'] or os.environ['DO_API_KEY'] + except KeyError, e: + module.fail_json(msg='Unable to load %s' % e.message) + + changed = True + state = module.params['state'] + + Domain.setup(client_id, api_key) + if state in ('present'): + domain = Domain.find(id=module.params["id"]) + + if not domain: + domain = Domain.find(name=getkeyordie("name")) + + if not domain: + domain = Domain.add(getkeyordie("name"), + getkeyordie("ip")) + module.exit_json(changed=True, domain=domain.to_json()) + else: + records = domain.records() + at_record = None + 
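+            # scan for the root ('@') record; its data is compared below with
+            # the requested ip to decide whether an update is needed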
for record in records: + if record.name == "@": + at_record = record + + if not at_record.data == getkeyordie("ip"): + record.update(data=getkeyordie("ip"), record_type='A') + module.exit_json(changed=True, domain=Domain.find(id=record.domain_id).to_json()) + + module.exit_json(changed=False, domain=domain.to_json()) + + elif state in ('absent'): + domain = None + if "id" in module.params: + domain = Domain.find(id=module.params["id"]) + + if not domain and "name" in module.params: + domain = Domain.find(name=module.params["name"]) + + if not domain: + module.exit_json(changed=False, msg="Domain not found.") + + event_json = domain.destroy() + module.exit_json(changed=True, event=event_json) + + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'), + client_id = dict(aliases=['CLIENT_ID'], no_log=True), + api_key = dict(aliases=['API_KEY'], no_log=True), + name = dict(type='str'), + id = dict(aliases=['droplet_id'], type='int'), + ip = dict(type='str'), + ), + required_one_of = ( + ['id', 'name'], + ), + ) + + try: + core(module) + except TimeoutError as e: + module.fail_json(msg=str(e), id=e.id) + except (DoError, Exception) as e: + module.fail_json(msg=str(e)) + +# this is magic, see lib/ansible/module_common.py +#<> + +main() diff --git a/library/cloud/digital_ocean_droplet b/library/cloud/digital_ocean_droplet new file mode 100644 index 00000000000..b59d79ce605 --- /dev/null +++ b/library/cloud/digital_ocean_droplet @@ -0,0 +1,320 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +DOCUMENTATION = ''' +--- +module: digital_ocean_droplet +short_description: Create/delete a droplet in DigitalOcean +description: + - Create/delete a droplet in DigitalOcean and optionally waits for it to be 'running'. +version_added: "1.4" +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + client_id: + description: + - Digital Ocean manager id. + api_key: + description: + - Digital Ocean api key. + id: + description: + - Numeric, the droplet id you want to operate on. + name: + description: + - String, this is the name of the droplet - must be formatted by hostname rules. + unique_name: + description: + - Bool, require unique hostnames. By default, digital ocean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence. + default: "no" + choices: [ "yes", "no" ] + size_id: + description: + - Numeric, this is the id of the size you would like the droplet created at. + image_id: + description: + - Numeric, this is the id of the image you would like the droplet created with. 
+ region_id: + description: + - "Numeric, this is the id of the region you would like your server" + ssh_key_ids: + description: + - Optional, comma separated list of ssh_key_ids that you would like to be added to the server + wait: + description: + - Wait for the droplet to be in state 'running' before returning. If wait is "no" an ip_address may not be returned. + default: "yes" + choices: [ "yes", "no" ] + wait_timeout: + description: + - How long before wait gives up, in seconds. + default: 300 + +notes: + - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. +''' + +EXAMPLES = ''' +# Create a new Droplet +# Will return the droplet details including the droplet id (used for idempotence) + +- digital_ocean_droplet: > + state=present + name=my_new_droplet + client_id=XXX + api_key=XXX + size_id=1 + region_id=2 + image_id=3 + wait_timeout=500 + register: my_droplet +- debug: msg="ID is {{ my_droplet.droplet.id }}" +- debug: msg="IP is {{ my_droplet.droplet.ip_address }}" + +# Ensure a droplet is present +# If droplet id already exist, will return the droplet details and changed = False +# If no droplet matches the id, a new droplet will be created and the droplet details (including the new id) are returned, changed = True. + +- digital_ocean_droplet: > + state=present + id=123 + name=my_new_droplet + client_id=XXX + api_key=XXX + size_id=1 + region_id=2 + image_id=3 + wait_timeout=500 + +# Create a droplet with ssh key +# The ssh key id can be passed as argument at the creation of a droplet (see ssh_key_ids). +# Several keys can be added to ssh_key_ids as id1,id2,id3 +# The keys are used to connect as root to the droplet. + +- digital_ocean_droplet: > + state=present + ssh_key_ids=id1,id2 + name=my_new_droplet + client_id=XXX + api_key=XXX + size_id=1 + region_id=2 + image_id=3 +''' + +import sys +import os +import time + +try: + from dopy.manager import DoError, DoManager +except ImportError as e: + print "failed=True msg='dopy required for this module'" + sys.exit(1) + +class TimeoutError(DoError): + def __init__(self, msg, id): + super(TimeoutError, self).__init__(msg) + self.id = id + +class JsonfyMixIn(object): + def to_json(self): + return self.__dict__ + +class Droplet(JsonfyMixIn): + manager = None + + def __init__(self, droplet_json): + self.status = 'new' + self.__dict__.update(droplet_json) + + def is_powered_on(self): + return self.status == 'active' + + def update_attr(self, attrs=None): + if attrs: + for k, v in attrs.iteritems(): + setattr(self, k, v) + else: + json = self.manager.show_droplet(self.id) + if json['ip_address']: + self.update_attr(json) + + def power_on(self): + assert self.status == 'off', 'Can only power on a closed one.' 
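+        # (only a droplet reported as 'off' is powered on here; a droplet still
+        #  provisioning reports status 'new' and is simply waited on instead)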
+ json = self.manager.power_on_droplet(self.id) + self.update_attr(json) + + def ensure_powered_on(self, wait=True, wait_timeout=300): + if self.is_powered_on(): + return + if self.status == 'off': # powered off + self.power_on() + + if wait: + end_time = time.time() + wait_timeout + while time.time() < end_time: + time.sleep(min(20, end_time - time.time())) + self.update_attr() + if self.is_powered_on(): + if not self.ip_address: + raise TimeoutError('No ip is found.', self.id) + return + raise TimeoutError('Wait for droplet running timeout', self.id) + + def destroy(self): + return self.manager.destroy_droplet(self.id) + + @classmethod + def setup(cls, client_id, api_key): + cls.manager = DoManager(client_id, api_key) + + @classmethod + def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None): + json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids) + droplet = cls(json) + return droplet + + @classmethod + def find(cls, id=None, name=None): + if not id and not name: + return False + + droplets = cls.list_all() + + # Check first by id. digital ocean requires that it be unique + for droplet in droplets: + if droplet.id == id: + return droplet + + # Failing that, check by hostname. + for droplet in droplets: + if droplet.name == name: + return droplet + + return False + + @classmethod + def list_all(cls): + json = cls.manager.all_active_droplets() + return map(cls, json) + +def core(module): + def getkeyordie(k): + v = module.params[k] + if v is None: + module.fail_json(msg='Unable to load %s' % k) + return v + + try: + # params['client_id'] will be None even if client_id is not passed in + client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID'] + api_key = module.params['api_key'] or os.environ['DO_API_KEY'] + except KeyError, e: + module.fail_json(msg='Unable to load %s' % e.message) + + changed = True + state = module.params['state'] + + Droplet.setup(client_id, api_key) + if state in ('present'): + + # First, try to find a droplet by id. + droplet = Droplet.find(id=module.params['id']) + + # If we couldn't find the droplet and the user is allowing unique + # hostnames, then check to see if a droplet with the specified + # hostname already exists. + if not droplet and module.params['unique_name']: + droplet = Droplet.find(name=getkeyordie('name')) + + # If both of those attempts failed, then create a new droplet. + if not droplet: + droplet = Droplet.add( + name=getkeyordie('name'), + size_id=getkeyordie('size_id'), + image_id=getkeyordie('image_id'), + region_id=getkeyordie('region_id'), + ssh_key_ids=module.params['ssh_key_ids'] + ) + + if droplet.is_powered_on(): + changed = False + + droplet.ensure_powered_on( + wait=getkeyordie('wait'), + wait_timeout=getkeyordie('wait_timeout') + ) + + module.exit_json(changed=changed, droplet=droplet.to_json()) + + elif state in ('absent'): + # First, try to find a droplet by id. + droplet = None + if 'id' in module.params: + droplet = Droplet.find(id=module.params['id']) + + # If we couldn't find the droplet and the user is allowing unique + # hostnames, then check to see if a droplet with the specified + # hostname already exists. 
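+        # (unique_name enforces at most one droplet per hostname, which is what
+        #  makes this fallback lookup by name unambiguous)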
+ if not droplet and module.params['unique_name'] and 'name' in module.params: + droplet = Droplet.find(name=module.params['name']) + + if not droplet: + module.exit_json(changed=False, msg='The droplet is not found.') + + event_json = droplet.destroy() + module.exit_json(changed=True, event_id=event_json['event_id']) + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(choices=['present', 'absent'], default='present'), + client_id = dict(aliases=['CLIENT_ID'], no_log=True), + api_key = dict(aliases=['API_KEY'], no_log=True), + name = dict(type='str'), + size_id = dict(type='int'), + image_id = dict(type='int'), + region_id = dict(type='int'), + ssh_key_ids = dict(default=''), + id = dict(aliases=['droplet_id'], type='int'), + unique_name = dict(type='bool', choices=BOOLEANS, default='no'), + wait = dict(type='bool', choices=BOOLEANS, default='yes'), + wait_timeout = dict(default=300, type='int'), + ), + required_together = ( + ['size_id', 'image_id', 'region_id'], + ), + required_one_of = ( + ['id', 'name'], + ), + ) + + try: + core(module) + except TimeoutError as e: + module.fail_json(msg=str(e), id=e.id) + except (DoError, Exception) as e: + module.fail_json(msg=str(e)) + +# this is magic, see lib/ansible/module_common.py +#<> + +main() diff --git a/library/cloud/digital_ocean_ssh b/library/cloud/digital_ocean_ssh new file mode 100644 index 00000000000..1c361f69d77 --- /dev/null +++ b/library/cloud/digital_ocean_ssh @@ -0,0 +1,179 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +DOCUMENTATION = ''' +--- +module: digital_ocean_ssh +short_description: Create/delete an SSH key in DigitalOcean +description: + - Create/delete an SSH key. +version_added: "1.4" +options: + state: + description: + - Indicate desired state of the target. + default: present + choices: ['present', 'absent'] + client_id: + description: + - Digital Ocean manager id. + api_key: + description: + - Digital Ocean api key. + id: + description: + - Numeric, the SSH key id you want to operate on. + name: + description: + - String, this is the name of an SSH key to create or destroy. + ssh_pub_key: + description: + - The public SSH key you want to add to your account. + +notes: + - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. +''' + + +EXAMPLES = ''' +# Ensure a SSH key is present +# If a key matches this name, will return the ssh key id and changed = False +# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False + +- digital_ocean_ssh: > + state=present + command=ssh + name=my_ssh_key + ssh_pub_key='ssh-rsa AAAA...' 
+ client_id=XXX + api_key=XXX + +''' + +import sys +import os +import time + +try: + from dopy.manager import DoError, DoManager +except ImportError as e: + print "failed=True msg='dopy required for this module'" + sys.exit(1) + +class TimeoutError(DoError): + def __init__(self, msg, id): + super(TimeoutError, self).__init__(msg) + self.id = id + +class JsonfyMixIn(object): + def to_json(self): + return self.__dict__ + +class SSH(JsonfyMixIn): + manager = None + + def __init__(self, ssh_key_json): + self.__dict__.update(ssh_key_json) + update_attr = __init__ + + def destroy(self): + self.manager.destroy_ssh_key(self.id) + return True + + @classmethod + def setup(cls, client_id, api_key): + cls.manager = DoManager(client_id, api_key) + + @classmethod + def find(cls, name): + if not name: + return False + keys = cls.list_all() + for key in keys: + if key.name == name: + return key + return False + + @classmethod + def list_all(cls): + json = cls.manager.all_ssh_keys() + return map(cls, json) + + @classmethod + def add(cls, name, key_pub): + json = cls.manager.new_ssh_key(name, key_pub) + return cls(json) + +def core(module): + def getkeyordie(k): + v = module.params[k] + if v is None: + module.fail_json(msg='Unable to load %s' % k) + return v + + try: + # params['client_id'] will be None even if client_id is not passed in + client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID'] + api_key = module.params['api_key'] or os.environ['DO_API_KEY'] + except KeyError, e: + module.fail_json(msg='Unable to load %s' % e.message) + + changed = True + state = module.params['state'] + + SSH.setup(client_id, api_key) + name = getkeyordie('name') + if state in ('present'): + key = SSH.find(name) + if key: + module.exit_json(changed=False, ssh_key=key.to_json()) + key = SSH.add(name, getkeyordie('ssh_pub_key')) + module.exit_json(changed=True, ssh_key=key.to_json()) + + elif state in ('absent'): + key = SSH.find(name) + if not key: + module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name) + key.destroy() + module.exit_json(changed=True) + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(choices=['present', 'absent'], default='present'), + client_id = dict(aliases=['CLIENT_ID'], no_log=True), + api_key = dict(aliases=['API_KEY'], no_log=True), + name = dict(type='str'), + id = dict(aliases=['droplet_id'], type='int'), + ssh_pub_key = dict(type='str'), + ), + required_one_of = ( + ['id', 'name'], + ), + ) + + try: + core(module) + except TimeoutError as e: + module.fail_json(msg=str(e), id=e.id) + except (DoError, Exception) as e: + module.fail_json(msg=str(e)) + +# this is magic, see lib/ansible/module_common.py +#<> + +main() From 7e0a56d2a45df3c1de91c895ed94c8b31ddac1d8 Mon Sep 17 00:00:00 2001 From: Michael Gregson Date: Tue, 1 Oct 2013 15:18:15 -0600 Subject: [PATCH 008/772] [digital_ocean] Remove command usage from digital_ocean_ssh examples I seem to have forgotten to do this in my last commit. --- library/cloud/digital_ocean_ssh | 1 - 1 file changed, 1 deletion(-) diff --git a/library/cloud/digital_ocean_ssh b/library/cloud/digital_ocean_ssh index 1c361f69d77..f03fae14f80 100644 --- a/library/cloud/digital_ocean_ssh +++ b/library/cloud/digital_ocean_ssh @@ -56,7 +56,6 @@ EXAMPLES = ''' - digital_ocean_ssh: > state=present - command=ssh name=my_ssh_key ssh_pub_key='ssh-rsa AAAA...' 
client_id=XXX From 812fd22e989c736fc0585f7889461d1cdc6b3a8f Mon Sep 17 00:00:00 2001 From: Yap Sok Ann Date: Wed, 4 Sep 2013 11:10:58 +0800 Subject: [PATCH 009/772] Add packaging module for Gentoo Portage. This is in no way comprehensive enough to cover all use cases, but hopefully is sufficient to cover the common ones. --- library/packaging/portage | 389 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 389 insertions(+) create mode 100644 library/packaging/portage diff --git a/library/packaging/portage b/library/packaging/portage new file mode 100644 index 00000000000..c68dc0ebfa4 --- /dev/null +++ b/library/packaging/portage @@ -0,0 +1,389 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Yap Sok Ann +# Written by Yap Sok Ann +# Based on apt module written by Matthew Williams +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +DOCUMENTATION = ''' +--- +module: portage +short_description: Package manager for Gentoo +description: + - Manages Gentoo packages + +version_added: "1.4" + +options: + package: + description: + - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world) + required: false + default: null + + state: + description: + - State of the package atom + required: false + default: "present" + choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged" ] + + update: + description: + - Update packages to the best version available (--update) + required: false + default: null + choices: [ "yes" ] + + deep: + description: + - Consider the entire dependency tree of packages (--deep) + required: false + default: null + choices: [ "yes" ] + + newuse: + description: + - Include installed packages where USE flags have changed (--newuse) + required: false + default: null + choices: [ "yes" ] + + oneshot: + description: + - Do not add the packages to the world file (--oneshot) + required: false + default: null + choices: [ "yes" ] + + noreplace: + description: + - Do not re-emerge installed packages (--noreplace) + required: false + default: null + choices: [ "yes" ] + + nodeps: + description: + - Only merge packages but not their dependencies (--nodeps) + required: false + default: null + choices: [ "yes" ] + + onlydeps: + description: + - Only merge packages' dependencies but not the packages (--onlydeps) + required: false + default: null + choices: [ "yes" ] + + depclean: + description: + - Remove packages not needed by explicitly merged packages (--depclean) + - If no package is specified, clean up the world's dependencies + - Otherwise, --depclean serves as a dependency aware version of --unmerge + required: false + default: null + choices: [ "yes" ] + + quiet: + description: + - Run emerge in quiet mode (--quiet) + required: false + default: null + choices: [ "yes" ] + + verbose: + description: + - Run emerge in verbose mode (--verbose) + required: false + default: null + choices: [ "yes" ] + + sync: + description: + - Sync package repositories first + - If yes, perform 
"emerge --sync" + - If web, perform "emerge-webrsync" + required: false + default: null + choices: [ "yes", "web" ] + +requirements: [ gentoolkit ] +author: Yap Sok Ann +notes: [] +''' + +EXAMPLES = ''' +# Make sure package foo is installed +- portage: package=foo state=present + +# Make sure package foo is not installed +- portage: package=foo state=absent + +# Update package foo to the "best" version +- portage: package=foo update=yes + +# Sync repositories and update world +- portage: package=@world update=yes deep=yes sync=yes + +# Remove unneeded packages +- portage: depclean=yes + +# Remove package foo if it is not explicitly needed +- portage: package=foo state=absent depclean=yes +''' + + +import os +import pipes + + +def query_package(module, package, action): + if package.startswith('@'): + return query_set(module, package, action) + return query_atom(module, package, action) + + +def query_atom(module, atom, action): + cmd = '%s list %s' % (module.equery_path, atom) + + rc, out, err = module.run_command(cmd) + return rc == 0 + + +def query_set(module, set, action): + system_sets = [ + '@live-rebuild', + '@module-rebuild', + '@preserved-rebuild', + '@security', + '@selected', + '@system', + '@world', + '@x11-module-rebuild', + ] + + if set in system_sets: + if action == 'unmerge': + module.fail_json(msg='set %s cannot be removed' % set) + return False + + world_sets_path = '/var/lib/portage/world_sets' + if not os.path.exists(world_sets_path): + return False + + cmd = 'grep %s %s' % (set, world_sets_path) + + rc, out, err = module.run_command(cmd) + return rc == 0 + + +def sync_repositories(module, webrsync=False): + if module.check_mode: + module.fail_json(msg='check mode not supported by sync') + + if webrsync: + webrsync_path = module.get_bin_path('emerge-webrsync', required=True) + cmd = '%s --quiet' % webrsync_path + else: + cmd = '%s --sync --quiet' % module.emerge_path + + rc, out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg='could not sync package repositories') + + +# Note: In the 3 functions below, equery is done one-by-one, but emerge is done +# in one go. If that is not desirable, split the packages into multiple tasks +# instead of joining them together with comma. 
+ + +def emerge_packages(module, packages): + p = module.params + + if not (p['update'] or p['noreplace']): + for package in packages: + if not query_package(module, package, 'emerge'): + break + else: + module.exit_json(changed=False, msg='Packages already present.') + + args = [] + for flag in [ + 'update', 'deep', 'newuse', + 'oneshot', 'noreplace', + 'nodeps', 'onlydeps', + 'quiet', 'verbose', + ]: + if p[flag]: + args.append('--%s' % flag) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + if rc != 0: + module.fail_json( + cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages not installed.', + ) + + changed = True + for line in out.splitlines(): + if line.startswith('>>> Emerging (1 of'): + break + else: + changed = False + + module.exit_json( + changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages installed.', + ) + + +def unmerge_packages(module, packages): + p = module.params + + for package in packages: + if query_package(module, package, 'unmerge'): + break + else: + module.exit_json(changed=False, msg='Packages already absent.') + + args = ['--unmerge'] + + for flag in ['quiet', 'verbose']: + if p[flag]: + args.append('--%s' % flag) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + + if rc != 0: + module.fail_json( + cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages not removed.', + ) + + module.exit_json( + changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages removed.', + ) + + +def cleanup_packages(module, packages): + p = module.params + + if packages: + for package in packages: + if query_package(module, package, 'unmerge'): + break + else: + module.exit_json(changed=False, msg='Packages already absent.') + + args = ['--depclean'] + + for flag in ['quiet', 'verbose']: + if p[flag]: + args.append('--%s' % flag) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + if rc != 0: + module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err) + + removed = 0 + for line in out.splitlines(): + if not line.startswith('Number removed:'): + continue + parts = line.split(':') + removed = int(parts[1].strip()) + changed = removed > 0 + + module.exit_json( + changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Depclean completed.', + ) + + +def run_emerge(module, packages, *args): + args = list(args) + + if module.check_mode: + args.append('--pretend') + + cmd = [module.emerge_path] + args + packages + return cmd, module.run_command(cmd) + + +portage_present_states = ['present', 'emerged', 'installed'] +portage_absent_states = ['absent', 'unmerged', 'removed'] + + +def main(): + module = AnsibleModule( + argument_spec=dict( + package=dict(default=None, aliases=['name']), + state=dict( + default=portage_present_states[0], + choices=portage_present_states + portage_absent_states, + ), + update=dict(default=None, choices=['yes']), + deep=dict(default=None, choices=['yes']), + newuse=dict(default=None, choices=['yes']), + oneshot=dict(default=None, choices=['yes']), + noreplace=dict(default=None, choices=['yes']), + nodeps=dict(default=None, choices=['yes']), + onlydeps=dict(default=None, choices=['yes']), + depclean=dict(default=None, choices=['yes']), + quiet=dict(default=None, choices=['yes']), + verbose=dict(default=None, choices=['yes']), + sync=dict(default=None, choices=['yes', 'web']), + ), + required_one_of=[['package', 'sync', 'depclean']], + mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']], + supports_check_mode=True, + ) + + module.emerge_path = module.get_bin_path('emerge', 
required=True) + module.equery_path = module.get_bin_path('equery', required=True) + + p = module.params + + if p['sync']: + sync_repositories(module, webrsync=(p['sync'] == 'web')) + if not p['package']: + return + + packages = p['package'].split(',') if p['package'] else [] + + if p['depclean']: + if packages and p['state'] not in portage_absent_states: + module.fail_json( + msg='Depclean can only be used with package when the state is ' + 'one of: %s' % portage_absent_states, + ) + + cleanup_packages(module, packages) + + elif p['state'] in portage_present_states: + emerge_packages(module, packages) + + elif p['state'] in portage_absent_states: + unmerge_packages(module, packages) + + +# this is magic, see lib/ansible/module_common.py +#<> + +main() From ab5098fd7ef6959fd7f03fee585645cd9698b3cc Mon Sep 17 00:00:00 2001 From: Jake Kreider Date: Fri, 1 Nov 2013 09:23:01 -0500 Subject: [PATCH 010/772] Added metadata support to s3 module --- library/cloud/s3 | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/library/cloud/s3 b/library/cloud/s3 index 36ddd6ef800..3ce6e1b0b38 100644 --- a/library/cloud/s3 +++ b/library/cloud/s3 @@ -83,6 +83,11 @@ options: required: false default: null aliases: [ 'ec2_access_key', 'access_key' ] + metadata: + description: + - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. + required: false + default: null requirements: [ "boto" ] author: Lester Wade, Ralph Tice ''' @@ -97,7 +102,9 @@ EXAMPLES = ''' # GET/download and do not overwrite local file (trust remote) - s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get force=false # PUT/upload and overwrite remote file (trust local) -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put +- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put +# PUT/upload with metadata +- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip' # PUT/upload and do not overwrite remote file (trust local) - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false # Download an object as a string to use else where in your playbook @@ -201,10 +208,14 @@ def path_check(path): else: return False -def upload_s3file(module, s3, bucket, obj, src, expiry): +def upload_s3file(module, s3, bucket, obj, src, expiry, metadata): try: bucket = s3.lookup(bucket) - key = bucket.new_key(obj) + key = bucket.new_key(obj) + if metadata: + for meta_key in metadata.keys(): + key.set_metadata(meta_key, metadata[meta_key]) + key.set_contents_from_filename(src) url = key.generate_url(expiry) module.exit_json(msg="PUT operation complete", url=url, changed=True) @@ -261,6 +272,7 @@ def main(): aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True, required=False), aws_access_key = dict(aliases=['ec2_access_key', 'access_key'], required=False), overwrite = dict(aliases=['force'], default=True, type='bool'), + metadata = dict(type='dict'), ), ) @@ -275,6 +287,7 @@ def main(): aws_secret_key = module.params.get('aws_secret_key') aws_access_key = module.params.get('aws_access_key') overwrite = module.params.get('overwrite') + metadata = module.params.get('metadata') if module.params.get('object'): obj = os.path.expanduser(module.params['object']) @@ -381,24 +394,24 @@ def main(): if md5_local == md5_remote: sum_matches = True if overwrite is True: - upload_s3file(module, s3, bucket, 
obj, src, expiry) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata) else: get_download_url(module, s3, bucket, obj, expiry, changed=False) else: sum_matches = False if overwrite is True: - upload_s3file(module, s3, bucket, obj, src, expiry) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata) else: module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True) # If neither exist (based on bucket existence), we can create both. if bucketrtn is False and pathrtn is True: create_bucket(module, s3, bucket) - upload_s3file(module, s3, bucket, obj, src, expiry) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata) # If bucket exists but key doesn't, just upload. if bucketrtn is True and pathrtn is True and keyrtn is False: - upload_s3file(module, s3, bucket, obj, src, expiry) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata) # Support for deleting an object if we have both params. if mode == 'delete': From 08541e291a36e7d7ce7083efcca08a5901e08a0b Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Thu, 7 Nov 2013 21:40:29 -0500 Subject: [PATCH 011/772] New module: ec2_ubuntu_ami This module will retrieve the AMI of the most recent official Ubuntu EC2 image for a given Ubuntu release. --- library/cloud/ec2_ubuntu_ami | 177 +++++++++++++++++++++++++++++++++++ 1 file changed, 177 insertions(+) create mode 100644 library/cloud/ec2_ubuntu_ami diff --git a/library/cloud/ec2_ubuntu_ami b/library/cloud/ec2_ubuntu_ami new file mode 100644 index 00000000000..062971ab25f --- /dev/null +++ b/library/cloud/ec2_ubuntu_ami @@ -0,0 +1,177 @@ +#!/usr/bin/python +# +# (c) 2013, Nimbis Services +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +DOCUMENTATION = ''' +--- +module: ec2_ubuntu_ami +short_description: Retrieve AWS AMIs for official Ubuntu images +description: + - The Ubuntu project maintains a list of the latest version of Ubuntu images on EC2 accessible via http. + - This module retrieves the AMI for a given Ubuntu release by making an http query against the appropriate cloud-images.ubuntu.com url and parsing the output. + - For example: https://cloud-images.ubuntu.com/query/precise/server/released.current.txt has information about Ubuntu 12.04 (precise pangolin) release, server edition. + - Returns C(ami), C(aki), C(ari), C(serial), C(tag) + - If there is no AKI or ARI associated with an image, these will be C(null). + - Example output: C({"ami": "ami-69f5a900", "changed": false, "aki": "aki-88aa75e1", "tag": "release", "ari": null, "serial": "20131024"}) +options: + release: + description: short name of the release (e.g., C(precise)) + required: true + stream: + description: Type of release. 
+    required: false
+    default: "server"
+    choices: ["server", "desktop"]
+  store:
+    description: Back-end store for instance
+    required: false
+    default: "ebs"
+    choices: ["ebs", "instance-store"]
+  arch:
+    description: CPU architecture
+    required: false
+    default: "amd64"
+    choices: ["i386", "amd64"]
+  region:
+    description: EC2 region
+    required: false
+    default: us-east-1
+    choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2",
+              "eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2"]
+  virt:
+    description: virtualization type
+    required: false
+    default: paravirtual
+    choices: ["paravirtual", "hvm"]
+
+author: Lorin Hochstein
+'''
+
+EXAMPLES = '''
+- name: Launch an Ubuntu 12.04 (Precise Pangolin) EC2 instance
+  hosts: 127.0.0.1
+  connection: local
+  tasks:
+  - name: Get the Ubuntu precise AMI
+    ec2_ubuntu_ami: release=precise region=us-west-1 store=instance-store
+    register: ubuntu_image
+  - name: Start the EC2 instance
+    ec2: image={{ ubuntu_image.ami }} instance_type=m1.small key_name=mykey
+'''
+
+import csv
+import json
+import urllib2
+import urlparse
+
+AWS_REGIONS = ['ap-northeast-1',
+               'ap-southeast-1',
+               'ap-southeast-2',
+               'eu-west-1',
+               'sa-east-1',
+               'us-east-1',
+               'us-west-1',
+               'us-west-2']
+
+
+def get_url(module, url):
+    """ Get url and return response """
+    try:
+        r = urllib2.urlopen(url)
+    except (urllib2.HTTPError, urllib2.URLError), e:
+        code = getattr(e, 'code', -1)
+        module.fail_json(msg="Request failed: %s" % str(e), status_code=code)
+    return r
+
+
+def get_ami(table, release, stream, store,
+            arch, region, virt):
+    """ Get the Ubuntu AMI that matches query given a table of AMIs
+
+    table: an iterable that returns a row of
+           (release, stream, tag, serial, region, ami, aki, ari, virt)
+    release: ubuntu release name
+    stream: 'server' or 'desktop'
+    store: 'ebs' or 'instance-store'
+    arch: 'i386' or 'amd64'
+    region: EC2 region
+    virt: 'paravirtual' or 'hvm'
+
+    Returns (ami, aki, ari, tag, serial)"""
+    expected = (release, stream, store, arch, region, virt)
+
+    for row in table:
+        (actual_release, actual_stream, tag, serial,
+         actual_store, actual_arch, actual_region, ami, aki, ari,
+         actual_virt) = row
+        actual = (actual_release, actual_stream, actual_store, actual_arch,
+                  actual_region, actual_virt)
+        if actual == expected:
+            # aki and ari are sometimes blank
+            if aki == '':
+                aki = None
+            if ari == '':
+                ari = None
+            return (ami, aki, ari, tag, serial)
+
+    raise KeyError()
+
+
+def get_ubuntu_url(release, stream):
+    url = "https://cloud-images.ubuntu.com/query/%s/%s/released.current.txt"
+    return url % (release, stream)
+
+
+def main():
+    arg_spec = dict(
+        release=dict(required=True),
+        stream=dict(required=False, default='server',
+                    choices=['desktop', 'server']),
+        store=dict(required=False, default='ebs',
+                   choices=['ebs', 'instance-store']),
+        arch=dict(required=False, default='amd64',
+                  choices=['i386', 'amd64']),
+        region=dict(required=False, default='us-east-1', choices=AWS_REGIONS),
+        virt=dict(required=False, default='paravirtual',
+                  choices=['paravirtual', 'hvm'])
+    )
+    module = AnsibleModule(argument_spec=arg_spec)
+    release = module.params['release']
+    stream = module.params['stream']
+    store = module.params['store']
+    arch = module.params['arch']
+    region = module.params['region']
+    virt = module.params['virt']
+
+    url = get_ubuntu_url(release, stream)
+
+    req = get_url(module, url)
+    reader = csv.reader(req, delimiter='\t')
+    try:
+        ami, aki, ari, tag, serial = get_ami(reader, release, stream, store,
+                                             arch, region, virt)
+        module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag,
+                         serial=serial)
+    except KeyError:
+        module.fail_json(msg="No matching AMI found")
+
+
+# this is magic, see lib/ansible/module_common.py
+#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+
+if __name__ == '__main__':
+    main()

From 99fc134881f19d8e1b69242de9584f7b22ca01b9 Mon Sep 17 00:00:00 2001
From: Lorin Hochstein
Date: Sat, 9 Nov 2013 14:54:56 -0500
Subject: [PATCH 012/772] Rename to ec2_ami_search

---
 library/cloud/{ec2_ubuntu_ami => ec2_ami_search} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename library/cloud/{ec2_ubuntu_ami => ec2_ami_search} (100%)

diff --git a/library/cloud/ec2_ubuntu_ami b/library/cloud/ec2_ami_search
similarity index 100%
rename from library/cloud/ec2_ubuntu_ami
rename to library/cloud/ec2_ami_search

From 0df1195fb984bb90b891cb9da519fa53acbb401f Mon Sep 17 00:00:00 2001
From: Lorin Hochstein
Date: Sat, 9 Nov 2013 14:56:02 -0500
Subject: [PATCH 013/772] Genericize module to support multiple distros

Make the module implementation more generic to support distributions
other than Ubuntu in the future.

Adds distro as a new parameter.
---
 library/cloud/ec2_ami_search | 66 ++++++++++++++++++++++--------------
 1 file changed, 41 insertions(+), 25 deletions(-)

diff --git a/library/cloud/ec2_ami_search b/library/cloud/ec2_ami_search
index 062971ab25f..0bb3fab0078 100644
--- a/library/cloud/ec2_ami_search
+++ b/library/cloud/ec2_ami_search
@@ -18,16 +18,18 @@
 # along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
 DOCUMENTATION = '''
 ---
-module: ec2_ubuntu_ami
-short_description: Retrieve AWS AMIs for official Ubuntu images
+module: ec2_ami_search
+short_description: Retrieve AWS AMI for a given operating system.
 description:
-  - The Ubuntu project maintains a list of the latest version of Ubuntu images on EC2 accessible via http.
-  - This module retrieves the AMI for a given Ubuntu release by making an http query against the appropriate cloud-images.ubuntu.com url and parsing the output.
-  - For example: https://cloud-images.ubuntu.com/query/precise/server/released.current.txt has information about Ubuntu 12.04 (precise pangolin) release, server edition.
+  - Look up the most recent AMI on AWS for a given operating system.
   - Returns C(ami), C(aki), C(ari), C(serial), C(tag)
   - If there is no AKI or ARI associated with an image, these will be C(null).
- Example output: C({"ami": "ami-69f5a900", "changed": false, "aki": "aki-88aa75e1", "tag": "release", "ari": null, "serial": "20131024"}) options: + distro: + description: Linux distribution (e.g., C(ubuntu)) + required: true + choices: ["ubuntu"] release: description: short name of the release (e.g., C(precise)) required: true @@ -67,7 +69,7 @@ EXAMPLES = ''' connection: local tasks: - name: Get the Ubuntu precise AMI - ec2_ubuntu_ami: release=precise region=us-west-1 store=instance-store + ec2_ami_search: distro=ubuntu release=precise region=us-west-1 store=instance-store register: ubuntu_image - name: Start the EC2 instance ec2: image={{ ubuntu_image.ami }} instance_type=m1.small key_name=mykey @@ -78,6 +80,8 @@ import json import urllib2 import urlparse +SUPPORTED_DISTROS = ['ubuntu'] + AWS_REGIONS = ['ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', @@ -98,9 +102,31 @@ def get_url(module, url): return r -def get_ami(table, release, stream, store, - arch, region, virt): - """ Get the Ubuntu AMI that matches query given a table of AMIs +def ubuntu(module): + """ Get the ami for ubuntu """ + + release = module.params['release'] + stream = module.params['stream'] + store = module.params['store'] + arch = module.params['arch'] + region = module.params['region'] + virt = module.params['virt'] + + url = get_ubuntu_url(release, stream) + + req = get_url(module, url) + reader = csv.reader(req, delimiter='\t') + try: + ami, aki, ari, tag, serial = lookup_ubuntu_ami(reader, release, stream, + store, arch, region, virt) + module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag, + serial=serial) + except KeyError: + module.fail_json(msg="No matching AMI found") + + +def lookup_ubuntu_ami(table, release, stream, store, arch, region, virt): + """ Look up the Ubuntu AMI that matches query given a table of AMIs table: an iterable that returns a row of (release, stream, tag, serial, region, ami, aki, ari, virt) @@ -138,6 +164,7 @@ def get_ubuntu_url(release, stream): def main(): arg_spec = dict( + distro=dict(required=True, choices=SUPPORTED_DISTROS), release=dict(required=True), stream=dict(required=False, default='server', choices=['desktop', 'server']), @@ -150,24 +177,13 @@ def main(): choices=['paravirtual', 'hvm']) ) module = AnsibleModule(argument_spec=arg_spec) - release = module.params['release'] - stream = module.params['stream'] - store = module.params['store'] - arch = module.params['arch'] - region = module.params['region'] - virt = module.params['virt'] + distro = module.params['distro'] - url = get_ubuntu_url(release, stream) + if distro == 'ubuntu': + ubuntu(module) + else: + module.fail_json(msg="Unsupported distro: %s" % distro) - req = get_url(module, url) - reader = csv.reader(req, delimiter='\t') - try: - ami, aki, ari, tag, serial = get_ami(reader, release, stream, store, - arch, region, virt) - module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag, - serial=serial) - except KeyError: - module.fail_json(msg="No matching AMI found") # this is magic, see lib/ansible/module_common.py From 5f1dd479540f4b5cc86ce0aeefca2dc73da7b142 Mon Sep 17 00:00:00 2001 From: Thomas van Noort Date: Fri, 15 Nov 2013 10:14:13 +0100 Subject: [PATCH 014/772] Allow for reuse of allocated but unassociated Elastic IPs. 
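In outline, the reuse path lists the account's existing addresses, hands back the
first one that is not bound to an instance, and only falls back to allocating when
nothing is free. A condensed sketch of that selection logic, using the same boto
calls as the diff below (the helper name is illustrative only):

    def pick_or_allocate(ec2, domain, reuse):
        # 'standard' is the EC2-classic address domain; VPC addresses carry
        # domain='vpc', so the filter keeps the two pools apart
        if reuse:
            flt = {'domain': domain or 'standard'}
            free = [a for a in ec2.get_all_addresses(filters=flt)
                    if a.instance_id is None]
            if free:
                return free[0]
        return ec2.allocate_address(domain=domain)

Filtering on instance_id client-side keeps this to a single describe call per run.
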
--- library/cloud/ec2_eip | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/library/cloud/ec2_eip b/library/cloud/ec2_eip index 1c5db8cf4c1..420f2cc9f97 100644 --- a/library/cloud/ec2_eip +++ b/library/cloud/ec2_eip @@ -53,6 +53,11 @@ options: required: false default: false version_added: "1.4" + reuse: + description: + - Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one. + required: false + default: false requirements: [ "boto" ] author: Lorin Hochstein notes: @@ -198,13 +203,27 @@ def ip_is_associated_with_instance(ec2, public_ip, instance_id, module): return False -def allocate_address(ec2, domain, module): - """ Allocate a new elastic IP address and return it """ +def allocate_address(ec2, domain, module, reuse): + """ Allocate a new elastic IP address (when needed) and return it """ # If we're in check mode, nothing else to do if module.check_mode: module.exit_json(change=True) - address = ec2.allocate_address(domain=domain) + if reuse: + if domain: + domain_filter = { 'domain' : domain } + else: + domain_filter = { 'domain' : 'standard' } + all_addresses = ec2.get_all_addresses(filters=domain_filter) + + unassociated_addresses = filter(lambda a: a.instance_id is None, all_addresses) + if unassociated_addresses: + address = unassociated_addresses[0]; + else: + address = ec2.allocate_address(domain=domain) + else: + address = ec2.allocate_address(domain=domain) + return address @@ -252,6 +271,7 @@ def main(): ec2_access_key = dict(required=False, aliases=['EC2_ACCESS_KEY']), region = dict(required=False, aliases=['ec2_region']), in_vpc = dict(required=False, choices=BOOLEANS, default=False), + reuse = dict(required=False, choices=BOOLEANS, default=False), ), supports_check_mode=True ) @@ -272,18 +292,19 @@ def main(): state = module.params.get('state') in_vpc = module.params.get('in_vpc') domain = "vpc" if in_vpc else None + reuse = module.params.get('reuse'); if state == 'present': if public_ip is None: if instance_id is None: - address = allocate_address(ec2, domain, module) + address = allocate_address(ec2, domain, module, reuse) module.exit_json(changed=True, public_ip=address.public_ip) else: # Determine if the instance is inside a VPC or not instance = find_instance(ec2, instance_id, module) if instance.vpc_id != None: domain = "vpc" - address = allocate_address(ec2, domain, module) + address = allocate_address(ec2, domain, module, reuse) else: address = find_address(ec2, public_ip, module) associate_ip_and_instance(ec2, address, instance_id, module) From e88ab431f06bb724353a08c263a102a547e68ab3 Mon Sep 17 00:00:00 2001 From: Evan Kaufman Date: Tue, 12 Nov 2013 13:30:18 -0600 Subject: [PATCH 015/772] Added replace module Heavily based on existing lineinfile module, but where it literally tests a regexp against *each individual line* of a file, this replace module is more analogous to common uses of a `sed` or `perl` match + replacement of all instances of a pattern anywhere in the file. 
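The whole-file behaviour comes straight from the standard library: the module body
below compiles the pattern with re.MULTILINE and hands count=0 to re.subn, which
rewrites every occurrence in a single pass. A standalone illustration:

    import re

    contents = "alias old.host.name\nname old.host.name www\n"
    pattern = re.compile(r'old\.host\.name', re.MULTILINE)
    # count=0 means "replace every match"; subn also reports how many it made
    new_contents, made = re.subn(pattern, 'new.host.name', contents, 0)
    assert made == 2
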
Was debating adding `all` boolean or `count` numeric options to control how many replacements to make in the destfile (vs currently replacing all instances) Noted use of MULTILINE mode in docs, per suggestion from @jarv --- library/files/replace | 160 ++++++++++++++++++++++++++++++++++++++++++ test/TestRunner.py | 53 ++++++++++++++ test/known_hosts.txt | 4 ++ 3 files changed, 217 insertions(+) create mode 100644 library/files/replace create mode 100644 test/known_hosts.txt diff --git a/library/files/replace b/library/files/replace new file mode 100644 index 00000000000..b008d1b39db --- /dev/null +++ b/library/files/replace @@ -0,0 +1,160 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Evan Kaufman . + +import re +import os +import tempfile + +DOCUMENTATION = """ +--- +module: replace +author: Evan Kaufman +short_description: Replace all instances of a particular string in a + file using a back-referenced regular expression. +description: + - This module will replace all instances of a pattern within a file. + - It is up to the user to maintain idempotence by ensuring that the + same pattern would never match any replacements made. +version_added: "1.4" +options: + dest: + required: true + aliases: [ name, destfile ] + description: + - The file to modify. + regexp: + required: true + description: + - The regular expression to look for in the contents of the file. + Uses Python regular expressions; see + U(http://docs.python.org/2/library/re.html). + Uses multiline mode, which means C(^) and C($) match the beginning + and end respectively of I(each line) of the file. + replace: + required: false + description: + - The string to replace regexp matches. May contain backreferences + that will get expanded with the regexp capture groups if the regexp + matches. If not set, matches are removed entirely. + backup: + required: false + default: "no" + choices: [ "yes", "no" ] + description: + - Create a backup file including the timestamp information so you can + get the original file back if you somehow clobbered it incorrectly. + validate: + required: false + description: + - validation to run before copying into place + required: false + default: None + others: + description: + - All arguments accepted by the M(file) module also work here. 
+ required: false +""" + +EXAMPLES = r""" +- replace: dest=/etc/hosts regexp='(\s+)old\.host\.name(\s+.*)?$' replace='\1new.host.name\2' backup=yes + +- replace: dest=/home/jdoe/.ssh/known_hosts regexp='^old\.host\.name[^\n]*\n' owner=jdoe group=jdoe mode=644 + +- replace: dest=/etc/apache/ports regexp='^(NameVirtualHost|Listen)\s+80\s*$' replace='\1 127.0.0.1:8080' validate='/usr/sbin/apache2ctl -f %s -t' +""" + +def write_changes(module,contents,dest): + + tmpfd, tmpfile = tempfile.mkstemp() + f = os.fdopen(tmpfd,'wb') + f.write(contents) + f.close() + + validate = module.params.get('validate', None) + valid = not validate + if validate: + (rc, out, err) = module.run_command(validate % tmpfile) + valid = rc == 0 + if rc != 0: + module.fail_json(msg='failed to validate: ' + 'rc:%s error:%s' % (rc,err)) + if valid: + module.atomic_move(tmpfile, dest) + +def check_file_attrs(module, changed, message): + + file_args = module.load_file_common_arguments(module.params) + if module.set_file_attributes_if_different(file_args, False): + + if changed: + message += " and " + changed = True + message += "ownership, perms or SE linux context changed" + + return message, changed + +def main(): + module = AnsibleModule( + argument_spec=dict( + dest=dict(required=True, aliases=['name', 'destfile']), + regexp=dict(required=True), + replace=dict(default='', type='str'), + backup=dict(default=False, type='bool'), + validate=dict(default=None, type='str'), + ), + add_file_common_args=True, + supports_check_mode=True + ) + + params = module.params + dest = os.path.expanduser(params['dest']) + + if os.path.isdir(dest): + module.fail_json(rc=256, msg='Destination %s is a directory !' % dest) + + if not os.path.exists(dest): + module.fail_json(rc=257, msg='Destination %s does not exist !' 
% dest) + else: + f = open(dest, 'rb') + contents = f.read() + f.close() + + mre = re.compile(params['regexp'], re.MULTILINE) + result = re.subn(mre, params['replace'], contents, 0) + + if result[1] > 0: + msg = '%s replacements made' % result[1] + changed = True + else: + msg = '' + changed = False + + if changed and not module.check_mode: + if params['backup'] and os.path.exists(dest): + module.backup_local(dest) + write_changes(module, result[0], dest) + + msg, changed = check_file_attrs(module, changed, msg) + module.exit_json(changed=changed, msg=msg) + +# this is magic, see lib/ansible/module_common.py +#<> + +main() diff --git a/test/TestRunner.py b/test/TestRunner.py index f991d02bd3c..9b25887f777 100644 --- a/test/TestRunner.py +++ b/test/TestRunner.py @@ -625,3 +625,56 @@ class TestRunner(unittest.TestCase): assert result['failed'] os.unlink(sample) + + def test_replace(self): + origin = self._get_test_file('known_hosts.txt') + scratch = self._get_stage_file('known_hosts.tmp') + shutil.copy(origin, scratch) + + # regexp should not match + testcase = ('replace', [ + "dest=%s" % scratch, + "regexp='^zeta.example.com(.+)$'" + r"replace='zulu.example.com\1'" + ]) + result = self._run(*testcase) + assert result['changed'] == False + assert result['msg'] == '' + + # regexp w one match, replace w backref + teststr = 'omega.example.com' + testip = '10.11.12.14' + testcase = ('replace', [ + "dest=%s" % scratch, + "regexp='^[^,]+(,%s\s+.+)$'" % testip, + r"replace='%s\1'" % teststr + ]) + result = self._run(*testcase) + assert result['changed'] + assert result['msg'] == '1 replacements made' + assert file(scratch).read().find(teststr) != -1 + assert file(scratch).read().find(testip) != -1 + + # regexp w multiple match, simple replace + teststr = '10.11.12.13' + testcase = ('replace', [ + "dest=%s" % scratch, + "regexp='%s'" % teststr, + "replace='11.12.13.14'" + ]) + result = self._run(*testcase) + assert result['changed'] + assert result['msg'] == '2 replacements made' + assert file(scratch).read().find(teststr) == -1 + + # no replace should remove all matches + testcase = ('replace', [ + "dest=%s" % scratch, + "regexp='^[^,]+,'" + ]) + result = self._run(*testcase) + assert result['changed'] + assert result['msg'] == '3 replacements made' + assert file(scratch).read().find('.example.com') == -1 + + os.unlink(scratch) diff --git a/test/known_hosts.txt b/test/known_hosts.txt new file mode 100644 index 00000000000..70fcf353633 --- /dev/null +++ b/test/known_hosts.txt @@ -0,0 +1,4 @@ +alpha.example.com,10.11.12.13 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAklOUpkDHrfHY17SbrmTIpNLTGK9Tjom/BWDSUGPl+nafzlHDTYW7hdI4yZ5ew18JH4JW9jbhUFrviQzM7xlELEVf4h9lFX5QVkbPppSwg0cda3Pbv7kOdJ/MTyBlWXFCR+HAo3FXRitBqxiX1nKhXpHAZsMciLq8V6RjsNAQwdsdMFvSlVK/7XAt3FaoJoAsncM1Q9x5+3V0Ww68/eIFmb1zuUFljQJKprrX88XypNDvjYNby6vw/Pb0rwert/EnmZ+AW4OZPnTPI89ZPmVMLuayrD2cE86Z/il8b+gw3r3+1nKatmIkjn2so1d01QraTlMqVSsbxNrRFi9wrf+M7Q== +bravo.example.com,10.11.12.14 ssh-rsa AAAAB3NzaC1yom/BWDSUGPl+nafzlHDTYW7hdI4yZ5ew18JH4JW9jbhUFrvic2EAAAABIwAAAQEAklOUpkDHrfHY17SbrmTIpNLTGK9TjQzM7xlELEVf4h9lFX5QVkbPppSwg0cda3Pbv7kOdJ/MTyBlWXFCR+HAo3FXRitBqxiX1nKhXpHAZsMcAW4OZPnTPI89ZPmVMLuayrD2cE86Z/iliLq8V6RjsNAQwdsdMFvSlVK/7XAt3FaoJoAsncM1Q9x5+3V0Ww68/eIFmb1zuUFljQJKprrX88XypNDvjYNby6vw/Pb0rwert/EnmZ+8b+gw3r3+1nKatmIkjn2so1d01QraTlMqVSsbxNrRFi9wrf+M7Q== +charlie.example.com,10.11.12.15 ssh-rsa 
AAAAB3NzaC1yc2EAAAABIwAAAQEAklOUpkDHrfHY17SbrmTIpNLTGK9Tjom/BWDSUGPl+nafzlHDTYW7hdI4yZ5ew18JH4JW9jbhUFrviQzM7xlELEVf4h9lFX5QVkbPppSwg0cda3Pbv7kOdJ/MTyBlWXFCR+HAo3FXRitBqxiX1nKhXpHAZsMciLq8V6RjsNAQwdsdMFvSlVK/7XAt3FaoJoAsncM1Q9x5+3V0Ww68/eIFmb1zuUFljQJKprrX88XypNDvjYNby6vw/Pb0rwert/EnmZ+AW4OZPnTPI89ZPmVMLuayrD2cE86Z/il8b+gw3r3+1nKatmIkjn2so1d01QraTlMqVSsbxNrRFi9wrf+M7Q== +10.11.12.13 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAklOUpkDHrfHY17SbrmTIpNLTGK9Tjom/BWDSUGPl+nafzlHDTYW7hdI4yZ5ew18JH4JW9jbhUFrviQzM7xlELEVf4h9lFX5QVkbPppSwg0cda3Pbv7kOdJ/MTyBlWXFCR+HAo3FXRitBqxiX1nKhXpHAZsMciLq8V6RjsNAQwdsdMFvSlVK/7XAt3FaoJoAsncM1Q9x5+3V0Ww68/eIFmb1zuUFljQJKprrX88XypNDvjYNby6vw/Pb0rwert/EnmZ+AW4OZPnTPI89ZPmVMLuayrD2cE86Z/il8b+gw3r3+1nKatmIkjn2so1d01QraTlMqVSsbxNrRFi9wrf+M7Q== From 1e646a3112a084448f336eea527a3be5f90d786e Mon Sep 17 00:00:00 2001 From: John Dewey Date: Wed, 27 Nov 2013 00:30:50 -0800 Subject: [PATCH 016/772] Added module to handle nova security groups This module is loosely based on ec2_group module. However, rules are handled slightly differently. Specific rules are able to be removed vs removing all "rogue" [1] rules. [1] Rogue rules are existing security group rules, which are not included in the `rules` dict. --- library/cloud/nova_group | 333 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 333 insertions(+) create mode 100644 library/cloud/nova_group diff --git a/library/cloud/nova_group b/library/cloud/nova_group new file mode 100644 index 00000000000..18e00c9c7ba --- /dev/null +++ b/library/cloud/nova_group @@ -0,0 +1,333 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, John Dewey +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +import locale +import os +import six + +try: + from novaclient.openstack.common import uuidutils + from novaclient.openstack.common import strutils + from novaclient.v1_1 import client + from novaclient.v1_1 import security_groups + from novaclient.v1_1 import security_group_rules + from novaclient import exceptions +except ImportError: + print("failed=True msg='novaclient is required for this module to work'") + +DOCUMENTATION = ''' +--- +module: security_group +version_added: "1.5" +short_description: Maintain nova security groups. +description: + - Manage nova security groups using the python-novaclient library. +options: + + login_username: + description: + - Login username to authenticate to keystone. If not set then the value of the OS_USERNAME environment variable is used. + required: false + default: None + login_password: + description: + - Password of login user. If not set then the value of the OS_PASSWORD environment variable is used. + required: false + default: None + login_tenant_name: + description: + - The tenant name of the login user. If not set then the value of the OS_TENANT_NAME environment variable is used. + required: false + default: None + auth_url: + description: + - The keystone url for authentication. 
If not set then the value of the OS_AUTH_URL environment variable is used. + required: false + default: None + region_name: + description: + - Name of the region. + required: false + default: None + name: + description: + - Name of the security group. + required: true + description: + description: + - Description of the security group. + required: true + rules: + description: + - List of firewall rules to enforce in this group (see example). + Must specify either an IPv4 'cidr' address or 'group' UUID. + required: true + state: + description: + - Indicate desired state of the resource. + choices: ['present', 'absent'] + required: false + default: 'present' + +requirements: ["novaclient"] +''' + +EXAMPLES = ''' +- name: create example group and rules + local_action: + module: security_group + name: example + description: an example nova group + rules: + - ip_protocol: tcp + from_port: 80 + to_port: 80 + cidr: 0.0.0.0/0 + - ip_protocol: tcp + from_port: 3306 + to_port: 3306 + group: "{{ group_uuid }}" + - ip_protocol: icmp + from_port: -1 + to_port: -1 + cidr: 0.0.0.0/0 + +- name: delete rule from example group + local_action: + module: security_group + name: example + description: an example nova group + rules: + - ip_protocol: tcp + from_port: 80 + to_port: 80 + cidr: 0.0.0.0/0 + - ip_protocol: icmp + from_port: -1 + to_port: -1 + cidr: 0.0.0.0/0 + state: absent +''' + +class NovaGroup(object): + def __init__(self, client): + self._sg = security_groups.SecurityGroupManager(client) + + # Taken from novaclient/v1_1/shell.py. + def _get_secgroup(self, secgroup): + # Check secgroup is an UUID + if uuidutils.is_uuid_like(strutils.safe_encode(secgroup)): + try: + sg = self._sg.get(secgroup) + return sg + except exceptions.NotFound: + return False + + # Check secgroup as a name + for s in self._sg.list(): + encoding = (locale.getpreferredencoding() or + sys.stdin.encoding or + 'UTF-8') + if not six.PY3: + s.name = s.name.encode(encoding) + if secgroup == s.name: + return s + return False + + +class SecurityGroup(NovaGroup): + def __init__(self, client, module): + super(SecurityGroup, self).__init__(client) + self._module = module + self._name = module.params.get('name') + self._description = module.params.get('description') + + def exists(self): + return self._get_secgroup(self._name) + + def create(self): + self._sg.create(self._name, self._description) + + def delete(self): + self._sg.delete(self._name) + + +class SecurityGroupRule(NovaGroup): + def __init__(self, client, module): + super(SecurityGroupRule, self).__init__(client) + self._module = module + self._name = module.params.get('name') + self._rules = module.params.get('rules') + self._validate_rules() + self._sgr = security_group_rules.SecurityGroupRuleManager(client) + self._secgroup = self._get_secgroup(self._name) + self._current_rules = self._lookup_dict(self._secgroup.rules) + + def _concat_security_group_rule(self, rule): + """ + Normalize the given rule into a string in the format of: + protocol-from_port-to_port-group + The `group` needs a bit of massaging. + 1. If an empty dict -- return None. + 2. If a dict -- lookup group UUID (novaclient only returns the name). + 3. Return `group` from rules dict. + + :param rule: A novaclient SecurityGroupRule object. + """ + group = rule.get('group') + # Oddly novaclient occasionaly returns None as {}. 
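+        # any() over a dict iterates its keys, so `not any(group)` is True
+        # exactly when the dict is empty; an empty group collapses to None,
+        # while a populated one is resolved by name via _get_secgroup below.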
+ if group is not None and not any(group): + group = None + elif type(group) == dict: + g = group.get('name') + group = self._get_secgroup(g) + r = "%s-%s-%s-%s" % (rule.get('ip_protocol'), + rule.get('from_port'), + rule.get('to_port'), + group) + return r + + def _lookup_dict(self, rules): + """ + Populate a dict with current rules. + + :param rule: A novaclient SecurityGroupRule object. + """ + return {self._concat_security_group_rule(rule): rule for rule in rules} + + def _get_rule(self, rule): + """ + Return rule when found and False when not. + + :param rule: A novaclient SecurityGroupRule object. + """ + r = self._concat_security_group_rule(rule) + if r in self._current_rules: + return self._current_rules[r] + else: + return False + + def _validate_rules(self): + for rule in self._rules: + if 'group' in rule and 'cidr' in rule: + self._module.fail_json(msg="Specify group OR cidr") + + def create(self): + changed = False + filtered = [rule for rule in self._rules + if rule.get('state') != 'absent'] + for rule in filtered: + if not self._get_rule(rule): + if 'cidr' in rule: + self._sgr.create(self._secgroup.id, + rule.get('ip_protocol'), + rule.get('from_port'), + rule.get('to_port'), + cidr=rule.get('cidr')) + changed = True + if 'group' in rule: + self._sgr.create(self._secgroup.id, + rule.get('ip_protocol'), + rule.get('from_port'), + rule.get('to_port'), + group_id=rule.get('group')) + changed = True + return changed + + def delete(self): + changed = False + filtered = [rule for rule in self._rules + if rule.get('state') == 'absent'] + for rule in filtered: + r = self._get_rule(rule) + if r: + self._sgr.delete(r.get('id')) + changed = True + return changed + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + description=dict(required=True), + rules=dict(), + login_username=dict(), + login_password=dict(no_log=True), + login_tenant_name=dict(), + auth_url= dict(), + region_name=dict(default=None), + state = dict(default='present', choices=['present', 'absent']), + ), + supports_check_mode=True, + ) + login_username = module.params.get('login_username') + login_password = module.params.get('login_password') + login_tenant_name = module.params.get('login_tenant_name') + auth_url = module.params.get('auth_url') + + # allow stackrc environment variables to be used if ansible vars aren't set + if not login_username and 'OS_USERNAME' in os.environ: + login_username = os.environ['OS_USERNAME'] + + if not login_password and 'OS_PASSWORD' in os.environ: + login_password = os.environ['OS_PASSWORD'] + + if not login_tenant_name and 'OS_TENANT_NAME' in os.environ: + login_tenant_name = os.environ['OS_TENANT_NAME'] + + if not auth_url and 'OS_AUTH_URL' in os.environ: + auth_url = os.environ['OS_AUTH_URL'] + + nova = client.Client(login_username, + login_password, + login_tenant_name, + auth_url, + service_type='compute') + try: + nova.authenticate() + except exceptions.Unauthorized as e: + module.fail_json(msg="Invalid OpenStack Nova credentials.: %s" % e.message) + except exceptions.AuthorizationFailure as e: + module.fail_json(msg="Unable to authorize user: %s" % e.message) + + rules = module.params.get('rules') + state = module.params.get('state') + security_group = SecurityGroup(nova, module) + security_group_rules = SecurityGroupRule(nova, module) + + changed = False + if security_group.exists(): + if state == 'absent': + security_group.delete() + changed = True + elif state == 'present': + security_group.create() + changed = True + + if rules: + 
if security_group_rules.create(): + changed = True + if security_group_rules.delete(): + changed = True + + module.exit_json(changed=changed, group_id=None) + +# this is magic, see lib/ansible/module_common.py +#<> +main() From 3b07f848a25317c7cb0789e9a49e3707a7bf6070 Mon Sep 17 00:00:00 2001 From: Brian Candler Date: Wed, 27 Nov 2013 16:58:34 +0000 Subject: [PATCH 017/772] Additional idempotent states for virt module: "destroyed" and "paused" --- library/cloud/virt | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/library/cloud/virt b/library/cloud/virt index 69dcf055366..3ef126f8547 100644 --- a/library/cloud/virt +++ b/library/cloud/virt @@ -36,7 +36,7 @@ options: since these refer only to VM states. After starting a guest, it may not be immediately accessible. required: false - choices: [ "running", "shutdown" ] + choices: [ "running", "shutdown", "destroyed", "paused" ] default: "no" command: description: @@ -414,13 +414,24 @@ def core(module): res['changed'] = False if state == 'running': - if v.status(guest) is not 'running': + if v.status(guest) is 'paused': + res['changed'] = True + res['msg'] = v.unpause(guest) + elif v.status(guest) is not 'running': res['changed'] = True res['msg'] = v.start(guest) elif state == 'shutdown': if v.status(guest) is not 'shutdown': res['changed'] = True res['msg'] = v.shutdown(guest) + elif state == 'destroyed': + if v.status(guest) is not 'shutdown': + res['changed'] = True + res['msg'] = v.destroy(guest) + elif state == 'paused': + if v.status(guest) is 'running': + res['changed'] = True + res['msg'] = v.pause(guest) else: module.fail_json(msg="unexpected state") @@ -459,7 +470,7 @@ def main(): module = AnsibleModule(argument_spec=dict( name = dict(aliases=['guest']), - state = dict(choices=['running', 'shutdown']), + state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']), command = dict(choices=ALL_COMMANDS), uri = dict(default='qemu:///system'), xml = dict(), From 79f3c56dddebb51b228583d48a9c4e7ac28380ac Mon Sep 17 00:00:00 2001 From: Tom Berger Date: Mon, 2 Dec 2013 01:57:23 -0500 Subject: [PATCH 018/772] Add a `recursive` option to the git command. Make it possible to clone without submodules by setting recursive to no. Default is yes, so no change is needed in existing plays. --- library/source_control/git | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/library/source_control/git b/library/source_control/git index 39e7ac60cbb..c32983c9fe3 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -95,6 +95,15 @@ options: description: - if C(yes), repository will be created as a bare repo, otherwise it will be a standard repo with a workspace. + + recursive: + required: false + default: "yes" + choices: [ "yes", "no" ] + version_added: "1.5" + description: + - if C(no), repository will be cloned without the --recursive + option, skipping sub-modules. notes: - "If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the first contact with a remote host. 
To avoid this prompt, @@ -125,7 +134,8 @@ def get_version(git_path, dest, ref="HEAD"): sha = os.popen(cmd).read().rstrip("\n") return sha -def clone(git_path, module, repo, dest, remote, depth, version, bare, reference): +def clone(git_path, module, repo, dest, remote, depth, version, bare, + reference, recursive): ''' makes a new git repo if it does not already exist ''' dest_dirname = os.path.dirname(dest) try: @@ -137,7 +147,9 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, reference) if bare: cmd.append('--bare') else: - cmd.extend([ '--origin', remote, '--recursive' ]) + cmd.extend([ '--origin', remote ]) + if recursive: + cmd.extend([ '--recursive' ]) if is_remote_branch(git_path, module, dest, repo, version) \ or is_remote_tag(git_path, module, dest, repo, version): cmd.extend([ '--branch', version ]) @@ -354,6 +366,7 @@ def main(): update=dict(default='yes', type='bool'), executable=dict(default=None), bare=dict(default='no', type='bool'), + recursive=dict(default='yes', type='bool'), ), supports_check_mode=True ) @@ -368,6 +381,7 @@ def main(): bare = module.params['bare'] reference = module.params['reference'] git_path = module.params['executable'] or module.get_bin_path('git', True) + recursive = module.params['recursive'] if bare: gitconfig = os.path.join(dest, 'config') @@ -384,7 +398,8 @@ def main(): if module.check_mode: remote_head = get_remote_head(git_path, module, dest, version, repo) module.exit_json(changed=True, before=before, after=remote_head) - clone(git_path, module, repo, dest, remote, depth, version, bare, reference) + clone(git_path, module, repo, dest, remote, depth, version, bare, + reference, recursive) elif not update: # Just return having found a repo already in the dest path # this does no checking that the repo is the actual repo From 4e547e1228be33739baa77b182b7ba68e28566a1 Mon Sep 17 00:00:00 2001 From: John Dewey Date: Tue, 3 Dec 2013 22:07:08 -0800 Subject: [PATCH 019/772] Added ability to manage nova floating IPs This module is based off the ec2_eip module, but accounts for pools, which is openstack related functionality. --- library/cloud/nova_fip | 233 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 233 insertions(+) create mode 100644 library/cloud/nova_fip diff --git a/library/cloud/nova_fip b/library/cloud/nova_fip new file mode 100644 index 00000000000..b236e82b908 --- /dev/null +++ b/library/cloud/nova_fip @@ -0,0 +1,233 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, John Dewey +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + from novaclient import utils + from novaclient.v1_1 import client + from novaclient.v1_1 import servers +except ImportError: + print("failed=True msg='novaclient is required for this module to work'") + +DOCUMENTATION = ''' +--- +module: nova_fip +version_added: "1.5" +short_description: Associate an OpenStack floating IP with a server. 
+description: + - Manage nova floating IPs using the python-novaclient library. +options: + + login_username: + description: + - Login username to authenticate to keystone. If not set then the value of the OS_USERNAME environment variable is used. + required: false + default: None + login_password: + description: + - Password of login user. If not set then the value of the OS_PASSWORD environment variable is used. + required: false + default: None + login_tenant_name: + description: + - The tenant name of the login user. If not set then the value of the OS_TENANT_NAME environment variable is used. + required: false + default: None + auth_url: + description: + - The keystone url for authentication. If not set then the value of the OS_AUTH_URL environment variable is used. + required: false + default: None + region_name: + description: + - Name of the region. + required: false + default: None + server: + description: + - Name or ID of server. + required: false + default: None + floating_ip: + description: + - The public IP address to associate with the instance. + - If absent, allocate a new address + required: false + default: None + pool: + description: + - The pool the floating_ip belongs to. + required: false + default: external + state: + description: + - Indicate desired state of the resource. + choices: ['present', 'absent'] + required: false + default: 'present' + +requirements: ["novaclient"] +notes: + - This module will return C(floating_ip) on success, which will contain the + public IP address associated with the instance. + - There may be a delay between the time the floating IP is assigned and when + the cloud instance is reachable via the new address. Use wait_for and pause + to delay further playbook execution until the instance is reachable, if + necessary. 
+''' + +EXAMPLES = ''' +- name: associate a floating IP with a server + nova_fip: server={{ UUID or name }} ip={{ IP }} + +- name: disassociate a floating IP from a server + nova_fip: server={{ UUID or name }} ip={{ IP }} state=absent + +- name: allocate a new floating IP and associate it with a server + nova_fip: server={{ UUID or name }} + +- name: allocate a new floating IP without associating it to anything + nova_fip: + register: fip + +- name: deallocate a floating IP + nova_fip: ip={{ IP }} state=absent + +- name: output the IP + debug: msg="Allocated IP is {{ fip.floating_ip }}" +''' + +def _floating_ip_already_associated(server, floating_ip): + changed = False + for network, ip_list in server.networks.iteritems(): + if floating_ip in ip_list: + changed = True + return changed + +def _associate_floating_ip(nova, floating_ip, server): + s = _find_server(nova, server) + if not _floating_ip_already_associated(s, floating_ip): + s.add_floating_ip(floating_ip) + return True + +def _disassociate_floating_ip(nova, floating_ip, server): + s = _find_server(nova, server) + if _floating_ip_already_associated(s, floating_ip): + s.remove_floating_ip(floating_ip) + return True + +def _find_server(nova, server): + return utils.find_resource(nova.servers, server) + +def _allocate_address(nova, pool): + address = None + floating_ips = nova.floating_ips.list() + for fip in floating_ips: + # allocated but not assigned + if fip.pool == pool and fip.instance_id is None: + address = fip + + # return an available floating ip + if address: + return address + # allocate and return a floating ip + else: + return nova.floating_ips.create(pool=pool) + +def _deallocate_address(nova, floating_ip): + changed = False + floating_ips = nova.floating_ips.list() + + for fip in floating_ips: + if fip.ip == floating_ip: + nova.floating_ips.delete(fip.id) + changed = True + return changed + +def main(): + module = AnsibleModule( + argument_spec=dict( + server=dict(required=False), + floating_ip=dict(required=False, aliases=['ip']), + pool=dict(default='external'), + login_username=dict(), + login_password=dict(no_log=True), + login_tenant_name=dict(), + auth_url= dict(), + region_name=dict(default=None), + state = dict(default='present', choices=['present', 'absent']), + ), + supports_check_mode=True, + ) + login_username = module.params.get('login_username') + login_password = module.params.get('login_password') + login_tenant_name = module.params.get('login_tenant_name') + auth_url = module.params.get('auth_url') + + # allow stackrc environment variables to be used if ansible vars aren't set + if not login_username and 'OS_USERNAME' in os.environ: + login_username = os.environ['OS_USERNAME'] + + if not login_password and 'OS_PASSWORD' in os.environ: + login_password = os.environ['OS_PASSWORD'] + + if not login_tenant_name and 'OS_TENANT_NAME' in os.environ: + login_tenant_name = os.environ['OS_TENANT_NAME'] + + if not auth_url and 'OS_AUTH_URL' in os.environ: + auth_url = os.environ['OS_AUTH_URL'] + + nova = client.Client(login_username, + login_password, + login_tenant_name, + auth_url, + service_type='compute') + try: + nova.authenticate() + except exceptions.Unauthorized as e: + module.fail_json(msg="Invalid OpenStack Nova credentials.: %s" % e.message) + except exceptions.AuthorizationFailure as e: + module.fail_json(msg="Unable to authorize user: %s" % e.message) + + server = module.params.get('server') + floating_ip = module.params.get('floating_ip') + pool = module.params.get('pool') + state = 
module.params.get('state') + + if state == 'present': + if floating_ip is None: + if server is None: + address = _allocate_address(nova, pool) + module.exit_json(changed=True, floating_ip=address.ip) + else: + address = _allocate_address(nova, pool) + changed = _associate_floating_ip(nova, address.ip, server) + module.exit_json(changed=True, floating_ip=address.ip) + else: + changed = _associate_floating_ip(nova, floating_ip, server) + module.exit_json(changed=changed) + else: + if server is None: + changed = _deallocate_address(nova, floating_ip) + module.exit_json(changed=changed) + else: + changed = _disassociate_floating_ip(nova, floating_ip, server) + module.exit_json(changed=changed) + +# this is magic, see lib/ansible/module_common.py +#<> +main() From 6fe8496ab3f12b86c36a96d201b2b591c916a61f Mon Sep 17 00:00:00 2001 From: John Dewey Date: Sat, 7 Dec 2013 10:19:28 -0800 Subject: [PATCH 020/772] Made a few canges as I learn more about modules * Set check_mode to False, am not supporting this ATM. * Cleaned up delete/create() into single update(). * Return the group_id if created or found so tasks can be chained. --- library/cloud/nova_group | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/library/cloud/nova_group b/library/cloud/nova_group index 18e00c9c7ba..c96fb725ed5 100644 --- a/library/cloud/nova_group +++ b/library/cloud/nova_group @@ -157,14 +157,14 @@ class SecurityGroup(NovaGroup): self._name = module.params.get('name') self._description = module.params.get('description') - def exists(self): + def get(self): return self._get_secgroup(self._name) def create(self): - self._sg.create(self._name, self._description) + return self._sg.create(self._name, self._description) def delete(self): - self._sg.delete(self._name) + return self._sg.delete(self._name) class SecurityGroupRule(NovaGroup): @@ -219,8 +219,6 @@ class SecurityGroupRule(NovaGroup): r = self._concat_security_group_rule(rule) if r in self._current_rules: return self._current_rules[r] - else: - return False def _validate_rules(self): for rule in self._rules: @@ -260,6 +258,14 @@ class SecurityGroupRule(NovaGroup): changed = True return changed + def update(self): + changed = False + if self.create(): + changed = True + if self.delete(): + changed = True + return changed + def main(): module = AnsibleModule( @@ -274,7 +280,7 @@ def main(): region_name=dict(default=None), state = dict(default='present', choices=['present', 'absent']), ), - supports_check_mode=True, + supports_check_mode=False, ) login_username = module.params.get('login_username') login_password = module.params.get('login_password') @@ -309,24 +315,26 @@ def main(): rules = module.params.get('rules') state = module.params.get('state') security_group = SecurityGroup(nova, module) - security_group_rules = SecurityGroupRule(nova, module) changed = False - if security_group.exists(): + group_id = None + group = security_group.get() + if group: + group_id = group.id if state == 'absent': security_group.delete() changed = True elif state == 'present': - security_group.create() + group = security_group.create() changed = True + group_id = group.id - if rules: - if security_group_rules.create(): - changed = True - if security_group_rules.delete(): + if rules is not None: + security_group_rules = SecurityGroupRule(nova, module) + if security_group_rules.update(): changed = True - module.exit_json(changed=changed, group_id=None) + module.exit_json(changed=changed, group_id=group_id) # this is magic, see 
lib/ansible/module_common.py #<> From 4357e6f31ea1f036f302bf90bd7407f6abbc3192 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Leszek=20Krupin=CC=81ski?= Date: Sun, 8 Dec 2013 11:53:33 +0100 Subject: [PATCH 021/772] support for apt-key keyserver parameter --- library/packaging/apt_key | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/library/packaging/apt_key b/library/packaging/apt_key index 17b52888d47..6ea0ddbd1bd 100644 --- a/library/packaging/apt_key +++ b/library/packaging/apt_key @@ -58,6 +58,11 @@ options: default: none description: - url to retrieve key from. + keyserver: + required: false + default: none + description: + - keyserver to retrieve key from. state: required: false choices: [ absent, present ] @@ -141,6 +146,10 @@ def download_key(module, url): except: module.fail_json(msg="error getting key id from url", traceback=format_exc()) +def import_key(module, keyserver, key_id): + cmd = "apt-key adv --keyserver %s --recv %s" % (keyserver, key_id) + (rc, out, err) = module.run_command(cmd, check_rc=True) + return True def add_key(module, keyfile, keyring, data=None): if data is not None: @@ -175,6 +184,7 @@ def main(): file=dict(required=False), key=dict(required=False), keyring=dict(required=False), + keyserver=dict(required=False), state=dict(required=False, choices=['present', 'absent'], default='present') ), supports_check_mode=True @@ -186,6 +196,7 @@ def main(): filename = module.params['file'] keyring = module.params['keyring'] state = module.params['state'] + keyserver = module.params['keyserver'] changed = False if key_id: @@ -206,7 +217,7 @@ def main(): if key_id and key_id in keys: module.exit_json(changed=False) else: - if not filename and not data: + if not filename and not data and not keyserver: data = download_key(module, url) if key_id and key_id in keys: module.exit_json(changed=False) @@ -215,6 +226,8 @@ def main(): module.exit_json(changed=True) if filename: add_key(module, filename, keyring) + elif keyserver: + import_key(module, keyserver, key_id) else: add_key(module, "-", keyring, data) changed=False From 3ac4611093f57745db4a6c73db61ee259bd6e070 Mon Sep 17 00:00:00 2001 From: Michael Gregson Date: Wed, 11 Dec 2013 12:03:53 -0700 Subject: [PATCH 022/772] Rename digital_ocean_ssh to digital_ocean_sshkey per https://github.com/ansible/ansible/pull/4315#issuecomment-30286556 --- library/cloud/{digital_ocean_ssh => digital_ocean_sshkey} | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename library/cloud/{digital_ocean_ssh => digital_ocean_sshkey} (98%) diff --git a/library/cloud/digital_ocean_ssh b/library/cloud/digital_ocean_sshkey similarity index 98% rename from library/cloud/digital_ocean_ssh rename to library/cloud/digital_ocean_sshkey index f03fae14f80..19305c1e42e 100644 --- a/library/cloud/digital_ocean_ssh +++ b/library/cloud/digital_ocean_sshkey @@ -17,7 +17,7 @@ # along with Ansible. If not, see . DOCUMENTATION = ''' --- -module: digital_ocean_ssh +module: digital_ocean_sshkey short_description: Create/delete an SSH key in DigitalOcean description: - Create/delete an SSH key. @@ -54,7 +54,7 @@ EXAMPLES = ''' # If a key matches this name, will return the ssh key id and changed = False # If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False -- digital_ocean_ssh: > +- digital_ocean_sshkey: > state=present name=my_ssh_key ssh_pub_key='ssh-rsa AAAA...' 
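Worth noting on the apt_key keyserver support added above: in that mode the module
never downloads key material itself, it delegates the fetch to apt-key. Stripped of
the Ansible plumbing, the new path amounts to the sketch below (subprocess stands in
for module.run_command, and both argument values are placeholders):

    import subprocess

    def import_key(keyserver, key_id):
        # same invocation the patch adds; a non-zero exit from apt-key is
        # what check_rc=True turns into a task failure in the real module
        subprocess.check_call(["apt-key", "adv",
                               "--keyserver", keyserver,
                               "--recv", key_id])

    import_key("keyserver.example.com", "0xDEADBEEF")  # placeholder values

Since main() only reaches the import when the requested key_id is absent from the
current apt-key list, repeated runs remain idempotent.
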
From f71e67087f8db2f68354e5e3367531eaebf3903b Mon Sep 17 00:00:00 2001 From: Andrii Radyk Date: Thu, 12 Dec 2013 10:28:01 +0200 Subject: [PATCH 023/772] Adding absent condition for to the wait_for module --- library/utilities/wait_for | 54 +++++++++++++++++++++++++------------- 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/library/utilities/wait_for b/library/utilities/wait_for index bee2f003b6f..1dd40984c35 100644 --- a/library/utilities/wait_for +++ b/library/utilities/wait_for @@ -34,8 +34,8 @@ description: which is true of certain Java application servers. It is also useful when starting guests with the M(virt) module and needing to pause until they are ready. This module can - also be used to wait for a file to be available on the filesystem - or with a regex match a string to be present in a file. + also be used to wait for a file to be available or absent on the + filesystem or with a regex match a string to be present in a file. version_added: "0.7" options: host: @@ -60,10 +60,10 @@ options: required: false state: description: - - either C(present), C(started), or C(stopped) + - either C(present), C(started), or C(stopped), C(absent) - When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed - - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing - choices: [ "present", "started", "stopped" ] + - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing, C(absent) will check that file is absent or removed + choices: [ "present", "started", "stopped", "absent" ] default: "started" path: version_added: "1.4" @@ -78,7 +78,7 @@ options: notes: [] requirements: [] -author: Jeroen Hoekx, John Jarvis +author: Jeroen Hoekx, John Jarvis, Andrii Radyk ''' EXAMPLES = ''' @@ -92,6 +92,12 @@ EXAMPLES = ''' # wait until the string "completed" is in the file /tmp/foo before continuing - wait_for: path=/tmp/foo search_regex=completed +# wait until the lock file is removed +- wait_for: path=/var/lock/file.lock state=absent + +# wait until the process is finished and pid was destroyed +- wait_for: path=/proc/3466/status state=absent + ''' def main(): @@ -105,7 +111,7 @@ def main(): port=dict(default=None), path=dict(default=None), search_regex=dict(default=None), - state=dict(default='started', choices=['started', 'stopped', 'present']), + state=dict(default='started', choices=['started', 'stopped', 'present', 'absent']), ), ) @@ -133,23 +139,35 @@ def main(): if delay: time.sleep(delay) - if state == 'stopped': + if state in [ 'stopped', 'absent' ]: ### first wait for the stop condition end = start + datetime.timedelta(seconds=timeout) while datetime.datetime.now() < end: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(connect_timeout) - try: - s.connect( (host, port) ) - s.shutdown(socket.SHUT_RDWR) - s.close() - time.sleep(1) - except: - break + if path: + try: + f = open(path) + f.close() + time.sleep(1) + pass + except IOError: + break + elif port: + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.settimeout(connect_timeout) + try: + s.connect( (host, port) ) + s.shutdown(socket.SHUT_RDWR) + s.close() + time.sleep(1) + except: + break else: elapsed = datetime.datetime.now() - start - module.fail_json(msg="Timeout when waiting for %s:%s to stop." 
% (host, port), elapsed=elapsed.seconds) + if port: + module.fail_json(msg="Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds) + elif path: + module.fail_json(msg="Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds) elif state in ['started', 'present']: ### wait for start condition From bc14ced48d54d444860949c3f54e2e9ec927c6f2 Mon Sep 17 00:00:00 2001 From: Baptiste Lafontaine Date: Fri, 13 Dec 2013 09:51:55 +0100 Subject: [PATCH 024/772] Adding 'export' option to subversion module (to export instead of checkout) --- library/source_control/subversion | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/library/source_control/subversion b/library/source_control/subversion index 38417e801b5..43824ec25b1 100644 --- a/library/source_control/subversion +++ b/library/source_control/subversion @@ -70,6 +70,12 @@ options: description: - Path to svn executable to use. If not supplied, the normal mechanism for resolving binary paths will be used. + export: + required: false + default: False + version_added: "1.5" + description: + - If True, do export instead of checkout/update. ''' EXAMPLES = ''' @@ -110,6 +116,10 @@ class Subversion(object): def checkout(self): '''Creates new svn working directory if it does not already exist.''' self._exec("checkout -r %s '%s' '%s'" % (self.revision, self.repo, self.dest)) + + def export(self, force=False): + '''Export svn repo to directory''' + self._exec("export -r %s '%s' '%s'" % (self.revision, self.repo, self.dest)) def switch(self): '''Change working directory's repo.''' @@ -163,6 +173,7 @@ def main(): username=dict(required=False), password=dict(required=False), executable=dict(default=None), + export=dict(default=False, required=False), ), supports_check_mode=True ) @@ -174,6 +185,7 @@ def main(): username = module.params['username'] password = module.params['password'] svn_path = module.params['executable'] or module.get_bin_path('svn', True) + export = module.params['export'] svn = Subversion(module, dest, repo, revision, username, password, svn_path) @@ -182,7 +194,10 @@ def main(): local_mods = False if module.check_mode: module.exit_json(changed=True) - svn.checkout() + if not export: + svn.checkout() + else: + svn.export() elif os.path.exists("%s/.svn" % (dest, )): # Order matters. Need to get local mods before switch to avoid false # positives. Need to switch before revert to ensure we are reverting to From 40f9da351f171aca102d93b7924fe914c1c88a70 Mon Sep 17 00:00:00 2001 From: Joseph Tate Date: Fri, 13 Dec 2013 13:43:30 -0500 Subject: [PATCH 025/772] Extend ec2 module to support spot instances --- library/cloud/ec2 | 107 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 82 insertions(+), 25 deletions(-) diff --git a/library/cloud/ec2 b/library/cloud/ec2 index 0e0b8aaf0fd..3442605da84 100644 --- a/library/cloud/ec2 +++ b/library/cloud/ec2 @@ -67,6 +67,12 @@ options: required: true default: null aliases: [] + spot_price: + description: + - Maximum spot price to bid, If not set a regular on-demand instance is requested. A spot request is made with this maximum bid. When it is filled, the instance is started. 
+ required: false + default: null + aliases: [] image: description: - I(emi) (or I(ami)) to use for the instance @@ -97,6 +103,11 @@ options: - how long before wait gives up, in seconds default: 300 aliases: [] + spot_wait_timeout: + description: + - how long to wait for the spot instance request to be fulfilled + default: 600 + aliases: [] ec2_url: description: - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used @@ -247,6 +258,19 @@ local_action: vpc_subnet_id: subnet-29e63245 assign_public_ip: yes +# Spot instance example +- local_action: + module: ec2 + spot_price: 0.24 + spot_wait_timeout: 600 + keypair: mykey + group_id: sg-1dc53f72 + instance_type: m1.small + image: ami-6e649707 + wait: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + # Launch instances, runs some tasks # and then terminate them @@ -392,6 +416,7 @@ def create_instances(module, ec2): group_id = module.params.get('group_id') zone = module.params.get('zone') instance_type = module.params.get('instance_type') + spot_price = module.params.get('spot_price') image = module.params.get('image') count = module.params.get('count') monitoring = module.params.get('monitoring') @@ -399,6 +424,7 @@ def create_instances(module, ec2): ramdisk = module.params.get('ramdisk') wait = module.params.get('wait') wait_timeout = int(module.params.get('wait_timeout')) + spot_wait_timeout = int(module.params.get('spot_wait_timeout')) placement_group = module.params.get('placement_group') user_data = module.params.get('user_data') instance_tags = module.params.get('instance_tags') @@ -456,16 +482,12 @@ def create_instances(module, ec2): try: params = {'image_id': image, 'key_name': key_name, - 'client_token': id, - 'min_count': count_remaining, - 'max_count': count_remaining, 'monitoring_enabled': monitoring, 'placement': zone, 'placement_group': placement_group, 'instance_type': instance_type, 'kernel_id': kernel, 'ramdisk_id': ramdisk, - 'private_ip_address': private_ip, 'user_data': user_data} if boto_supports_profile_name_arg(ec2): @@ -498,22 +520,55 @@ def create_instances(module, ec2): else: params['security_groups'] = group_name - res = ec2.run_instances(**params) + if not spot_price: + params.update({ + 'min_count': count_remaining, + 'max_count': count_remaining, + 'client_token': id, + 'private_ip_address': private_ip, + }) + res = ec2.run_instances(**params) + instids = [ i.id for i in res.instances ] + while True: + try: + ec2.get_all_instances(instids) + break + except boto.exception.EC2ResponseError as e: + if "InvalidInstanceID.NotFound" in str(e): + # there's a race between start and get an instance + continue + else: + module.fail_json(msg = str(e)) + else: + if private_ip: + module.fail_json( + msg='private_ip only available with on-demand (non-spot) instances') + params.update({ + 'count': count_remaining, + }) + res = ec2.request_spot_instances(spot_price, **params) + #Now we have to do the intermediate waiting + if wait: + spot_req_inst_ids = dict() + spot_wait_timeout = time.time() + spot_wait_timeout + while spot_wait_timeout > time.time(): + reqs = ec2.get_all_spot_instance_requests() + for sirb in res: + if sirb.id in spot_req_inst_ids: + continue + for sir in reqs: + if sir.id == sirb.id and sir.instance_id is not None: + spot_req_inst_ids[sirb.id] = sir.instance_id + if len(spot_req_inst_ids) < count: + time.sleep(5) + else: + break + 
if spot_wait_timeout <= time.time(): + module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime()) + instids = spot_req_inst_ids.values() except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - instids = [ i.id for i in res.instances ] - while True: - try: - res.connection.get_all_instances(instids) - break - except boto.exception.EC2ResponseError as e: - if "InvalidInstanceID.NotFound" in str(e): - # there's a race between start and get an instance - continue - else: - module.fail_json(msg = str(e)) - if instance_tags: try: ec2.create_tags(instids, instance_tags) @@ -521,15 +576,14 @@ def create_instances(module, ec2): module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) # wait here until the instances are up - this_res = [] num_running = 0 wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and num_running < len(instids): - res_list = res.connection.get_all_instances(instids) - if len(res_list) > 0: - this_res = res_list[0] - num_running = len([ i for i in this_res.instances if i.state=='running' ]) - else: + res_list = ec2.get_all_instances(instids) + num_running = 0 + for res in res_list: + num_running += len([ i for i in res.instances if i.state=='running' ]) + if len(res_list) <= 0: # got a bad response of some sort, possibly due to # stale/cached data. Wait a second and then try again time.sleep(1) @@ -543,8 +597,9 @@ def create_instances(module, ec2): # waiting took too long module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime()) - for inst in this_res.instances: - running_instances.append(inst) + #We do this after the loop ends so that we end up with one list + for res in res_list: + running_instances.extend(res.instances) instance_dict_array = [] created_instance_ids = [] @@ -631,6 +686,7 @@ def main(): region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), zone = dict(aliases=['aws_zone', 'ec2_zone']), instance_type = dict(aliases=['type']), + spot_price = dict(), image = dict(), kernel = dict(), count = dict(default='1'), @@ -638,6 +694,7 @@ def main(): ramdisk = dict(), wait = dict(type='bool', default=False), wait_timeout = dict(default=300), + spot_wait_timeout = dict(default=600), ec2_url = dict(), ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True), ec2_access_key = dict(aliases=['aws_access_key', 'access_key']), From 080e70ab6e39940e1ee5bdd39f600aa586814667 Mon Sep 17 00:00:00 2001 From: Joseph Tate Date: Fri, 13 Dec 2013 15:01:58 -0500 Subject: [PATCH 026/772] Added version_added for spot instance parameters --- library/cloud/ec2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/library/cloud/ec2 b/library/cloud/ec2 index 3442605da84..dac64c3243a 100644 --- a/library/cloud/ec2 +++ b/library/cloud/ec2 @@ -68,6 +68,7 @@ options: default: null aliases: [] spot_price: + version_added: "1.5" description: - Maximum spot price to bid, If not set a regular on-demand instance is requested. A spot request is made with this maximum bid. When it is filled, the instance is started. 
    required: false
@@ -104,6 +105,7 @@ options:
     default: 300
     aliases: []
   spot_wait_timeout:
+    version_added: "1.5"
     description:
       - how long to wait for the spot instance request to be fulfilled
     default: 600
     aliases: []

From dd3aea6e9b8ffd15507a538057ede155a44624bf Mon Sep 17 00:00:00 2001
From: Alex Coomans
Date: Fri, 13 Dec 2013 16:12:58 -0600
Subject: [PATCH 027/772] Add ability to disable the Source/Destination check on EC2

---
 library/cloud/ec2 | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/library/cloud/ec2 b/library/cloud/ec2
index 0e0b8aaf0fd..09c7739edf2 100644
--- a/library/cloud/ec2
+++ b/library/cloud/ec2
@@ -184,6 +184,12 @@ options:
     required: false
     default: null
     aliases: []
+  source_dest_check:
+    version_added: "1.5"
+    description:
+      - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers)
+    required: false
+    default: true
   state:
     version_added: "1.3"
     description:
@@ -406,6 +412,7 @@ def create_instances(module, ec2):
     assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
     private_ip = module.params.get('private_ip')
     instance_profile_name = module.params.get('instance_profile_name')
+    source_dest_check = module.boolean(module.params.get('source_dest_check'))

     # group_id and group_name are exclusive of each other
     if group_id and group_name:
@@ -546,6 +553,11 @@ def create_instances(module, ec2):
         for inst in this_res.instances:
             running_instances.append(inst)

+        # Enabled by default by Amazon
+        if not source_dest_check:
+            for inst in res.instances:
+                inst.modify_attribute('sourceDestCheck', False)
+
     instance_dict_array = []
     created_instance_ids = []
     for inst in running_instances:
@@ -649,6 +661,7 @@ def main():
             private_ip = dict(),
             instance_profile_name = dict(),
             instance_ids = dict(type='list'),
+            source_dest_check = dict(type='bool', default=True),
             state = dict(default='present'),
         )
     )

From e868d0047233e0fd7d36756bf02a707a69b2bcc7 Mon Sep 17 00:00:00 2001
From: Joseph Tate
Date: Thu, 19 Dec 2013 18:16:56 -0500
Subject: [PATCH 028/772] Add capability check for parameters on request_spot_instances

---
 library/cloud/ec2 | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/library/cloud/ec2 b/library/cloud/ec2
index dac64c3243a..c992122e7cc 100644
--- a/library/cloud/ec2
+++ b/library/cloud/ec2
@@ -399,6 +399,17 @@ def boto_supports_profile_name_arg(ec2):
     run_instances_method = getattr(ec2, 'run_instances')
     return 'instance_profile_name' in run_instances_method.func_code.co_varnames

+def boto_supports_param_in_spot_request(ec2, param):
+    """
+    Check if the Boto library has a given parameter in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
+
+    ec2: authenticated ec2 connection object
+
+    Returns:
+        True if boto library has the named param as an argument on the request_spot_instances method, else False
+    """
+    method = getattr(ec2, 'request_spot_instances')
+    return param in method.func_code.co_varnames

 def create_instances(module, ec2):
     """
@@ -486,7 +497,6 @@ def create_instances(module, ec2):
                       'key_name': key_name,
                       'monitoring_enabled': monitoring,
                       'placement': zone,
-                      'placement_group': placement_group,
                       'instance_type': instance_type,
                       'kernel_id': kernel,
                       'ramdisk_id': ramdisk,
@@ -527,6 +537,7 @@ def create_instances(module, ec2):
                 'min_count': count_remaining,
                 'max_count': count_remaining,
                 'client_token': id,
+                'placement_group': placement_group,
                 'private_ip_address': private_ip,
             })
             res = ec2.run_instances(**params)
@@ -545,6 +556,12 @@ def create_instances(module, ec2):
             if private_ip:
                 module.fail_json(
                     msg='private_ip only available with on-demand (non-spot) instances')
+            if boto_supports_param_in_spot_request(ec2, 'placement_group'):
+                params['placement_group'] = placement_group
+            elif placement_group:
+                module.fail_json(
+                    msg="placement_group parameter requires Boto version 2.3.0 or higher.")
+
             params.update({
                 'count': count_remaining,
             })

From 6bf5d664dc5af64098db97bc96b9d62525b6b38a Mon Sep 17 00:00:00 2001
From: dparalen
Date: Fri, 20 Dec 2013 13:22:17 +0100
Subject: [PATCH 029/772] allow per-host sudo operation

---
 lib/ansible/runner/__init__.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index 3318bcba2b3..b2278e5d7f7 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -604,8 +604,16 @@ class Runner(object):
         actual_pass = inject.get('ansible_ssh_pass', self.remote_pass)
         actual_transport = inject.get('ansible_connection', self.transport)
         actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file)
+        self.sudo = utils.boolean(inject.get('ansible_sudo', self.sudo))
+        self.sudo_user = inject.get('ansible_sudo_user', self.sudo_user)
         self.sudo_pass = inject.get('ansible_sudo_pass', self.sudo_pass)

+        # select default root user in case self.sudo requested
+        # but no user specified; happens e.g. in host vars when
+        # just ansible_sudo=True is specified
+        if self.sudo and self.sudo_user is None:
+            self.sudo_user = 'root'
+
         if actual_private_key_file is not None:
             actual_private_key_file = os.path.expanduser(actual_private_key_file)

From 6d1c2a92de12ad172cab9a789169d07c5a5986a0 Mon Sep 17 00:00:00 2001
From: Augustus Kling
Date: Tue, 24 Dec 2013 12:29:02 +0100
Subject: [PATCH 030/772] Basic handling of locales.

---
 library/system/locale | 101 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 101 insertions(+)
 create mode 100644 library/system/locale

diff --git a/library/system/locale b/library/system/locale
new file mode 100644
index 00000000000..350c1449e19
--- /dev/null
+++ b/library/system/locale
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+import os.path
+from subprocess import Popen, PIPE, call
+
+DOCUMENTATION = '''
+---
+module: locale
+short_description: Creates or removes locales.
+description:
+  - Manages locales by editing /etc/locale.gen and invoking locale-gen.
+version_added: "1.5"
+options:
+  name:
+    description:
+      - Name and encoding of the locale, such as "en_GB.UTF-8".
+    required: true
+    default: null
+    aliases: []
+  state:
+    description:
+      - Whether the locale shall be present.
+ required: false + choices: ["present", "absent"] + default: "present" +''' + +EXAMPLES = ''' +# Ensure a locale exists. +- locale: name=de_CH.UTF-8 state=present +''' + +# =========================================== +# location module specific support methods. +# + +def is_present(name): + """Checks if the given locale is currently installed.""" + output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0] + return any(fix_case(name) == fix_case(line) for line in output.splitlines()) + +def fix_case(name): + """locale -a might return the encoding in either lower or upper case. + Passing through this function makes them uniform for comparisons.""" + return name.replace(".utf8", ".UTF-8") + +def replace_line(existing_line, new_line): + """Replaces lines in /etc/locale.gen""" + with open("/etc/locale.gen", "r") as f: + lines = [line.replace(existing_line, new_line) for line in f] + with open("/etc/locale.gen", "w") as f: + f.write("".join(lines)) + +# ============================================================== +# main + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + state = dict(choices=['present','absent'], required=True), + ), + supports_check_mode=True + ) + + name = module.params['name'] + if not "." in name: + module.fail_json(msg="Locale does not match pattern. Did you specify the encoding?") + state = module.params['state'] + + if not os.path.exists("/etc/locale.gen"): + module.fail_json(msg="/etc/locale.gen missing. Is the package “locales” installed?") + + prev_state = "present" if is_present(name) else "absent" + changed = (prev_state!=state) + + if module.check_mode: + module.exit_json(changed=changed) + else: + encoding = name.split(".")[1] + if changed: + if state=="present": + # Create locale. + replace_line("# "+name+" "+encoding, name+" "+encoding) + else: + # Delete locale. + replace_line(name+" "+encoding, "# "+name+" "+encoding) + + localeGenExitValue = call("locale-gen") + if localeGenExitValue!=0: + module.fail_json(msg="locale.gen failed to execute, it returned "+localeGenExitValue) + + module.exit_json(name=name, changed=changed, msg="OK") + +# import module snippets +from ansible.module_utils.basic import * + +main() \ No newline at end of file From 8b4c9420e71037974b7efc0f9cabcbea428d5466 Mon Sep 17 00:00:00 2001 From: Nick Irvine Date: Thu, 26 Dec 2013 16:11:39 -0800 Subject: [PATCH 031/772] Make default irc color none; add color none --- library/notification/irc | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/library/notification/irc b/library/notification/irc index 11bdc4a95ec..f5ef9956cf2 100644 --- a/library/notification/irc +++ b/library/notification/irc @@ -51,8 +51,8 @@ options: description: - Text color for the message. Default is black. 
required: false - default: black - choices: [ "yellow", "red", "green", "blue", "black" ] + default: none + choices: [ "none", "yellow", "red", "green", "blue", "black" ] channel: description: - Channel name @@ -94,7 +94,7 @@ from time import sleep def send_msg(channel, msg, server='localhost', port='6667', - nick="ansible", color='black', passwd=False, timeout=30): + nick="ansible", color='none', passwd=False, timeout=30): '''send message to IRC''' colornumbers = { @@ -107,10 +107,11 @@ def send_msg(channel, msg, server='localhost', port='6667', try: colornumber = colornumbers[color] + colortext = "\x03" + colornumber except: - colornumber = "01" # black + colortext = "" - message = "\x03" + colornumber + msg + message = colortext + msg irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) irc.connect((server, int(port))) @@ -158,8 +159,8 @@ def main(): port=dict(default=6667), nick=dict(default='ansible'), msg=dict(required=True), - color=dict(default="black", choices=["yellow", "red", "green", - "blue", "black"]), + color=dict(default="none", choices=["yellow", "red", "green", + "blue", "black", "none"]), channel=dict(required=True), passwd=dict(), timeout=dict(type='int', default=30) From c06cc8c21b3e9c2bba4405f7d2f6889647daae70 Mon Sep 17 00:00:00 2001 From: Evgenii Terechkov Date: Tue, 3 Dec 2013 20:54:31 +0800 Subject: [PATCH 032/772] Initial version of apt-rpm module --- library/packaging/apt-rpm | 168 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 168 insertions(+) create mode 100755 library/packaging/apt-rpm diff --git a/library/packaging/apt-rpm b/library/packaging/apt-rpm new file mode 100755 index 00000000000..e8302f1bd02 --- /dev/null +++ b/library/packaging/apt-rpm @@ -0,0 +1,168 @@ +#!/usr/bin/python -tt +# -*- coding: utf-8 -*- + +# (c) 2013, Evgenii Terechkov +# Written by Evgenii Terechkov +# Based on urpmi module written by Philippe Makowski +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +DOCUMENTATION = ''' +--- +module: apt-rpm +short_description: apt-rpm package manager +description: + - Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required. +version_added: "1.5" +options: + pkg: + description: + - name of package to install, upgrade or remove. + required: true + default: null + state: + description: + - Indicates the desired package state + required: false + default: present + choices: [ "absent", "present" ] + update_cache: + description: + - update the package database first C(apt-get update). 
+ required: false + default: no + choices: [ "yes", "no" ] +author: Evgenii Terechkov +notes: [] +''' + +EXAMPLES = ''' +# install package foo +- apt-rpm: pkg=foo state=present +# remove package foo +- apt-rpm: pkg=foo state=absent +# description: remove packages foo and bar +- apt-rpm: pkg=foo,bar state=absent +# description: update the package database and install bar (bar will be the updated if a newer version exists) +- apt-rpm: name=bar state=present update_cache=yes +''' + + +import json +import shlex +import os +import sys + +APT_PATH="/usr/bin/apt-get" +RPM_PATH="/usr/bin/rpm" + +def query_package(module, name): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + rc = os.system("%s -q %s" % (RPM_PATH,name)) + if rc == 0: + return True + else: + return False + +def query_package_provides(module, name): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + rc = os.system("%s -q --provides %s >/dev/null" % (RPM_PATH,name)) + return rc == 0 + +def update_package_db(module): + rc = os.system("%s update" % APT_PATH) + + if rc != 0: + module.fail_json(msg="could not update package db") + +def remove_packages(module, packages): + + remove_c = 0 + # Using a for loop incase of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, package): + continue + + rc = os.system("%s -y remove %s > /dev/null" % (APT_PATH,package)) + + if rc != 0: + module.fail_json(msg="failed to remove %s" % (package)) + + remove_c += 1 + + if remove_c > 0: + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, pkgspec): + + packages = "" + for package in pkgspec: + if not query_package_provides(module, package): + packages += "'%s' " % package + + if len(packages) != 0: + + cmd = ("%s -y install %s > /dev/null" % (APT_PATH, packages)) + + rc, out, err = module.run_command(cmd) + + installed = True + for packages in pkgspec: + if not query_package_provides(module, package): + installed = False + + # apt-rpm always have 0 for exit code if --force is used + if rc or not installed: + module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err)) + else: + module.exit_json(changed=True, msg="%s present(s)" % packages) + else: + module.exit_json(changed=False) + + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']), + update_cache = dict(default=False, aliases=['update-cache'], type='bool'), + package = dict(aliases=['pkg', 'name'], required=True))) + + + if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH): + module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm") + + p = module.params + + if p['update_cache']: + update_package_db(module) + + packages = p['package'].split(',') + + if p['state'] in [ 'installed', 'present' ]: + install_packages(module, packages) + + elif p['state'] in [ 'removed', 'absent' ]: + remove_packages(module, packages) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * + +main() From 47f5909167431f8fc3fbcf362defaa6df0fba7c7 Mon Sep 17 00:00:00 2001 From: Tsz Ming WONG Date: Mon, 30 Dec 2013 16:51:35 +0800 Subject: [PATCH 033/772] Allow = symbols in variable values in host inventory --- lib/ansible/inventory/ini.py | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py
index 69689758aad..371be2439b9 100644
--- a/lib/ansible/inventory/ini.py
+++ b/lib/ansible/inventory/ini.py
@@ -119,7 +119,7 @@ class InventoryParser(object):
                     if t.startswith('#'):
                         break
                     try:
-                        (k,v) = t.split("=")
+                        (k,v) = t.split("=", 1)
                     except ValueError, e:
                         raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e)))
                     try:

From fbd39026525ac14cf126d1f26236f1c76454a199 Mon Sep 17 00:00:00 2001
From: "inetfuture(Aaron Wang)"
Date: Wed, 8 Jan 2014 12:45:56 +0800
Subject: [PATCH 034/772] supervisorctl: add group support and refine documentation.

---
 library/web_infrastructure/supervisorctl | 136 +++++++++++++----------
 1 file changed, 80 insertions(+), 56 deletions(-)

diff --git a/library/web_infrastructure/supervisorctl b/library/web_infrastructure/supervisorctl
index 564368af5f4..a53a93d22ee 100644
--- a/library/web_infrastructure/supervisorctl
+++ b/library/web_infrastructure/supervisorctl
@@ -23,70 +23,74 @@ import os

 DOCUMENTATION = '''
 ---
 module: supervisorctl
-short_description: Manage the state of a program or group of programs running via Supervisord
+short_description: Manage the state of a program or group of programs running via supervisord
 description:
-     - Manage the state of a program or group of programs running via I(Supervisord)
+     - Manage the state of a program or group of programs running via supervisord
 version_added: "0.7"
 options:
   name:
     description:
-      - The name of the I(supervisord) program/process to manage
+      - The name of the supervisord program/group to manage. It will be taken as a group name when it ends with a colon I(:).
     required: true
     default: null
   config:
     description:
-      - configuration file path, passed as -c to supervisorctl
+      - configuration file path, passed as -c to supervisorctl.
     required: false
     default: null
     version_added: "1.3"
   server_url:
     description:
-      - URL on which supervisord server is listening, passed as -s to supervisorctl
+      - URL on which supervisord server is listening, passed as -s to supervisorctl.
     required: false
    default: null
    version_added: "1.3"
  username:
    description:
-      - username to use for authentication with server, passed as -u to supervisorctl
+      - username to use for authentication with server, passed as -u to supervisorctl.
    required: false
    default: null
    version_added: "1.3"
  password:
    description:
-      - password to use for authentication with server, passed as -p to supervisorctl
+      - password to use for authentication with server, passed as -p to supervisorctl.
    required: false
    default: null
    version_added: "1.3"
  state:
    description:
-      - The state of service
+      - The desired state of the program/group. The names of the affected programs are returned in the I(affected) field of the result.
    required: true
    default: null
    choices: [ "present", "started", "stopped", "restarted" ]
  supervisorctl_path:
    description:
-      - Path to supervisorctl executable to use
+      - Path to supervisorctl executable to use.
    required: false
    default: null
    version_added: "1.4"
-requirements:
-  - supervisorctl
-requirements: [ ]
-author: Matt Wright
+notes:
+  - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
+  - When C(state) = I(restarted), the module will call C(supervisorctl update) then C(supervisorctl restart).
+requirements: [ "supervisorctl" ]
+author: Matt Wright, Aaron Wang
 '''

 EXAMPLES = '''
 # Manage the state of program to be in 'started' state.
- supervisorctl: name=my_app state=started +# Manage the state of program group to be in 'started' state. +- supervisorctl: name='my_apps:' state=started + # Restart my_app, reading supervisorctl configuration from a specified file. - supervisorctl: name=my_app state=restarted config=/var/opt/my_project/supervisord.conf # Restart my_app, connecting to supervisord with credentials and server URL. - supervisorctl: name=my_app state=restarted username=test password=testpass server_url=http://localhost:9001 - ''' + def main(): arg_spec = dict( name=dict(required=True), @@ -101,6 +105,10 @@ def main(): module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) name = module.params['name'] + is_group = False + if name.endswith(':'): + is_group = True + name = name.rstrip(':') state = module.params['state'] config = module.params.get('config') server_url = module.params.get('server_url') @@ -111,11 +119,12 @@ def main(): if supervisorctl_path: supervisorctl_path = os.path.expanduser(supervisorctl_path) if os.path.exists(supervisorctl_path) and module.is_executable(supervisorctl_path): - supervisorctl_args = [ supervisorctl_path ] + supervisorctl_args = [supervisorctl_path] else: - module.fail_json(msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path) + module.fail_json( + msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path) else: - supervisorctl_args = [ module.get_bin_path('supervisorctl', True) ] + supervisorctl_args = [module.get_bin_path('supervisorctl', True)] if config: supervisorctl_args.extend(['-c', os.path.expanduser(config)]) @@ -133,61 +142,76 @@ def main(): args.append(name) return module.run_command(args, **kwargs) - rc, out, err = run_supervisorctl('status') - present = name in out - - if state == 'present': - if not present: - if module.check_mode: - module.exit_json(changed=True) - run_supervisorctl('reread', check_rc=True) - rc, out, err = run_supervisorctl('add', name) - - if '%s: added process group' % name in out: - module.exit_json(changed=True, name=name, state=state) + def get_matched_processes(): + matched = [] + rc, out, err = run_supervisorctl('status') + for line in out.splitlines(): + # One status line may look like one of these two: + # process not in group: + # echo_date_lonely RUNNING pid 7680, uptime 13:22:18 + # process in group: + # echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18 + fields = [field for field in line.split(' ') if field != ''] + process_name = fields[0] + status = fields[1] + + if is_group: + # If there is ':', this process must be in a group. 
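+                # Only the group prefix before the colon is compared with the
+                # requested name; e.g. "my_apps:my_app_00" belongs to group "my_apps".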
+ if ':' in process_name: + group = process_name.split(':')[0] + if group != name: + continue + else: + continue else: - module.fail_json(msg=out, name=name, state=state) - - module.exit_json(changed=False, name=name, state=state) + if process_name != name: + continue - rc, out, err = run_supervisorctl('status', name) - running = 'RUNNING' in out or '(already running)' in out + matched.append((process_name, status)) + return matched - if running and state == 'started': - module.exit_json(changed=False, name=name, state=state) + def take_action_on_processes(processes, status_filter, action, expected_result): + to_take_action_on = [] + for process_name, status in processes: + if status_filter(status): + to_take_action_on.append(process_name) - if running and state == 'stopped': + if len(to_take_action_on) == 0: + module.exit_json(changed=False, name=name, state=state) if module.check_mode: module.exit_json(changed=True) - rc, out, err = run_supervisorctl('stop', name) - - if '%s: stopped' % name in out: - module.exit_json(changed=True, name=name, state=state) + for process_name in to_take_action_on: + rc, out, err = run_supervisorctl(action, process_name) + if '%s: %s' % (process_name, expected_result) not in out: + module.fail_json(msg=out) - module.fail_json(msg=out) + module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on) - elif state == 'restarted': - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = run_supervisorctl('update', name) - rc, out, err = run_supervisorctl('restart', name) + if state == 'restarted': + rc, out, err = run_supervisorctl('update') + processes = get_matched_processes() + take_action_on_processes(processes, lambda s: True, 'restart', 'started') - if '%s: started' % name in out: - module.exit_json(changed=True, name=name, state=state) + processes = get_matched_processes() - module.fail_json(msg=out) + if state == 'present': + if len(processes) > 0: + module.exit_json(changed=False, name=name, state=state) - elif not running and state == 'started': if module.check_mode: module.exit_json(changed=True) - rc, out, err = run_supervisorctl('start',name) - - if '%s: started' % name in out: + run_supervisorctl('reread', check_rc=True) + rc, out, err = run_supervisorctl('add', name) + if '%s: added process group' % name in out: module.exit_json(changed=True, name=name, state=state) + else: + module.fail_json(msg=out, name=name, state=state) - module.fail_json(msg=out) + if state == 'started': + take_action_on_processes(processes, lambda s: s != 'RUNNING', 'start', 'started') - module.exit_json(changed=False, name=name, state=state) + if state == 'stopped': + take_action_on_processes(processes, lambda s: s == 'RUNNING', 'stop', 'stopped') # import module snippets from ansible.module_utils.basic import * From f99500d65cd2ea7ccb69a71d9a112edbbb802726 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 8 Jan 2014 14:40:37 -0600 Subject: [PATCH 035/772] Expose the playbook to callback plugins --- lib/ansible/callbacks.py | 6 ++++++ lib/ansible/playbook/__init__.py | 1 + 2 files changed, 7 insertions(+) diff --git a/lib/ansible/callbacks.py b/lib/ansible/callbacks.py index 4681dd2fe07..a2a7e4f58d4 100644 --- a/lib/ansible/callbacks.py +++ b/lib/ansible/callbacks.py @@ -108,6 +108,12 @@ def log_unflock(runner): except OSError: pass +def set_playbook(callback, playbook): + ''' used to notify callback plugins of playbook context ''' + callback.playbook = playbook + for callback_plugin in callback_plugins: + callback_plugin.playbook = 
playbook
+
 def set_play(callback, play):
     ''' used to notify callback plugins of context '''
     callback.play = play
diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py
index dc7991aaf74..439c60b6859 100644
--- a/lib/ansible/playbook/__init__.py
+++ b/lib/ansible/playbook/__init__.py
@@ -148,6 +148,7 @@ class PlayBook(object):
         self.filename = playbook
         (self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars)
         ansible.callbacks.load_callback_plugins()
+        ansible.callbacks.set_playbook(self.callbacks, self)

     # *****************************************************

From 6b75e75161fd8bf5bb051ff2302f10b71c4510a4 Mon Sep 17 00:00:00 2001
From: Augustus Kling
Date: Sun, 19 Jan 2014 23:35:10 +0100
Subject: [PATCH 036/772] Handle install/uninstall of locales for Ubuntu, too.

---
 library/system/locale | 72 ++++++++++++++++++++++++++++++++++++-------
 1 file changed, 61 insertions(+), 11 deletions(-)

diff --git a/library/system/locale b/library/system/locale
index 350c1449e19..577f34bc3ec 100644
--- a/library/system/locale
+++ b/library/system/locale
@@ -53,6 +53,52 @@ def replace_line(existing_line, new_line):
     with open("/etc/locale.gen", "r") as f:
         lines = [line.replace(existing_line, new_line) for line in f]
     with open("/etc/locale.gen", "w") as f:
         f.write("".join(lines))

+def apply_change(targetState, name, encoding):
+    """Create or remove locale.
+
+    Keyword arguments:
+    targetState -- Desired state, either present or absent.
+    name -- Name including encoding such as de_CH.UTF-8.
+    encoding -- Encoding such as UTF-8.
+    """
+    if targetState=="present":
+        # Create locale.
+        replace_line("# "+name+" "+encoding, name+" "+encoding)
+    else:
+        # Delete locale.
+        replace_line(name+" "+encoding, "# "+name+" "+encoding)
+
+    localeGenExitValue = call("locale-gen")
+    if localeGenExitValue!=0:
+        module.fail_json(msg="locale.gen failed to execute, it returned "+str(localeGenExitValue))
+
+def apply_change_ubuntu(targetState, name, encoding):
+    """Create or remove locale.
+
+    Keyword arguments:
+    targetState -- Desired state, either present or absent.
+    name -- Name including encoding such as de_CH.UTF-8.
+    encoding -- Encoding such as UTF-8.
+    """
+    if targetState=="present":
+        # Create locale.
+        # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
+        localeGenExitValue = call(["locale-gen", name])
+    else:
+        # Deleting a locale involves discarding it from /var/lib/locales/supported.d/local and regenerating all locales.
+        with open("/var/lib/locales/supported.d/local", "r") as f:
+            content = f.readlines()
+        with open("/var/lib/locales/supported.d/local", "w") as f:
+            for line in content:
+                if line!=(name+" "+encoding+"\n"):
+                    f.write(line)
+        # Purge locales and regenerate.
+        # Please provide a patch if you know how to avoid regenerating the locales to keep!
+        localeGenExitValue = call(["locale-gen", "--purge"])
+
+    if localeGenExitValue!=0:
+        raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue))
+
 # ==============================================================
 # main

@@ -72,7 +118,14 @@ def main():
     state = module.params['state']

     if not os.path.exists("/etc/locale.gen"):
-        module.fail_json(msg="/etc/locale.gen missing. Is the package “locales” installed?")
+        if os.path.exists("/var/lib/locales/supported.d/local"):
+            # Ubuntu created its own system to manage locales.
+            ubuntuMode = True
+        else:
+            module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing.
Is the package “locales” installed?") + else: + # We found the common way to manage locales. + ubuntuMode = False prev_state = "present" if is_present(name) else "absent" changed = (prev_state!=state) @@ -82,16 +135,13 @@ def main(): else: encoding = name.split(".")[1] if changed: - if state=="present": - # Create locale. - replace_line("# "+name+" "+encoding, name+" "+encoding) - else: - # Delete locale. - replace_line(name+" "+encoding, "# "+name+" "+encoding) - - localeGenExitValue = call("locale-gen") - if localeGenExitValue!=0: - module.fail_json(msg="locale.gen failed to execute, it returned "+localeGenExitValue) + try: + if ubuntuMode==False: + apply_change(state, name, encoding) + else: + apply_change_ubuntu(state, name, encoding) + except EnvironmentError as e: + module.fail_json(msg=e.strerror, exitValue=e.errno) module.exit_json(name=name, changed=changed, msg="OK") From c3f182bc9bf4cdc93b939a4005b9b8d8a3186bcd Mon Sep 17 00:00:00 2001 From: Augustus Kling Date: Mon, 20 Jan 2014 00:11:01 +0100 Subject: [PATCH 037/772] Fix error reporting in case locale-gen fails on non-Ubuntu systems. --- library/system/locale | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/locale b/library/system/locale index 577f34bc3ec..81f33dec9ca 100644 --- a/library/system/locale +++ b/library/system/locale @@ -70,7 +70,7 @@ def apply_change(targetState, name, encoding): localeGenExitValue = call("locale-gen") if localeGenExitValue!=0: - module.fail_json(msg="locale.gen failed to execute, it returned "+str(localeGenExitValue)) + raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue)) def apply_change_ubuntu(targetState, name, encoding): """Create or remove locale. From 13432bb18d9e360692b3799a9490161ad49f4dd0 Mon Sep 17 00:00:00 2001 From: Tim Miller Date: Mon, 20 Jan 2014 15:27:37 -0800 Subject: [PATCH 038/772] Make vars plugins honor `hash_behaviour` setting. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When applying precedence ordering of different classes of vars (hostvars, groupvars, role-defaults, etc.), the hash_behaviour setting controls whether duplicate hash keys are replaced in entirety, or merged together. The wording of the documentation suggests that this setting applies to all levels of the precedence ordering, when it currently does not: > Ansible by default will override variables in specific precedence orders, > as described in Variables. When a variable of higher precedence wins, > it will replace the other value. ... Some users prefer that variables that > are hashes (aka ‘dictionaries’ in Python terms) are merged together. This > setting is called ‘merge’. This change causes the hash_behavior setting to extend to vars plugins. 
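As a minimal standalone sketch of the two behaviours (the merge_hash helper below
is illustrative only, not the actual utils.combine_vars implementation):

    def merge_hash(a, b):
        # Recursively merge dict b into dict a; b wins on non-dict conflicts.
        result = a.copy()
        for key, value in b.items():
            if key in result and isinstance(result[key], dict) and isinstance(value, dict):
                result[key] = merge_hash(result[key], value)
            else:
                result[key] = value
        return result

    group_vars = {'app': {'port': 80, 'debug': False}}
    host_vars = {'app': {'debug': True}}

    # 'replace' (the default): the higher-precedence hash wins wholesale
    dict(group_vars, **host_vars)       # {'app': {'debug': True}}
    # 'merge': duplicate hash keys are combined key by key
    merge_hash(group_vars, host_vars)   # {'app': {'port': 80, 'debug': True}}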
--- lib/ansible/inventory/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index e7979011cdd..337a0377c98 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -341,12 +341,12 @@ class Inventory(object): raise errors.AnsibleError("host not found: %s" % hostname) vars = {} - vars_results = [ plugin.run(host) for plugin in self._vars_plugins ] + vars_results = [ plugin.run(host) for plugin in self._vars_plugins ] for updated in vars_results: if updated is not None: - vars.update(updated) + vars = utils.combine_vars(vars, updated) - vars.update(host.get_variables()) + vars = utils.combine_vars(vars, host.get_variables()) if self.parser is not None: vars = utils.combine_vars(vars, self.parser.get_host_variables(host)) return vars From 62ef8dfbe02c2d6e8a7729530391409645002612 Mon Sep 17 00:00:00 2001 From: Dmitry Ivanov Date: Tue, 21 Jan 2014 17:27:50 +0700 Subject: [PATCH 039/772] [s3] Compatibility with fakes3. --- library/cloud/s3 | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/library/cloud/s3 b/library/cloud/s3 index 5d2c97baf39..948ddcf58b7 100644 --- a/library/cloud/s3 +++ b/library/cloud/s3 @@ -68,7 +68,7 @@ options: aliases: [] s3_url: description: - - S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. + - S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. Ansible tries to guess if fakes3 (https://github.com/jubos/fake-s3) or Eucalyptus Walrus (https://github.com/eucalyptus/eucalyptus/wiki/Walrus) is used and configure connection accordingly. Current heuristic is: everything with scheme fakes3:// is fakes3, everything else not ending with amazonaws.com is Walrus. 
default: null aliases: [ S3_URL ] aws_secret_key: @@ -238,6 +238,13 @@ def get_download_url(module, s3, bucket, obj, expiry, changed=True): except s3.provider.storage_response_error, e: module.fail_json(msg= str(e)) +def is_fakes3(s3_url): + """ Return True if s3_url has scheme fakes3:// """ + if s3_url is not None: + return urlparse.urlparse(s3_url).scheme == 'fakes3' + else: + return False + def is_walrus(s3_url): """ Return True if it's Walrus endpoint, not S3 @@ -283,8 +290,22 @@ def main(): if not s3_url and 'S3_URL' in os.environ: s3_url = os.environ['S3_URL'] - # If we have an S3_URL env var set, this is likely to be Walrus, so change connection method - if is_walrus(s3_url): + # Look at s3_url and tweak connection settings + # if connecting to Walrus or fakes3 + if is_fakes3(s3_url): + try: + fakes3 = urlparse.urlparse(s3_url) + from boto.s3.connection import OrdinaryCallingFormat + s3 = boto.connect_s3( + aws_access_key, + aws_secret_key, + is_secure=False, + host=fakes3.hostname, + port=fakes3.port, + calling_format=OrdinaryCallingFormat()) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + elif is_walrus(s3_url): try: walrus = urlparse.urlparse(s3_url).hostname s3 = boto.connect_walrus(walrus, aws_access_key, aws_secret_key) From 5ba67396036bb87d6b76f726569314ff43955a86 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 21 Jan 2014 12:41:58 -0600 Subject: [PATCH 040/772] Use realpath for plugin directories instead of abspath --- lib/ansible/utils/plugins.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/utils/plugins.py index 1aeba166931..b1d0117e613 100644 --- a/lib/ansible/utils/plugins.py +++ b/lib/ansible/utils/plugins.py @@ -30,7 +30,7 @@ _basedirs = [] def push_basedir(basedir): # avoid pushing the same absolute dir more than once - basedir = os.path.abspath(basedir) + basedir = os.path.realpath(basedir) if basedir not in _basedirs: _basedirs.insert(0, basedir) @@ -99,7 +99,7 @@ class PluginLoader(object): ret = [] ret += self._extra_dirs for basedir in _basedirs: - fullpath = os.path.abspath(os.path.join(basedir, self.subdir)) + fullpath = os.path.realpath(os.path.join(basedir, self.subdir)) if os.path.isdir(fullpath): files = glob.glob("%s/*" % fullpath) for file in files: @@ -111,7 +111,7 @@ class PluginLoader(object): # look in any configured plugin paths, allow one level deep for subcategories configured_paths = self.config.split(os.pathsep) for path in configured_paths: - path = os.path.abspath(os.path.expanduser(path)) + path = os.path.realpath(os.path.expanduser(path)) contents = glob.glob("%s/*" % path) for c in contents: if os.path.isdir(c) and c not in ret: @@ -131,7 +131,7 @@ class PluginLoader(object): ''' Adds an additional directory to the search path ''' self._paths = None - directory = os.path.abspath(directory) + directory = os.path.realpath(directory) if directory is not None: if with_subdir: From ce0b5d16b36a652c2bd6d0979c92726a13acb586 Mon Sep 17 00:00:00 2001 From: Thomas Omans Date: Tue, 21 Jan 2014 12:37:04 -0800 Subject: [PATCH 041/772] Allow templates in ansible_sudo_pass inventory var Template ansible_sudo_pass the same way we template ansible_ssh_pass. 
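A rough sketch of the effect; render() below is only a stand-in for
template.template(basedir, value, inject), which performs the real Jinja2 expansion:

    def render(value, inject):
        # Toy substitution; the actual code hands the string to the template engine.
        for key, val in inject.items():
            value = value.replace('{{ %s }}' % key, str(val))
        return value

    # inventory:  host1  ansible_sudo_pass='{{ my_sudo_pass }}'
    inject = {'my_sudo_pass': 'sekrit'}
    render('{{ my_sudo_pass }}', inject)    # -> 'sekrit'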
--- lib/ansible/runner/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index b5138bf1500..27c741267ce 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -688,6 +688,8 @@ class Runner(object): # user/pass may still contain variables at this stage actual_user = template.template(self.basedir, actual_user, inject) actual_pass = template.template(self.basedir, actual_pass, inject) + self.sudo_pass = template.template(self.basedir, self.sudo_pass, inject) + # make actual_user available as __magic__ ansible_ssh_user variable inject['ansible_ssh_user'] = actual_user From 9343db69b41487c3ca783caa827926ebf3a02d6e Mon Sep 17 00:00:00 2001 From: Thomas Omans Date: Tue, 21 Jan 2014 12:44:36 -0800 Subject: [PATCH 042/772] Fixing whitespace --- lib/ansible/runner/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 27c741267ce..99fe1d17e0c 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -689,7 +689,6 @@ class Runner(object): actual_user = template.template(self.basedir, actual_user, inject) actual_pass = template.template(self.basedir, actual_pass, inject) self.sudo_pass = template.template(self.basedir, self.sudo_pass, inject) - # make actual_user available as __magic__ ansible_ssh_user variable inject['ansible_ssh_user'] = actual_user From 1ef8f279b9d24a13b5b6788cca44af07580fb2cb Mon Sep 17 00:00:00 2001 From: Matt Date: Wed, 22 Jan 2014 00:01:51 -0800 Subject: [PATCH 043/772] added random sleep option to ansible-pull --- bin/ansible-pull | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/bin/ansible-pull b/bin/ansible-pull index 3253ced80c9..9a338edfa7c 100755 --- a/bin/ansible-pull +++ b/bin/ansible-pull @@ -44,6 +44,8 @@ import subprocess import sys import datetime import socket +import random +import time from ansible import utils from ansible.utils import cmd_functions @@ -101,6 +103,8 @@ def main(args): help='purge checkout after playbook run') parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true', help='only run the playbook if the repository has been updated') + parser.add_option('-s', '--sleep', dest='sleep', default=None, + help='sleep for random interval (between 0 and n number of seconds) before starting. this is a useful way to disperse git requests') parser.add_option('-f', '--force', dest='force', default=False, action='store_true', help='run the playbook even if the repository could ' @@ -159,7 +163,18 @@ def main(args): inv_opts, base_opts, options.module_name, repo_opts ) - # RUN THE CHECKOUT COMMAND + if options.sleep: + try: + secs = random.randint(0,int(options.sleep)); + except ValueError: + parser.error("%s is not a number." % options.sleep) + return 1 + + print >>sys.stderr, "Sleeping for %d seconds..." 
% secs
+        time.sleep(secs);
+
+
+    # RUN THE CHECKOUT COMMAND
     rc, out, err = cmd_functions.run_cmd(cmd, live=True)

     if rc != 0:

From 88b7dc344cec69a09f2596f9cad1778015c8e01d Mon Sep 17 00:00:00 2001
From: Jakub Jirutka
Date: Wed, 22 Jan 2014 20:10:53 +0100
Subject: [PATCH 044/772] Introduce module layman

---
 library/packaging/layman | 236 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 236 insertions(+)
 create mode 100644 library/packaging/layman

diff --git a/library/packaging/layman b/library/packaging/layman
new file mode 100644
index 00000000000..0f7b986d491
--- /dev/null
+++ b/library/packaging/layman
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jakub Jirutka
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import shutil
+from os import path
+from urllib2 import Request, urlopen, URLError
+
+DOCUMENTATION = '''
+---
+module: layman
+author: Jakub Jirutka
+version_added: "1.4.5"
+short_description: Manage Gentoo overlays
+description:
+  - Uses Layman to manage additional repositories for the Portage package manager on Gentoo Linux.
+    Please note that Layman must be installed on a managed node prior to using this module.
+options:
+  name:
+    description:
+      - The overlay id to install, synchronize, or uninstall.
+        Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)).
+    required: true
+  list_url:
+    description:
+      - A URL of the alternative overlays list that defines the overlay to install.
+        This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where
+        C(overlay_defs) is read from the Layman configuration.
+    required: false
+  state:
+    description:
+      - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay.
+    required: false
+    default: present
+    choices: [present, absent, updated]
+'''
+
+EXAMPLES = '''
+# Install the overlay 'mozilla' which is on the central overlays list.
+- layman: name=mozilla
+
+# Install the overlay 'cvut' from the specified alternative list.
+- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml
+
+# Update (sync) the overlay 'cvut', or install if not installed yet.
+- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml state=updated
+
+# Update (sync) all of the installed overlays.
+- layman: name=ALL state=updated
+
+# Uninstall the overlay 'cvut'.
+- layman: name=cvut state=absent
+'''
+
+USERAGENT = 'ansible-httpget'
+
+try:
+    from layman.api import LaymanAPI
+    from layman.config import BareConfig
+    HAS_LAYMAN_API = True
+except ImportError:
+    HAS_LAYMAN_API = False
+
+
+class ModuleError(Exception): pass
+
+
+def init_layman(config=None):
+    '''Returns the initialized ``LaymanAPI``.
+ + :param config: the layman's configuration to use (optional) + ''' + if config is None: config = BareConfig(read_configfile=True, quietness=1) + return LaymanAPI(config) + + +def download_url(url, dest): + ''' + :param url: the URL to download + :param dest: the absolute path of where to save the downloaded content to; + it must be writable and not a directory + + :raises ModuleError + ''' + request = Request(url) + request.add_header('User-agent', USERAGENT) + + try: + response = urlopen(request) + except URLError, e: + raise ModuleError("Failed to get %s: %s" % (url, str(e))) + + try: + with open(dest, 'w') as f: + shutil.copyfileobj(response, f) + except IOError, e: + raise ModuleError("Failed to write: %s" % str(e)) + + +def install_overlay(name, list_url=None): + '''Installs the overlay repository. If not on the central overlays list, + then :list_url of an alternative list must be provided. The list will be + fetched and saved under ``%(overlay_defs)/%(name.xml)`` (location of the + ``overlay_defs`` is read from the Layman's configuration). + + :param name: the overlay id + :param list_url: the URL of the remote repositories list to look for the overlay + definition (optional, default: None) + + :returns: True if the overlay was installed, or False if already exists + (i.e. nothing has changed) + :raises ModuleError + ''' + # read Layman configuration + layman_conf = BareConfig(read_configfile=True) + layman = init_layman(layman_conf) + + if layman.is_installed(name): + return False + + if not layman.is_repo(name): + if not list_url: raise ModuleError("Overlay '%s' is not on the list of known " \ + "overlays and URL of the remote list was not provided." % name) + + overlay_defs = layman_conf.get_option('overlay_defs') + dest = path.join(overlay_defs, name + '.xml') + + download_url(list_url, dest) + + # reload config + layman = init_layman() + + if not layman.add_repos(name): raise ModuleError(layman.get_errors()) + + return True + + +def uninstall_overlay(name): + '''Uninstalls the given overlay repository from the system. + + :param name: the overlay id to uninstall + + :returns: True if the overlay was uninstalled, or False if doesn't exist + (i.e. nothing has changed) + :raises ModuleError + ''' + layman = init_layman() + + if not layman.is_installed(name): + return False + + layman.delete_repos(name) + if layman.get_errors(): raise ModuleError(layman.get_errors()) + + return True + + +def sync_overlay(name): + '''Synchronizes the specified overlay repository. + + :param name: the overlay repository id to sync + :raises ModuleError + ''' + layman = init_layman() + + if not layman.sync(name): + messages = [ str(item[1]) for item in layman.sync_results[2] ] + raise ModuleError(messages) + + +def sync_overlays(): + '''Synchronize all of the installed overlays. 
+ + :raises ModuleError + ''' + layman = init_layman() + + for name in layman.get_installed(): + sync_overlay(name) + + +def main(): + # define module + module = AnsibleModule( + argument_spec = { + 'name': { 'required': True }, + 'list_url': { 'aliases': ['url'] }, + 'state': { 'default': "present", 'choices': ['present', 'absent', 'updated'] }, + } + ) + + if not HAS_LAYMAN_API: + module.fail_json(msg='Layman is not installed') + + state, name, url = (module.params[key] for key in ['state', 'name', 'list_url']) + + changed = False + try: + if state == 'present': + changed = install_overlay(name, url) + + elif state == 'updated': + if name == 'ALL': + sync_overlays() + elif install_overlay(name, url): + changed = True + else: + sync_overlay(name) + else: + changed = uninstall_overlay(name) + + except ModuleError, e: + module.fail_json(msg=e.message) + else: + module.exit_json(changed=changed, name=name) + + +# import module snippets +from ansible.module_utils.basic import * +main() From e545d1026abb917dead37616617d39b8f4d1dd40 Mon Sep 17 00:00:00 2001 From: Dave Rawks Date: Fri, 24 Jan 2014 15:39:02 -0800 Subject: [PATCH 045/772] Correct sleep calls Looks like we import "from time import sleep" but were calling "time.sleep" which is scoped into the wrong namespace. --- library/notification/irc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/library/notification/irc b/library/notification/irc index 11bdc4a95ec..dcdaa7cef9b 100644 --- a/library/notification/irc +++ b/library/notification/irc @@ -126,7 +126,7 @@ def send_msg(channel, msg, server='localhost', port='6667', break elif time.time() - start > timeout: raise Exception('Timeout waiting for IRC server welcome response') - time.sleep(0.5) + sleep(0.5) irc.send('JOIN %s\r\n' % channel) join = '' @@ -137,13 +137,13 @@ def send_msg(channel, msg, server='localhost', port='6667', break elif time.time() - start > timeout: raise Exception('Timeout waiting for IRC JOIN response') - time.sleep(0.5) + sleep(0.5) irc.send('PRIVMSG %s :%s\r\n' % (channel, message)) - time.sleep(1) + sleep(1) irc.send('PART %s\r\n' % channel) irc.send('QUIT\r\n') - time.sleep(1) + sleep(1) irc.close() # =========================================== From 54858279a63345d7cf0efaf15b5c5f4f1b700926 Mon Sep 17 00:00:00 2001 From: someda Date: Sat, 25 Jan 2014 15:11:13 +0900 Subject: [PATCH 046/772] Add typetalk notification module --- library/notification/typetalk | 116 ++++++++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 library/notification/typetalk diff --git a/library/notification/typetalk b/library/notification/typetalk new file mode 100644 index 00000000000..56d64d15329 --- /dev/null +++ b/library/notification/typetalk @@ -0,0 +1,116 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: typetalk +version_added: "1.5" +short_description: Send a message to typetalk +description: + - Send a message to typetalk using typetalk API ( http://developers.typetalk.in/ ) +options: + client_id: + description: + - OAuth2 client ID + required: true + client_secret: + description: + - OAuth2 client secret + required: true + topic: + description: + - topic id to post message + required: true + msg: + description: + - message body + required: true +requirements: [ urllib, urllib2, json ] +author: Takashi Someda +''' + +EXAMPLES = ''' +- typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed" +''' + +try: + import urllib +except ImportError: + urllib = None + +try: + 
import urllib2 +except ImportError: + urllib2 = None + +try: + import json +except ImportError: + json = None + + +def do_request(url, params, headers={}): + data = urllib.urlencode(params) + headers = dict(headers, **{ + 'User-Agent': 'Ansible/typetalk module', + }) + return urllib2.urlopen(urllib2.Request(url, data, headers)) + + +def get_access_token(client_id, client_secret): + params = { + 'client_id': client_id, + 'client_secret': client_secret, + 'grant_type': 'client_credentials', + 'scope': 'topic.post' + } + res = do_request('https://typetalk.in/oauth2/access_token', params) + return json.load(res)['access_token'] + + +def send_message(client_id, client_secret, topic, msg): + """ + send message to typetalk + """ + try: + access_token = get_access_token(client_id, client_secret) + url = 'https://typetalk.in/api/v1/topics/%d' % topic + headers = { + 'Authorization': 'Bearer %s' % access_token, + } + do_request(url, {'message': msg}, headers) + return True, {'access_token': access_token} + except urllib2.HTTPError, e: + return False, e + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + client_id=dict(required=True), + client_secret=dict(required=True), + topic=dict(required=True, type='int'), + msg=dict(required=True), + ), + supports_check_mode=False + ) + + if not (urllib and urllib2 and json): + module.fail_json(msg="urllib, urllib2 and json modules are required") + + client_id = module.params["client_id"] + client_secret = module.params["client_secret"] + topic = module.params["topic"] + msg = module.params["msg"] + + res, error = send_message(client_id, client_secret, topic, msg) + if not res: + module.fail_json(msg='fail to send message with response code %s' % error.code) + + module.exit_json(changed=True, topic=topic, msg=msg) + + +# import module snippets +from ansible.module_utils.basic import * +main() From 07a388e567db109192151c290ef10cf75e95cd46 Mon Sep 17 00:00:00 2001 From: Matthew Leventi Date: Sat, 25 Jan 2014 22:20:39 -0800 Subject: [PATCH 047/772] adding the ability to change redis configuration --- library/database/redis | 52 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/library/database/redis b/library/database/redis index 4e3793daa09..8cddb925ab8 100644 --- a/library/database/redis +++ b/library/database/redis @@ -22,8 +22,9 @@ module: redis short_description: Various redis commands, slave and flush description: - Unified utility to interact with redis instances. - 'slave' Sets a redis instance in slave or master mode. + 'slave' Sets a redis instance in slave or master mode. 'flush' Flushes all the instance or a specified db. + 'config' Ensures a configuration setting on an instance. version_added: "1.3" options: command: @@ -31,7 +32,7 @@ options: - The selected redis command required: true default: null - choices: [ "slave", "flush" ] + choices: [ "slave", "flush", "config" ] login_password: description: - The password used to authenticate with (usually not used) @@ -75,6 +76,16 @@ options: required: false default: all choices: [ "all", "db" ] + name: + description: + - A redis config key. + required: false + default: null + value: + description: + - A redis config value. 
+ required: false + default: null notes: @@ -100,6 +111,12 @@ EXAMPLES = ''' # Flush only one db in a redis instance - redis: command=flush db=1 flush_mode=db + +# Configure local redis to have 10000 max clients +- redis: command=config name=maxclients value=10000 + +# Configure local redis to have lua time limit of 100 ms +- redis: command=config name=lua-time-limit value=100 ''' try: @@ -146,7 +163,7 @@ def flush(client, db=None): def main(): module = AnsibleModule( argument_spec = dict( - command=dict(default=None, choices=['slave', 'flush']), + command=dict(default=None, choices=['slave', 'flush', 'config']), login_password=dict(default=None), login_host=dict(default='localhost'), login_port=dict(default='6379'), @@ -155,6 +172,8 @@ def main(): slave_mode=dict(default='slave', choices=['master', 'slave']), db=dict(default=None), flush_mode=dict(default='all', choices=['all', 'db']), + name=dict(default=None), + value=dict(default=None) ), supports_check_mode = True ) @@ -272,7 +291,34 @@ def main(): module.exit_json(changed=True, flushed=True, db=db) else: # Flush never fails :) module.fail_json(msg="Unable to flush '%d' database" % db) + elif command == 'config': + name = module.params['name'] + value = module.params['value'] + r = redis.StrictRedis(host=login_host, + port=login_port, + password=login_password) + + try: + r.ping() + except Exception, e: + module.fail_json(msg="unable to connect to database: %s" % e) + + + try: + old_value = r.config_get(name)[name] + except Exception, e: + module.fail_json(msg="unable to read config: %s" % e) + changed = old_value != value + + if module.check_mode or not changed: + module.exit_json(changed=changed, name=name, value=value) + else: + try: + r.config_set(name, value) + except Exception, e: + module.fail_json(msg="unable to write config: %s" % e) + module.exit_json(changed=changed, name=name, value=value) else: module.fail_json(msg='A valid command must be provided') From ff4e72d3ada7a85d9bdae7e07356ddfecb5dc4cf Mon Sep 17 00:00:00 2001 From: Drew Stokes Date: Tue, 28 Jan 2014 15:49:18 -0800 Subject: [PATCH 048/772] add registry option to npm module --- library/packaging/npm | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/library/packaging/npm b/library/packaging/npm index 3a4cd13f5d7..22e1658b6c9 100644 --- a/library/packaging/npm +++ b/library/packaging/npm @@ -55,6 +55,10 @@ options: - Install dependencies in production mode, excluding devDependencies required: false default: no + registry: + description: + - The registry to install modules from. + required: false state: description: - The state of the node.js library @@ -76,6 +80,9 @@ description: Install "coffee-script" node.js package globally. description: Remove the globally package "coffee-script". - npm: name=coffee-script global=yes state=absent +description: Install "coffee-script" node.js package from custom registry. +- npm: name=coffee-script registry=http://registry.mysite.com + description: Install packages based on package.json. - npm: path=/app/location @@ -100,6 +107,7 @@ class Npm(object): self.name = kwargs['name'] self.version = kwargs['version'] self.path = kwargs['path'] + self.registry = kwargs['registry'] self.production = kwargs['production'] if kwargs['executable']: @@ -122,6 +130,9 @@ class Npm(object): cmd.append('--production') if self.name: cmd.append(self.name_version) + if self.registry: + cmd.append('--registry') + cmd.append(self.registry) #If path is specified, cd into that path and run the command. 
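        #(npm resolves package.json and installs into node_modules relative to that directory)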
        if self.path:
@@ -178,6 +189,7 @@ def main():
         version=dict(default=None),
         production=dict(default='no', type='bool'),
         executable=dict(default=None),
+        registry=dict(default=None),
         state=dict(default='present', choices=['present', 'absent', 'latest'])
     )
     arg_spec['global'] = dict(default='no', type='bool')
@@ -192,6 +204,7 @@ def main():
     glbl = module.params['global']
     production = module.params['production']
     executable = module.params['executable']
+    registry = module.params['registry']
     state = module.params['state']

     if not path and not glbl:
@@ -200,7 +213,7 @@ def main():
         module.fail_json(msg='uninstalling a package is only available for named packages')

     npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, \
-              executable=executable)
+              executable=executable, registry=registry)

     changed = False
     if state == 'present':

From 553b42516e3b679d7a7682df51d3f86576acbc6b Mon Sep 17 00:00:00 2001
From: Eugene Brevdo
Date: Fri, 13 Dec 2013 13:45:42 -0800
Subject: [PATCH 049/772] ec2_vol supports name / id to mount volume on
 instance

* volume_size no longer required if name/id are provided
* id is volume-id
* name is volume Name tag
* special checking is provided
---
 library/cloud/ec2_vol | 105 ++++++++++++++++++++++++++++++++++------
 1 file changed, 91 insertions(+), 14 deletions(-)

diff --git a/library/cloud/ec2_vol b/library/cloud/ec2_vol
index 71a60ac603c..511bdd0cea5 100644
--- a/library/cloud/ec2_vol
+++ b/library/cloud/ec2_vol
@@ -46,6 +46,18 @@ options:
     required: false
     default: null
     aliases: []
+  name:
+    description:
+      - volume Name tag if you wish to attach an existing volume (requires instance)
+    required: false
+    default: null
+    aliases: []
+  id:
+    description:
+      - volume id if you wish to attach an existing volume (requires instance)
+    required: false
+    default: null
+    aliases: []
   volume_size:
     description:
       - size of volume (in GB) to create.
@@ -122,6 +134,26 @@ EXAMPLES = '''
     volume_size: 5
   with_items: ec2.instances
   register: ec2_vol
+
+# Idempotent playbook example combined with single instance launch
+# Volume must exist in the same zone; will not do anything if it is
+# already attached.
+- local_action:
+    module: ec2
+    keypair: "{{ keypair }}"
+    image: "{{ image }}"
+    zone: YYYYYY
+    id: my_instance
+    wait: yes
+    count: 1
+  register: ec2
+- local_action:
+    module: ec2_vol
+    instance: "{{ item.id }}"
+    name: my_existing_volume_Name_tag
+    device_name: /dev/xvdf
+  with_items: ec2.instances
+  register: ec2_vol
 '''

# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes.

@@ -142,7 +174,9 @@ def main():
     module = AnsibleModule(
         argument_spec = dict(
             instance = dict(),
-            volume_size = dict(required=True),
+            id = dict(),
+            name = dict(),
+            volume_size = dict(),
             iops = dict(),
             device_name = dict(),
             region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
@@ -154,20 +188,27 @@ def main():
         )
     )

+    id = module.params.get('id')
+    name = module.params.get('name')
     instance = module.params.get('instance')
     volume_size = module.params.get('volume_size')
     iops = module.params.get('iops')
     device_name = module.params.get('device_name')
     zone = module.params.get('zone')
     snapshot = module.params.get('snapshot')
-
+
     ec2 = ec2_connect(module)

+    if id and name:
+        module.fail_json(msg="Both id and name cannot be specified")
+
+    if not (id or name or volume_size):
+        module.fail_json(msg="You must specify either volume_size or one of id or name")
+
     # Here we need to get the zone info for the instance.
This covers situation where
    # instance is specified but zone isn't.
    # Useful for playbooks chaining instance launch with volume create + attach and where the
    # zone doesn't matter to the user.
-
     if instance:
         reservation = ec2.get_all_instances(instance_ids=instance)
         inst = reservation[0].instances[0]
@@ -189,14 +230,51 @@ def main():
         volume_type = 'standard'

     # If no instance supplied, try volume creation based on module parameters.
+    if name or id:
+        if not instance:
+            module.fail_json(msg = "If name or id is specified, instance must also be specified")
+        if iops or volume_size:
+            module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]")
+
+        filters = {}
+        volume_ids = None
+        if zone:
+            filters['availability_zone'] = zone
+        if name:
+            filters = {'tag:Name': name}
+        if id:
+            volume_ids = [id]
+        try:
+            vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
+        except boto.exception.BotoServerError, e:
+            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+
+        if not vols:
+            module.fail_json(msg = "Could not find volume in zone (if specified): %s" % (name or id))
+        if len(vols) > 1:
+            module.fail_json(msg =
+                "Found more than one volume in zone (if specified) with name: %s" % name)

+        volume = vols.pop()
+        if volume.attachment_state() is not None:
+            adata = volume.attach_data
+            if adata.instance_id != instance:
+                module.fail_json(msg = "Volume %s is already attached to another instance: %s"
+                                 % (name or id, adata.instance_id))
+            else:
+                module.exit_json(msg="Volume %s is already mapped on instance %s: %s" %
+                                 (name or id, adata.instance_id, adata.device),
+                                 volume_id=id,
+                                 device=adata.device,
+                                 changed=False)
+    else:
+        try:
+            volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
+            while volume.status != 'available':
+                time.sleep(3)
+                volume.update()
+        except boto.exception.BotoServerError, e:
+            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

-    try:
-        volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
-        while volume.status != 'available':
-            time.sleep(3)
-            volume.update()
-    except boto.exception.BotoServerError, e:
-        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

     # Attach the created volume.
@@ -207,16 +285,15 @@ def main():
             time.sleep(3)
             volume.update()
     except boto.exception.BotoServerError, e:
-        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
-
+        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+
     # If device_name isn't set, make a choice based on best practices here:
     # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
-
+
     # In future this needs to be more dynamic but combining block device mapping best practices
     # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)

     # Use password data attribute to tell whether the instance is Windows or Linux
-
     if device_name is None and instance:
         try:
             if not ec2.get_password_data(inst.id):

From 335bc1ee5ca876d2d51f1cfe676013e3fc1d7eea Mon Sep 17 00:00:00 2001
From: Steve Tjoa
Date: Mon, 3 Feb 2014 12:20:04 -0800
Subject: [PATCH 050/772] adding -K flag for ask-sudo-pass to ansible-pull
---
 bin/ansible-pull | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/bin/ansible-pull b/bin/ansible-pull
index 3253ced80c9..abb0b998684 100755
--- a/bin/ansible-pull
+++ b/bin/ansible-pull
@@ -123,6 +123,8 @@ def main(args):
                       default=DEFAULT_REPO_TYPE,
                       help='Module name used to check out repository. 
' 'Default is %s.' % DEFAULT_REPO_TYPE) + parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', + help='ask for sudo password') options, args = parser.parse_args(args) hostname = socket.getfqdn() @@ -180,6 +182,8 @@ def main(args): cmd = 'ansible-playbook %s %s' % (base_opts, playbook) if options.inventory: cmd += ' -i "%s"' % options.inventory + if options.ask_sudo_pass: + cmd += ' -K' os.chdir(options.dest) # RUN THE PLAYBOOK COMMAND From 66b2d55a184e7859243c1c851228bfe12d9d3d49 Mon Sep 17 00:00:00 2001 From: George Miroshnykov Date: Mon, 13 Jan 2014 15:37:26 +0200 Subject: [PATCH 051/772] Add idempotency to mongodb_user module --- library/database/mongodb_user | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/library/database/mongodb_user b/library/database/mongodb_user index 63bc6b5400d..de99e4971dd 100644 --- a/library/database/mongodb_user +++ b/library/database/mongodb_user @@ -188,19 +188,28 @@ def main(): try: client = MongoClient(login_host, int(login_port)) - if login_user is None and login_password is None: - mongocnf_creds = load_mongocnf() - if mongocnf_creds is not False: - login_user = mongocnf_creds['user'] - login_password = mongocnf_creds['password'] - elif login_password is None and login_user is not None: - module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') - - if login_user is not None and login_password is not None: - client.admin.authenticate(login_user, login_password) - except ConnectionFailure, e: - module.fail_json(msg='unable to connect to database, check login_user and login_password are correct') + module.fail_json(msg='unable to connect to database, check login_host and login_port are correct') + + # try to authenticate as a target user to check if it already exists + try: + client[db_name].authenticate(user, password) + if state == 'present': + module.exit_json(changed=False, user=user) + except OperationFailure: + if state == 'absent': + module.exit_json(changed=False, user=user) + + if login_user is None and login_password is None: + mongocnf_creds = load_mongocnf() + if mongocnf_creds is not False: + login_user = mongocnf_creds['user'] + login_password = mongocnf_creds['password'] + elif login_password is None and login_user is not None: + module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') + + if login_user is not None and login_password is not None: + client.admin.authenticate(login_user, login_password) if state == 'present': if password is None: From 1f0cdc5ec81dd205d6582310f3303f1eb8624e26 Mon Sep 17 00:00:00 2001 From: Ingmar Hupp Date: Wed, 5 Feb 2014 18:53:37 +0000 Subject: [PATCH 052/772] ansible-pull support for -e (--extra-vars) option (same as ansible-playbook) #5707 --- bin/ansible-pull | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bin/ansible-pull b/bin/ansible-pull index 3253ced80c9..65bb8948332 100755 --- a/bin/ansible-pull +++ b/bin/ansible-pull @@ -116,6 +116,8 @@ def main(args): 'Defaults to behavior of repository module.') parser.add_option('-i', '--inventory-file', dest='inventory', help="location of the inventory host file") + parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", + help="set additional variables as key=value or YAML/JSON", default=[]) parser.add_option('-v', '--verbose', default=False, action="callback", callback=increment_debug, help='Pass -vvvv to ansible-playbook') @@ -180,6 
+182,8 @@ def main(args):
     cmd = 'ansible-playbook %s %s' % (base_opts, playbook)
     if options.inventory:
         cmd += ' -i "%s"' % options.inventory
+    for ev in options.extra_vars:
+        cmd += ' -e "%s"' % ev
     os.chdir(options.dest)

     # RUN THE PLAYBOOK COMMAND

From b7df76706591c5116afbd4fe0e74a5333acc10df Mon Sep 17 00:00:00 2001
From: trbs
Date: Thu, 6 Feb 2014 15:49:06 +0100
Subject: [PATCH 053/772] make --list-tasks honor --limit
---
 bin/ansible-playbook | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/bin/ansible-playbook b/bin/ansible-playbook
index 0659a0e8a62..2241d75716c 100755
--- a/bin/ansible-playbook
+++ b/bin/ansible-playbook
@@ -149,12 +149,14 @@ def main(args):
             playnum += 1
             play = ansible.playbook.Play(pb, play_ds, play_basedir)
             label = play.name
+            hosts = pb.inventory.list_hosts(play.hosts)
             if options.listhosts:
-                hosts = pb.inventory.list_hosts(play.hosts)
                 print '  play #%d (%s): host count=%d' % (playnum, label, len(hosts))
                 for host in hosts:
                     print '    %s' % host
             if options.listtasks:
+                if options.subset and not hosts:
+                    continue
                 matched_tags, unmatched_tags = play.compare_tags(pb.only_tags)
                 unmatched_tags.discard('all')
                 unknown_tags = set(pb.only_tags) - (matched_tags | unmatched_tags)

From 2b4688a0bd00b31be92ce9094da5f1dc74af5b99 Mon Sep 17 00:00:00 2001
From: Mike Grozak
Date: Thu, 6 Feb 2014 17:20:32 +0100
Subject: [PATCH 054/772] Started the development of the 'chdir' functionality
 for the file module - creating symlinks without the base directory prefix
---
 library/files/file | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/library/files/file b/library/files/file
index bef175873e7..4b59b652c7d 100644
--- a/library/files/file
+++ b/library/files/file
@@ -149,6 +149,7 @@ def main():
         original_basename = dict(required=False), # Internal use only, for recursive ops
         recurse  = dict(default='no', type='bool'),
         force = dict(required=False,default=False,type='bool'),
+        chdir = dict(required=False,default=False),
         diff_peek = dict(default=None),
         validate = dict(required=False, default=None),
     ),
@@ -159,8 +160,16 @@ def main():
     params = module.params
     state = params['state']
     force = params['force']
+
+    params['chdir'] = chdir = os.path.expanduser(params['chdir'])
     params['path'] = path = os.path.expanduser(params['path'])

+    if state == 'link' and chdir is not None and os.path.isdir(chdir):
+        os.chdir(chdir)
+        # catch exception permission deny, no directory, etc
+        # save current working directory, chdir to it at the end of the module
+        # or before any escape
+
     # short-circuit for diff_peek
     if params.get('diff_peek', None) is not None:
         appears_binary = False

From 42b01dac9b58be0b4ed9f86be4a4bf5c3f4a5a54 Mon Sep 17 00:00:00 2001
From: Jack Neely
Date: Thu, 6 Feb 2014 15:10:20 -0500
Subject: [PATCH 055/772] ec2.py: Make sure ec2_placement gets handled
 correctly

ec2_placement was missing from the inventory variables ec2.py was
producing. Make sure that gets properly included rather than ignored.
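A rough, hypothetical sketch of the mapping for anyone following along
(assuming, as the surrounding loop in ec2.py does, that inventory keys
are built by prefixing boto instance attribute names with 'ec2_', so
boto's private '_placement' attribute arrives as 'ec2__placement'; the
Placement class below is a stand-in for boto's placement object):

    # stand-in for boto's placement object; illustration only
    class Placement(object):
        def __init__(self, zone):
            self.zone = zone

    key = 'ec2_' + '_placement'     # attribute '_placement' -> key 'ec2__placement'
    value = Placement('us-east-1a')

    instance_vars = {}
    if key == 'ec2__placement':
        # re-key to the friendlier name playbooks expect
        instance_vars['ec2_placement'] = value.zone

    print instance_vars             # {'ec2_placement': 'us-east-1a'}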
---
 plugins/inventory/ec2.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py
index 4ec4abd36d7..84841d3f09a 100755
--- a/plugins/inventory/ec2.py
+++ b/plugins/inventory/ec2.py
@@ -510,6 +510,8 @@ class Ec2Inventory(object):
                 instance_vars[key] = ''
             elif key == 'ec2_region':
                 instance_vars[key] = value.name
+            elif key == 'ec2__placement':
+                instance_vars['ec2_placement'] = value.zone
             elif key == 'ec2_tags':
                 for k, v in value.iteritems():
                     key = self.to_safe('ec2_tag_' + k)

From d3b544e8427e8f2410e6d4fb63518b4db32137f4 Mon Sep 17 00:00:00 2001
From: Mike Grozak
Date: Fri, 7 Feb 2014 14:13:41 +0100
Subject: [PATCH 056/772] Found a way to create symlinks to files without
 absolute path prefixes; fixed incorrect force handling on hard links, which
 cannot point to non-existent files
---
 library/files/file | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/library/files/file b/library/files/file
index 4b59b652c7d..7f789242df2 100644
--- a/library/files/file
+++ b/library/files/file
@@ -149,7 +149,6 @@ def main():
         original_basename = dict(required=False), # Internal use only, for recursive ops
         recurse  = dict(default='no', type='bool'),
         force = dict(required=False,default=False,type='bool'),
-        chdir = dict(required=False,default=False),
         diff_peek = dict(default=None),
         validate = dict(required=False, default=None),
     ),
@@ -161,15 +160,8 @@ def main():
     state = params['state']
     force = params['force']

-    params['chdir'] = chdir = os.path.expanduser(params['chdir'])
     params['path'] = path = os.path.expanduser(params['path'])

-    if state == 'link' and chdir is not None and os.path.isdir(chdir):
-        os.chdir(chdir)
-        # catch exception permission deny, no directory, etc
-        # save current working directory, chdir to it at the end of the module
-        # or before any escape
-
     # short-circuit for diff_peek
     if params.get('diff_peek', None) is not None:
         appears_binary = False
@@ -298,7 +290,7 @@ def main():
     else:
         module.fail_json(msg="absolute paths are required")

-    if not os.path.exists(abs_src) and not force:
+    if not os.path.exists(abs_src):
         module.fail_json(path=path, src=src, msg='src file does not exist')

     if prev_state == 'absent':

From f386e137fe03aff6242018dde5a297ce2f86db07 Mon Sep 17 00:00:00 2001
From: Mike Grozak
Date: Fri, 7 Feb 2014 14:29:20 +0100
Subject: [PATCH 057/772] Updated the force parameter description; added an
 example of how to create symlinks pointing to non-absolute paths
---
 library/files/file | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/library/files/file b/library/files/file
index 7f789242df2..ac766839f16 100644
--- a/library/files/file
+++ b/library/files/file
@@ -123,8 +123,7 @@ options:
     default: "no"
     choices: [ "yes", "no" ]
     description:
-      - 'force the creation of the symlinks in two cases: the source file does
-        not exist (but will appear later); the destination exists and is a file (so, we need to unlink the
+      - 'force the creation of the symlinks when the destination exists and is a file (so, we need to unlink the
         "path" file and create symlink to the "src" file in place of it).'
 notes:
     - See also M(copy), M(template), M(assemble)
@@ -135,6 +134,10 @@ author: Michael DeHaan
 EXAMPLES = '''
 - file: path=/etc/foo.conf owner=foo group=foo mode=0644
 - file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link
+- file: src=/tmp/{{ item.path }} dest={{ item.dest }} state=link
+  with_items:
+    - { path: 'x', dest: 'y' }
+    - { path: 'z', dest: 'k' }
 '''

 def main():

From 242bfd8c68be0c55939b176bad7798607813d620 Mon Sep 17 00:00:00 2001
From: Joey Baker
Date: Fri, 7 Feb 2014 15:41:21 -0800
Subject: [PATCH 058/772] NPM install latest made more efficient

`npm install` is smart enough to only update outdated modules. Checking
for `outdated` and then running `update` repeats the same work twice.
---
 library/packaging/npm | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/library/packaging/npm b/library/packaging/npm
index 3a4cd13f5d7..19825502589 100644
--- a/library/packaging/npm
+++ b/library/packaging/npm
@@ -209,12 +209,7 @@ def main():
             changed = True
             npm.install()
     elif state == 'latest':
-        installed, missing = npm.list()
-        outdated = npm.list_outdated()
-        if len(missing) or len(outdated):
-            changed = True
-            npm.install()
-            npm.update()
+        npm.install()
     else: #absent
         installed, missing = npm.list()
         if name in installed:

From 5655ffa8bf2334752fbf936ecd66e2258aaec8b9 Mon Sep 17 00:00:00 2001
From: Joey Baker
Date: Fri, 7 Feb 2014 15:50:17 -0800
Subject: [PATCH 059/772] outdated check needed to set `changed`
---
 library/packaging/npm | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/library/packaging/npm b/library/packaging/npm
index 19825502589..7c03ab383d6 100644
--- a/library/packaging/npm
+++ b/library/packaging/npm
@@ -209,7 +209,11 @@ def main():
             changed = True
             npm.install()
     elif state == 'latest':
-        npm.install()
+        installed, missing = npm.list()
+        outdated = npm.list_outdated()
+        if len(missing) or len(outdated):
+          changed = True
+          npm.install()
     else: #absent
         installed, missing = npm.list()
         if name in installed:

From 95545ca6897d05cffecb7b823a162bbac78323d4 Mon Sep 17 00:00:00 2001
From: Joey Baker
Date: Fri, 7 Feb 2014 15:50:44 -0800
Subject: [PATCH 060/772] Fix indentation error
---
 library/packaging/npm | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/library/packaging/npm b/library/packaging/npm
index 7c03ab383d6..b315d70a870 100644
--- a/library/packaging/npm
+++ b/library/packaging/npm
@@ -212,8 +212,8 @@ def main():
         installed, missing = npm.list()
         outdated = npm.list_outdated()
         if len(missing) or len(outdated):
-          changed = True
-          npm.install()
+            changed = True
+            npm.install()
     else: #absent
         installed, missing = npm.list()
         if name in installed:

From 4a7c6a9727196593422e3949b948093de56ba67e Mon Sep 17 00:00:00 2001
From: Lorin Hochstein
Date: Fri, 7 Feb 2014 19:52:55 -0500
Subject: [PATCH 061/772] apt module: Add support for installing .deb packages

Support installing .deb packages from the local filesystem.

    apt: deb=/tmp/mypackage.deb
---
 library/packaging/apt | 88 +++++++++++++++++++++++++++++++++++++++----
 1 file changed, 80 insertions(+), 8 deletions(-)

diff --git a/library/packaging/apt b/library/packaging/apt
index eb64f8701fb..6efb50c1695 100644
--- a/library/packaging/apt
+++ b/library/packaging/apt
@@ -88,6 +88,11 @@ options:
     - Options should be supplied as comma separated list
     required: false
     default: 'force-confdef,force-confold'
+  deb:
+    description:
+      - Path to a local .deb package file to install.
+ required: false + version_added: "1.5" requirements: [ python-apt, aptitude ] author: Matthew Williams notes: @@ -125,6 +130,9 @@ EXAMPLES = ''' # Pass options to dpkg on run - apt: upgrade=dist update_cache=yes dpkg_options='force-confold,force-confdef' + +# Install a .deb package +- apt: deb=/tmp/mypackage.deb ''' @@ -148,6 +156,7 @@ APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp" HAS_PYTHON_APT = True try: import apt + import apt.debfile import apt_pkg except: HAS_PYTHON_APT = False @@ -182,7 +191,7 @@ def package_status(m, pkgname, version, cache, state): has_files = False # older python-apt cannot be used to determine non-purged try: - package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED + package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED except AttributeError: # python-apt 0.7.X has very weak low-level object try: # might not be necessary as python-apt post-0.7.X should have current_state property @@ -269,12 +278,57 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, rc, out, err = m.run_command(cmd) if rc: - m.fail_json(msg="'apt-get install %s' failed: %s" % (packages, err), stdout=out, stderr=err) + return (False, dict(msg="'apt-get install %s' failed: %s" % (packages, err), stdout=out, stderr=err)) else: - m.exit_json(changed=True, stdout=out, stderr=err) + return (True, dict(changed=True, stdout=out, stderr=err)) else: + return (True, dict(changed=False)) + +def install_deb(m, debfile, cache, force, install_recommends, dpkg_options): + changed=False + pkg = apt.debfile.DebPackage(debfile) + + # Check if it's already installed + if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME: m.exit_json(changed=False) + # Check if package is installable + if not pkg.check(): + m.fail_json(msg=pkg._failure_string) + + (success, retvals) = install(m=m, pkgspec=pkg.missing_deps, + cache=cache, + install_recommends=install_recommends, + dpkg_options=expand_dpkg_options(dpkg_options)) + if not success: + m.fail_json(**retvals) + changed = retvals['changed'] + + + options = ' '.join(["--%s"% x for x in dpkg_options.split(",")]) + + if m.check_mode: + options += " --simulate" + if force: + options += " --force-yes" + + + cmd = "dpkg %s -i %s" % (options, debfile) + rc, out, err = m.run_command(cmd) + + if "stdout" in retvals: + stdout = retvals["stdout"] + out + else: + stdout = out + if "stderr" in retvals: + stderr = retvals["stderr"] + err + else: + stderr = err + if rc == 0: + m.exit_json(changed=True, stdout=stdout, stderr=stderr) + else: + m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr) + def remove(m, pkgspec, cache, purge=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): packages = "" @@ -349,14 +403,15 @@ def main(): cache_valid_time = dict(type='int'), purge = dict(default=False, type='bool'), package = dict(default=None, aliases=['pkg', 'name']), + deb = dict(default=None), default_release = dict(default=None, aliases=['default-release']), install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'), force = dict(default='no', type='bool'), upgrade = dict(choices=['yes', 'safe', 'full', 'dist']), dpkg_options = dict(default=DPKG_OPTIONS) ), - mutually_exclusive = [['package', 'upgrade']], - required_one_of = [['package', 'upgrade', 'update_cache']], + mutually_exclusive = [['package', 'upgrade', 'deb']], + required_one_of = [['package', 'upgrade', 'update_cache', 'deb']], supports_check_mode = True ) @@ -418,7 +473,7 @@ def main(): if 
cache_valid is not True: cache.update() cache.open(progress=None) - if not p['package'] and not p['upgrade']: + if not p['package'] and not p['upgrade'] and not p['deb']: module.exit_json(changed=False) force_yes = p['force'] @@ -426,6 +481,13 @@ def main(): if p['upgrade']: upgrade(module, p['upgrade'], force_yes, dpkg_options) + if p['deb']: + if p['state'] != "installed": + module.fail_json(msg="deb only supports state=installed") + install_deb(module, p['deb'], cache, + install_recommends=install_recommends, + force=force_yes, dpkg_options=p['dpkg_options']) + packages = p['package'].split(',') latest = p['state'] == 'latest' for package in packages: @@ -435,14 +497,24 @@ def main(): module.fail_json(msg='version number inconsistent with state=latest: %s' % package) if p['state'] == 'latest': - install(module, packages, cache, upgrade=True, + result = install(module, packages, cache, upgrade=True, default_release=p['default_release'], install_recommends=install_recommends, force=force_yes, dpkg_options=dpkg_options) + (success, retvals) = result + if success: + module.exit_json(**retvals) + else: + module.fail_json(**retvals) elif p['state'] in [ 'installed', 'present' ]: - install(module, packages, cache, default_release=p['default_release'], + result = install(module, packages, cache, default_release=p['default_release'], install_recommends=install_recommends,force=force_yes, dpkg_options=dpkg_options) + (success, retvals) = result + if success: + module.exit_json(**retvals) + else: + module.fail_json(**retvals) elif p['state'] in [ 'removed', 'absent' ]: remove(module, packages, cache, p['purge'], dpkg_options) From 674969a7d87586792144cf0b8f532a3ba8c889e3 Mon Sep 17 00:00:00 2001 From: Matt Saunders Date: Sat, 8 Feb 2014 14:45:03 +0000 Subject: [PATCH 062/772] Added user_data parameter to nova_compute module - currently no file parsing just text - Re issue #4992 --- library/cloud/nova_compute | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/library/cloud/nova_compute b/library/cloud/nova_compute index af693229333..d059024e2da 100644 --- a/library/cloud/nova_compute +++ b/library/cloud/nova_compute @@ -107,6 +107,11 @@ options: - The amount of time the module should wait for the VM to get into active state required: false default: 180 + user_data: + description: + - Opaque blob of data which is made available to the instance + required: false + default: None requirements: ["novaclient"] ''' @@ -157,6 +162,8 @@ def _create_server(module, nova): 'meta' : module.params['meta'], 'key_name': module.params['key_name'], 'security_groups': module.params['security_groups'].split(','), + #userdata is unhyphenated in novaclient, but hyphenated here for consistency with the ec2 module: + 'userdata': module.params['user_data'], } if not module.params['key_name']: del bootkwargs['key_name'] @@ -227,7 +234,8 @@ def main(): meta = dict(default=None), wait = dict(default='yes', choices=['yes', 'no']), wait_for = dict(default=180), - state = dict(default='present', choices=['absent', 'present']) + state = dict(default='present', choices=['absent', 'present']), + user_data = dict(default=None) ), ) From 22468e9455b53cb814d30d373042fa449a317ec8 Mon Sep 17 00:00:00 2001 From: Baptiste Lafontaine Date: Mon, 10 Feb 2014 15:04:17 +0100 Subject: [PATCH 063/772] Adding example for svn export --- library/source_control/subversion | 3 +++ 1 file changed, 3 insertions(+) diff --git a/library/source_control/subversion b/library/source_control/subversion index 43824ec25b1..f6c4703f015 
100644 --- a/library/source_control/subversion +++ b/library/source_control/subversion @@ -81,6 +81,9 @@ options: EXAMPLES = ''' # Checkout subversion repository to specified folder. - subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/checkout + +# Export subversion directory to folder +- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/export export=True ''' import re From 4aaf8f9a4c22c56c8941c254b99aabdc1affbd16 Mon Sep 17 00:00:00 2001 From: zimbatm Date: Mon, 10 Feb 2014 11:52:26 +0000 Subject: [PATCH 064/772] library/digital_ocean: Adds the missing backups_enabled create option --- library/cloud/digital_ocean | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/library/cloud/digital_ocean b/library/cloud/digital_ocean index a6721a55da1..73e48b0fa30 100644 --- a/library/cloud/digital_ocean +++ b/library/cloud/digital_ocean @@ -75,6 +75,12 @@ options: version_added: "1.4" default: "no" choices: [ "yes", "no" ] + backups_enabled: + description: + - Optional, Boolean, enables backups for your droplet. + version_added: "1.5" + default: "no" + choices: [ "yes", "no" ] wait: description: - Wait for the droplet to be in state 'running' before returning. If wait is "no" an ip_address may not be returned. @@ -164,11 +170,11 @@ try: import dopy from dopy.manager import DoError, DoManager except ImportError, e: - print "failed=True msg='dopy >= 0.2.2 required for this module'" + print "failed=True msg='dopy >= 0.2.3 required for this module'" sys.exit(1) -if dopy.__version__ < '0.2.2': - print "failed=True msg='dopy >= 0.2.2 required for this module'" +if dopy.__version__ < '0.2.3': + print "failed=True msg='dopy >= 0.2.3 required for this module'" sys.exit(1) class TimeoutError(DoError): @@ -229,8 +235,8 @@ class Droplet(JsonfyMixIn): cls.manager = DoManager(client_id, api_key) @classmethod - def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False): - json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking) + def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False): + json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking, backups_enabled) droplet = cls(json) return droplet @@ -333,7 +339,8 @@ def core(module): region_id=getkeyordie('region_id'), ssh_key_ids=module.params['ssh_key_ids'], virtio=module.params['virtio'], - private_networking=module.params['private_networking'] + private_networking=module.params['private_networking'], + backups_enabled=module.params['backups_enabled'], ) if droplet.is_powered_on(): @@ -394,6 +401,7 @@ def main(): ssh_key_ids = dict(default=''), virtio = dict(type='bool', choices=BOOLEANS, default='yes'), private_networking = dict(type='bool', choices=BOOLEANS, default='no'), + backups_enabled = dict(type='bool', choices=BOOLEANS, default='no'), id = dict(aliases=['droplet_id'], type='int'), unique_name = dict(type='bool', default='no'), wait = dict(type='bool', default=True), From f3c22565547a0b24ffdb564ded022f07090c6b19 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 24 Dec 2013 14:29:30 -0500 Subject: [PATCH 065/772] vsphere/vmware inventory plugin Signed-off-by: Brian Coca --- plugins/inventory/vmware.ini | 15 +++ plugins/inventory/vmware.py | 205 +++++++++++++++++++++++++++++++++++ 2 files changed, 220 insertions(+) create mode 100644 plugins/inventory/vmware.ini create 
mode 100755 plugins/inventory/vmware.py

diff --git a/plugins/inventory/vmware.ini b/plugins/inventory/vmware.ini
new file mode 100644
index 00000000000..13b8384bf6d
--- /dev/null
+++ b/plugins/inventory/vmware.ini
@@ -0,0 +1,15 @@
+# Ansible vmware external inventory script settings
+#
+[defaults]
+guests_only = True
+#vm_group =
+#hw_group =
+
+[cache]
+cache_max_age = 3600
+cache_dir = /var/tmp
+
+[auth]
+host = vcenter.example.com
+user = ihasaccess
+password = ssshverysecret
diff --git a/plugins/inventory/vmware.py b/plugins/inventory/vmware.py
new file mode 100755
index 00000000000..6ed73865899
--- /dev/null
+++ b/plugins/inventory/vmware.py
@@ -0,0 +1,205 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+'''
+VMWARE external inventory script
+=================================
+
+shamelessly copied from existing inventory scripts.
+
+This script and its ini can be used more than once,
+
+e.g. vmware.py/vmware_colo.ini and vmware_idf.py/vmware_idf.ini
+(the script can be a symlink)
+
+So if you don't have a clustered vCenter but multiple ESX machines or
+just different clusters, you can have an inventory per each and
+automatically group hosts based on file name, or specify a group in
+the ini.
+'''
+
+import os
+import sys
+import time
+import ConfigParser
+from psphere.client import Client
+from psphere.managedobjects import HostSystem
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+
+def save_cache(cache_item, data, config):
+    ''' saves item to cache '''
+    # cache settings live in the [cache] section of the ini
+    dpath = config.get('cache', 'cache_dir')
+    try:
+        cache = open('/'.join([dpath,cache_item]), 'w')
+        cache.write(json.dumps(data))
+        cache.close()
+    except IOError, e:
+        pass # not really sure what to do here
+
+
+def get_cache(cache_item, config):
+    ''' returns cached item '''
+    dpath = config.get('cache', 'cache_dir')
+    inv = {}
+    try:
+        cache = open('/'.join([dpath,cache_item]), 'r')
+        inv = json.loads(cache.read())
+        cache.close()
+    except IOError, e:
+        pass # not really sure what to do here
+
+    return inv
+
+def cache_available(cache_item, config):
+    ''' checks if we have a 'fresh' cache available for item requested '''
+
+    if config.has_option('cache', 'cache_dir'):
+        dpath = config.get('cache', 'cache_dir')
+
+        try:
+            existing = os.stat( '/'.join([dpath,cache_item]))
+        except:
+            # cache doesn't exist or isn't accessible
+            return False
+
+        if config.has_option('cache', 'cache_max_age'):
+            maxage = config.get('cache', 'cache_max_age')
+
+            # the cache is fresh when its age (now - mtime) is within the maximum
+            if (int(time.time()) - existing.st_mtime) <= int(maxage):
+                return True
+
+    return False
+
+def get_host_info(host):
+    ''' Get variables about a specific host '''
+
+    hostinfo = {
+                'vmware_name' : host.name,
+                'vmware_tag' : host.tag,
+                'vmware_parent': host.parent.name,
+               }
+    for k in host.capability.__dict__.keys():
+        if k.startswith('_'):
+           continue
+        try:
+            hostinfo['vmware_' + k] = str(host.capability[k])
+        except:
+           continue
+
+    return hostinfo
+
+
+def get_inventory(client, config):
+    ''' Reads the inventory from cache or vmware api '''
+
+    if cache_available('inventory', config):
+        inv = get_cache('inventory',config)
+    else:
+        inv = { 'all': {'hosts': []}, '_meta': { 'hostvars': {} } }
+        default_group = os.path.basename(sys.argv[0]).rstrip('.py')
+
+        if config.has_option('defaults', 'guests_only'):
+            guests_only = config.getboolean('defaults', 'guests_only')
+        else:
+            guests_only = True
+
+        if not guests_only:
+            if config.has_option('defaults','hw_group'):
+                hw_group = config.get('defaults','hw_group')
+            else:
+                hw_group = default_group + '_hw'
+            inv[hw_group] = []
+
+        if
config.has_option('defaults','vm_group'):
+            vm_group = config.get('defaults','vm_group')
+        else:
+            vm_group = default_group + '_vm'
+        inv[vm_group] = []
+
+        # Loop through physical hosts:
+        hosts = HostSystem.all(client)
+        for host in hosts:
+            if not guests_only:
+                inv['all']['hosts'].append(host.name)
+                inv[hw_group].append(host.name)
+                if host.tag:
+                    taggroup = 'vmware_' + host.tag
+                    if taggroup in inv:
+                        inv[taggroup].append(host.name)
+                    else:
+                        inv[taggroup] = [ host.name ]
+
+                inv['_meta']['hostvars'][host.name] = get_host_info(host)
+                save_cache(host.name, inv['_meta']['hostvars'][host.name], config)
+
+            for vm in host.vm:
+                inv['all']['hosts'].append(vm.name)
+                inv[vm_group].append(vm.name)
+                if vm.tag:
+                    taggroup = 'vmware_' + vm.tag
+                    if taggroup in inv:
+                        inv[taggroup].append(vm.name)
+                    else:
+                        inv[taggroup] = [ vm.name ]
+
+                inv['_meta']['hostvars'][vm.name] = get_host_info(host)
+                save_cache(vm.name, inv['_meta']['hostvars'][vm.name], config)
+
+        save_cache('inventory', inv, config)
+    return json.dumps(inv)
+
+def get_single_host(client, config, hostname):
+
+    inv = {}
+
+    if cache_available(hostname, config):
+        inv = get_cache(hostname,config)
+    else:
+        hosts = HostSystem.all(client) #TODO: figure out single host getter
+        for host in hosts:
+            if hostname == host.name:
+                inv = get_host_info(host)
+                break
+            for vm in host.vm:
+                if hostname == vm.name:
+                    inv = get_host_info(host)
+                    break
+        save_cache(hostname,inv,config)
+
+    return json.dumps(inv)
+
+if __name__ == '__main__':
+    inventory = {}
+    hostname = None
+
+    if len(sys.argv) > 1:
+        if sys.argv[1] == "--host":
+            hostname = sys.argv[2]
+
+    # Read config
+    config = ConfigParser.SafeConfigParser()
+    for configfilename in [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'vmware.ini']:
+        if os.path.exists(configfilename):
+            config.read(configfilename)
+            break
+
+    try:
+        client = Client( config.get('auth','host'),
+                         config.get('auth','user'),
+                         config.get('auth','password'),
+                        )
+    except Exception, e:
+        client = None
+        #print >> STDERR "Unable to login (only cache available): %s", str(e)
+
+    # actually do the work
+    if hostname is None:
+        inventory = get_inventory(client, config)
+    else:
+        inventory = get_single_host(client, config, hostname)
+
+    # return to ansible
+    print inventory

From 471b1a7a47c3fdf6efe22f1525dcc12aad1b85f2 Mon Sep 17 00:00:00 2001
From: "Christopher H. Laco"
Date: Sun, 29 Dec 2013 19:45:35 -0500
Subject: [PATCH 066/772] Add rax_identity module

For some tasks, I need to drop the username/api_key into configuration
files. Rather than rely on how I'm calling the rax modules, it seemed
more appropriate to authenticate against Rackspace and return the
wealth of information contained in the pyrax identity payload with
other modules/templates.
---
 library/cloud/rax_identity | 132 +++++++++++++++++++++++++++++++++++++
 1 file changed, 132 insertions(+)
 create mode 100644 library/cloud/rax_identity

diff --git a/library/cloud/rax_identity b/library/cloud/rax_identity
new file mode 100644
index 00000000000..2890e40d160
--- /dev/null
+++ b/library/cloud/rax_identity
@@ -0,0 +1,132 @@
+#!/usr/bin/python -tt
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: rax_identity +short_description: Load Rackspace Cloud Identity +description: + - Verifies Rackspace Cloud credentials and returns identity information +version_added: "1.5" +options: + api_key: + description: + - Rackspace API key (overrides C(credentials)) + credentials: + description: + - File to find the Rackspace credentials in (ignored if C(api_key) and + C(username) are provided) + default: null + aliases: ['creds_file'] + region: + description: + - Region to authenticate against + default: DFW + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + username: + description: + - Rackspace username (overrides C(credentials)) +requirements: [ "pyrax" ] +author: Christopher H. Laco, Matt Martz +notes: + - The following environment variables can be used, C(RAX_USERNAME), + C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). + - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file + appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) + - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file + - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +''' + +EXAMPLES = ''' +- name: Load Rackspace Cloud Identity + gather_facts: False + hosts: local + connection: local + tasks: + - name: Load Identity + local_action: + module: rax_identity + credentials: ~/.raxpub + region: DFW + register: rackspace_identity +''' + +import sys + +from types import NoneType + +try: + import pyrax +except ImportError: + print("failed=True msg='pyrax required for this module'") + sys.exit(1) + + +NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) + + +def cloud_identity(module, state, identity): + for arg in (state, identity): + if not arg: + module.fail_json(msg='%s is required for rax_identity' % arg) + + instance = dict( + authenticated=identity.authenticated, + credentials=identity._creds_file + ) + changed = False + + for key, value in vars(identity).iteritems(): + if (isinstance(value, NON_CALLABLES) and + not key.startswith('_')): + instance[key] = value + + if state == 'present': + if not identity.authenticated: + module.fail_json(msg='Credentials could not be verified!') + + module.exit_json(changed=changed, identity=instance) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + state=dict(default='present', choices=['present', 'absent']) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + state = module.params.get('state') + + setup_rax_module(module, pyrax) + + cloud_identity(module, state, pyrax.identity) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +### invoke the module +main() From 35742fe0089bd04536eab36a5bbf89926004a476 Mon Sep 17 00:00:00 2001 From: Jacob Weber Date: Tue, 11 Feb 2014 22:02:53 -0800 Subject: [PATCH 067/772] Add regex_replace jinja2 filter --- lib/ansible/runner/filter_plugins/core.py | 10 ++++++++++ test/TestFilters.py | 
15 +++++++++++++++
 2 files changed, 25 insertions(+)

diff --git a/lib/ansible/runner/filter_plugins/core.py b/lib/ansible/runner/filter_plugins/core.py
index 623e770dea8..89c73127658 100644
--- a/lib/ansible/runner/filter_plugins/core.py
+++ b/lib/ansible/runner/filter_plugins/core.py
@@ -127,6 +127,15 @@ def search(value, pattern='', ignorecase=False):
     ''' Perform a `re.search` returning a boolean '''
     return regex(value, pattern, ignorecase, 'search')

+def regex_replace(value='', pattern='', replacement='', ignorecase=False):
+    ''' Perform a `re.sub` returning a string '''
+    if ignorecase:
+        flags = re.I
+    else:
+        flags = 0
+    _re = re.compile(pattern, flags=flags)
+    return _re.sub(replacement, value)
+
 def unique(a):
     return set(a)

@@ -195,6 +204,7 @@ class FilterModule(object):
         'match': match,
         'search': search,
         'regex': regex,
+        'regex_replace': regex_replace,

         # list
         'unique' : unique,
diff --git a/test/TestFilters.py b/test/TestFilters.py
index d850db4c3a3..e79d4c8970d 100644
--- a/test/TestFilters.py
+++ b/test/TestFilters.py
@@ -116,6 +116,21 @@ class TestFilters(unittest.TestCase):
             True)
         assert a == True

+    def test_regex_replace_case_sensitive(self):
+        a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^a.*i(.*)$',
+                'a\\1')
+        assert a == 'able'
+
+    def test_regex_replace_case_insensitive(self):
+        a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^A.*I(.*)$',
+                'a\\1', True)
+        assert a == 'able'
+
+    def test_regex_replace_no_match(self):
+        a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^b.*i(.*)$',
+                'a\\1')
+        assert a == 'ansible'
+
     #def test_filters(self):

     # this test is pretty low level using a playbook, hence I am disabling it for now -- MPD.

From 811aa2611ede8e425f981a8c3a722dcbdb934bf3 Mon Sep 17 00:00:00 2001
From: Maykel Moya
Date: Fri, 18 Oct 2013 12:35:40 +0200
Subject: [PATCH 068/772] Add support for string values

The SET GLOBAL statement requires proper quoting of values. For
example, the following correct queries will fail if quotes are toggled:

    mysql> SET GLOBAL innodb_lru_scan_depth = 2000;
    mysql> SET GLOBAL master_info_repository = "TABLE";

`mysql_variable` module doesn't quote the value argument, therefore
string values will fail.

    # this task will pass, 2000 is passed without quotes
    - name: set a numeric value
      mysql_variable: variable=innodb_lru_scan_depth value=2000

    # this task will fail, TABLE is passed without quotes
    - name: set a string value
      mysql_variable: variable=master_info_repository value=TABLE

With this patch prepared statements are used. Proper quoting is done
automatically based on the type of the variable, so an attempt to
convert the value to int, then to float, is made first.

Boolean values, i.e. ON and OFF, are not specially handled because they
can be quoted. For example, the following queries are correct and
equivalent, they all set _innodb_file_per_table_ to logical _True_:

    mysql> SET GLOBAL innodb_file_per_table = "ON";
    mysql> SET GLOBAL innodb_file_per_table = ON;
    mysql> SET GLOBAL innodb_file_per_table = 1;

Tested in mysql 5.5 and 5.6.
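A minimal sketch of the driver-side quoting this relies on (assumes the
MySQLdb driver the module already imports; host and credentials are
placeholders):

    import MySQLdb

    conn = MySQLdb.connect(host='localhost', user='root', passwd='secret')
    cursor = conn.cursor()

    # int parameter -> rendered as an unquoted numeric literal: ... = 2000
    cursor.execute("SET GLOBAL innodb_lru_scan_depth = %s", (2000,))

    # str parameter -> rendered quoted: ... = 'TABLE'
    cursor.execute("SET GLOBAL master_info_repository = %s", ("TABLE",))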
--- library/database/mysql_variables | 45 +++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 4 deletions(-) diff --git a/library/database/mysql_variables b/library/database/mysql_variables index 720478cc005..595e0bbb55d 100644 --- a/library/database/mysql_variables +++ b/library/database/mysql_variables @@ -76,14 +76,48 @@ else: mysqldb_found = True +def typedvalue(value): + """ + Convert value to number whenever possible, return same value + otherwise. + + >>> typedvalue('3') + 3 + >>> typedvalue('3.0') + 3.0 + >>> typedvalue('foobar') + 'foobar' + + """ + try: + return int(value) + except ValueError: + pass + + try: + return float(value) + except ValueError: + pass + + return value + + def getvariable(cursor, mysqlvar): cursor.execute("SHOW VARIABLES LIKE '" + mysqlvar + "'") mysqlvar_val = cursor.fetchall() return mysqlvar_val + def setvariable(cursor, mysqlvar, value): + """ Set a global mysql variable to a given value + + The DB driver will handle quoting of the given value based on its + type, thus numeric strings like '3.0' or '8' are illegal, they + should be passed as numeric literals. + + """ try: - cursor.execute("SET GLOBAL " + mysqlvar + "=" + value) + cursor.execute("SET GLOBAL " + mysqlvar + " = %s", (value,)) cursor.fetchall() result = True except Exception, e: @@ -203,11 +237,14 @@ def main(): else: if len(mysqlvar_val) < 1: module.fail_json(msg="Variable not available", changed=False) - if value == mysqlvar_val[0][1]: + # Type values before using them + value_wanted = typedvalue(value) + value_actual = typedvalue(mysqlvar_val[0][1]) + if value_wanted == value_actual: module.exit_json(msg="Variable already set to requested value", changed=False) - result = setvariable(cursor, mysqlvar, value) + result = setvariable(cursor, mysqlvar, value_wanted) if result is True: - module.exit_json(msg="Variable change succeeded", changed=True) + module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True) else: module.fail_json(msg=result, changed=False) From 1602a80d765989caa2456811038009c532ca0433 Mon Sep 17 00:00:00 2001 From: Isao Jonas Date: Wed, 12 Feb 2014 08:53:29 -0600 Subject: [PATCH 069/772] added elasticache vpc security group ids --- library/cloud/elasticache | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/library/cloud/elasticache b/library/cloud/elasticache index a54deafc25d..9309f7a5b41 100644 --- a/library/cloud/elasticache +++ b/library/cloud/elasticache @@ -58,6 +58,11 @@ options: - The port number on which each of the cache nodes will accept connections required: false default: 11211 + security_group_ids: + description: + - A list of vpc security group names to associate with this cache cluster. 
Only use if inside a vpc + required: false + default: ['default'] cache_security_groups: description: - A list of cache security group names to associate with this cache cluster @@ -152,7 +157,7 @@ class ElastiCacheManager(object): EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying'] def __init__(self, module, name, engine, cache_engine_version, node_type, - num_nodes, cache_port, cache_security_groups, zone, wait, + num_nodes, cache_port, cache_security_groups, security_group_ids, zone, wait, hard_modify, aws_access_key, aws_secret_key, region): self.module = module self.name = name @@ -162,6 +167,7 @@ class ElastiCacheManager(object): self.num_nodes = num_nodes self.cache_port = cache_port self.cache_security_groups = cache_security_groups + self.security_group_ids = security_group_ids self.zone = zone self.wait = wait self.hard_modify = hard_modify @@ -217,6 +223,7 @@ class ElastiCacheManager(object): engine=self.engine, engine_version=self.cache_engine_version, cache_security_group_names=self.cache_security_groups, + security_group_ids=self.security_group_ids, preferred_availability_zone=self.zone, port=self.cache_port) except boto.exception.BotoServerError, e: @@ -291,6 +298,7 @@ class ElastiCacheManager(object): num_cache_nodes=self.num_nodes, cache_node_ids_to_remove=nodes_to_remove, cache_security_group_names=self.cache_security_groups, + security_group_ids=self.security_group_ids, apply_immediately=True, engine_version=self.cache_engine_version) except boto.exception.BotoServerError, e: @@ -377,12 +385,20 @@ class ElastiCacheManager(object): if self.data[key] != value: return True - # Check security groups + # Check cache security groups cache_security_groups = [] for sg in self.data['CacheSecurityGroups']: cache_security_groups.append(sg['CacheSecurityGroupName']) if set(cache_security_groups) - set(self.cache_security_groups): return True + + # check vpc security groups + vpc_security_groups = [] + for sg in self.data['SecurityGroups']: + vpc_security_groups.append(sg['SecurityGroupId']) + if set(vpc_security_groups) - set(self.security_group_ids): + return True + return False def _requires_destroy_and_create(self): @@ -469,6 +485,8 @@ def main(): cache_port={'required': False, 'default': 11211, 'type': 'int'}, cache_security_groups={'required': False, 'default': ['default'], 'type': 'list'}, + security_group_ids={'required': False, 'default': [], + 'type': 'list'}, zone={'required': False, 'default': None}, ec2_secret_key={'default': None, 'aliases': ['aws_secret_key', 'secret_key'], @@ -493,6 +511,7 @@ def main(): num_nodes = module.params['num_nodes'] cache_port = module.params['cache_port'] cache_security_groups = module.params['cache_security_groups'] + security_group_ids = module.params['security_group_ids'] zone = module.params['zone'] wait = module.params['wait'] hard_modify = module.params['hard_modify'] @@ -506,7 +525,8 @@ def main(): elasticache_manager = ElastiCacheManager(module, name, engine, cache_engine_version, node_type, num_nodes, cache_port, - cache_security_groups, zone, wait, + cache_security_groups, + security_group_ids, zone, wait, hard_modify, aws_access_key, aws_secret_key, region) From 44578e030d700f09d72dd85832e8a1db57ac304e Mon Sep 17 00:00:00 2001 From: Drew Date: Wed, 12 Feb 2014 11:31:11 -0600 Subject: [PATCH 070/772] Fix proper defaulting/required of 'state' for gem module The docs for 'gem' say state is required, but the actual code says it's not required. 
If it's not included (and there's no default) then the if block falls through with no changes and no errors (it neither adds nor removes the gem). This change synchronizes the docs with the code. It also adds the sane default of state=present, which is consistent with all other modules with the 'state' field. --- library/packaging/gem | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/library/packaging/gem b/library/packaging/gem index 8c46ceb5b3d..18a41660ecb 100644 --- a/library/packaging/gem +++ b/library/packaging/gem @@ -34,8 +34,9 @@ options: state: description: - The desired state of the gem. C(latest) ensures that the latest version is installed. - required: true + required: false choices: [present, absent, latest] + default: present gem_source: description: - The path to a local gem used as installation source. @@ -186,7 +187,7 @@ def main(): include_dependencies = dict(required=False, default=True, type='bool'), name = dict(required=True, type='str'), repository = dict(required=False, aliases=['source'], type='str'), - state = dict(required=False, choices=['present','absent','latest'], type='str'), + state = dict(required=False, default='present', choices=['present','absent','latest'], type='str'), user_install = dict(required=False, default=True, type='bool'), version = dict(required=False, type='str'), ), From 7b7c6c38bcc2ad23d096007f0268247a444a6db8 Mon Sep 17 00:00:00 2001 From: Tomas Karasek Date: Thu, 13 Feb 2014 16:33:25 +0200 Subject: [PATCH 071/772] Added inventory plugin for hosts from ~/.ssh/config --- plugins/inventory/ssh_config.py | 110 ++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100755 plugins/inventory/ssh_config.py diff --git a/plugins/inventory/ssh_config.py b/plugins/inventory/ssh_config.py new file mode 100755 index 00000000000..24017e62282 --- /dev/null +++ b/plugins/inventory/ssh_config.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python + +# (c) 2014, Tomas Karasek +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Dynamic inventory script which lets you use aliases from ~/.ssh/config. +# +# It prints inventory based on parsed ~/.ssh/config. You can refer to hosts +# with their alias, rather than with the IP or hostname. It takes advantage +# of the ansible_ssh_{host,port,user,private_key_file}. 
+
+#
+# If you have in your .ssh/config:
+# Host git
+# HostName git.domain.org
+# User tkarasek
+# IdentityFile /home/tomk/keys/thekey
+#
+# You can do:
+# $ ansible git -m ping
+#
+# Example invocation:
+# ssh_config.py --list
+# ssh_config.py --host 
+
+import argparse
+import os.path
+import sys
+
+import paramiko
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+_key = 'ssh_config'
+
+_ssh_to_ansible = [('user', 'ansible_ssh_user'),
+ ('hostname', 'ansible_ssh_host'),
+ ('identityfile', 'ansible_ssh_private_key_file'),
+ ('port', 'ansible_ssh_port')]
+
+
+def get_config():
+ with open(os.path.expanduser('~/.ssh/config')) as f:
+ cfg = paramiko.SSHConfig()
+ cfg.parse(f)
+ ret_dict = {}
+ for d in cfg._config:
+ _copy = dict(d)
+ del _copy['host']
+ ret_dict[d['host']] = _copy
+ return ret_dict
+
+
+def print_list():
+ cfg = get_config()
+ meta = {'hostvars': {}}
+ for alias, attributes in cfg.items():
+ tmp_dict = {}
+ for ssh_opt, ans_opt in _ssh_to_ansible:
+ if ssh_opt in attributes:
+ tmp_dict[ans_opt] = attributes[ssh_opt]
+ if tmp_dict:
+ meta['hostvars'][alias] = tmp_dict
+
+ print json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta})
+
+
+def print_host(host):
+ cfg = get_config()
+ print json.dumps(cfg[host])
+
+
+def get_args(args_list):
+ parser = argparse.ArgumentParser(
+ description='ansible inventory script parsing .ssh/config')
+ mutex_group = parser.add_mutually_exclusive_group(required=True)
+ help_list = 'list all hosts from .ssh/config inventory'
+ mutex_group.add_argument('--list', action='store_true', help=help_list)
+ help_host = 'display variables for a host'
+ mutex_group.add_argument('--host', help=help_host)
+ return parser.parse_args(args_list)
+
+
+def main(args_list):
+
+ args = get_args(args_list)
+ if args.list:
+ print_list()
+ if args.host:
+ print_host(args.host)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
From 6f44e7b039f53988f62511ebb64f252373b8f3bf Mon Sep 17 00:00:00 2001
From: Isao Jonas
Date: Thu, 13 Feb 2014 10:14:13 -0600
Subject: [PATCH 072/772] add security groups to elb_lb

---
 library/cloud/ec2_elb_lb | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/library/cloud/ec2_elb_lb b/library/cloud/ec2_elb_lb
index 5e4db144c87..941c564282d 100644
--- a/library/cloud/ec2_elb_lb
+++ b/library/cloud/ec2_elb_lb
@@ -51,6 +51,11 @@ options:
 - Purge existing availability zones on ELB that are not found in zones
 required: false
 default: false
+ security_group_ids:
+ description:
+ - A list of security groups to apply to the ELB
+ required: false
+ default: None
 health_check:
 description:
 - An associative array of health check configuration settings (see example)
@@ -175,7 +180,7 @@ class ElbManager(object):
 """Handles ELB creation and destruction"""

 def __init__(self, module, name, listeners=None, purge_listeners=None,
- zones=None, purge_zones=None, health_check=None,
+ zones=None, purge_zones=None, security_group_ids=None, health_check=None,
 aws_access_key=None, aws_secret_key=None, region=None):
 self.module = module
 self.name = name
@@ -183,6 +188,7 @@ class ElbManager(object):
 self.purge_listeners = purge_listeners
 self.zones = zones
 self.purge_zones = purge_zones
+ self.security_group_ids = security_group_ids
 self.health_check = health_check

 self.aws_access_key = aws_access_key
@@ -201,6 +207,7 @@ class ElbManager(object):
 self._create_elb()
 else:
 self._set_zones()
+ self._set_security_groups()
 self._set_elb_listeners()
 self._set_health_check()

@@ -220,6 +227,7 @@ class ElbManager(object):
 'name': self.elb.name,
 'dns_name': self.elb.dns_name,
 'zones': self.elb.availability_zones,
+ 'security_group_ids': self.elb.security_groups,
 'status': self.status
 }

@@ -273,6 +281,7 @@ class ElbManager(object):
 listeners = [self._listener_as_tuple(l) for l in self.listeners]
 self.elb = self.elb_conn.create_load_balancer(name=self.name,
 zones=self.zones,
+ security_groups=self.security_group_ids,
 complex_listeners=listeners)
 if self.elb:
 self.changed = True
@@ -397,6 +406,11 @@ class ElbManager(object):
 if zones_to_disable:
 self._disable_zones(zones_to_disable)

+ def _set_security_groups(self):
+ if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
+ self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
+ self.changed = True
+
 def _set_health_check(self):
 """Set health check values on ELB as needed"""
 if self.health_check:
@@ -449,6 +463,7 @@ def main():
 zones={'default': None, 'required': False, 'type': 'list'},
 purge_zones={'default': False, 'required': False,
 'choices': BOOLEANS, 'type': 'bool'},
+ security_group_ids={'default': None, 'required': False, 'type': 'list'},
 health_check={'default': None, 'required': False, 'type': 'dict'},
 ec2_secret_key={'default': None,
 'aliases': ['aws_secret_key', 'secret_key'],
@@ -471,6 +486,7 @@ def main():
 purge_listeners = module.params['purge_listeners']
 zones = module.params['zones']
 purge_zones = module.params['purge_zones']
+ security_group_ids = module.params['security_group_ids']
 health_check = module.params['health_check']

 if state == 'present' and not listeners:
@@ -480,7 +496,7 @@ def main():
 module.fail_json(msg="At least one availability zone is required for ELB creation")

 elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
- purge_zones, health_check, aws_access_key,
+ purge_zones, security_group_ids, health_check, aws_access_key,
 aws_secret_key, region=region)

 if state == 'present':
From 30cabddf76e740f136463628eb8684289a1d9a35 Mon Sep 17 00:00:00 2001
From: Stefhen Hovland
Date: Fri, 14 Feb 2014 00:11:21 -0500
Subject: [PATCH 073/772] Ensure option output is sorted.

---
 bin/ansible-doc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/bin/ansible-doc b/bin/ansible-doc
index 7e9a2eb81f5..96703d9bc1f 100755
--- a/bin/ansible-doc
+++ b/bin/ansible-doc
@@ -98,7 +98,7 @@ def get_man_text(doc):
 if 'option_keys' in doc and len(doc['option_keys']) > 0:
 text.append("Options (= is mandatory):\n")

- for o in doc['option_keys']:
+ for o in sorted(doc['option_keys']):
 opt = doc['options'][o]

 if opt.get('required', False):
@@ -146,7 +146,7 @@ def get_snippet_text(doc):
 text.append("- name: %s" % (desc))
 text.append(" action: %s" % (doc['module']))

- for o in doc['options']:
+ for o in sorted(doc['options'].keys()):
 opt = doc['options'][o]
 desc = tty_ify("".join(opt['description']))
 s = o + "="
From e1b7278265f11b8c77ceaf03a3dc5dee512c4325 Mon Sep 17 00:00:00 2001
From: Stefhen Hovland
Date: Fri, 14 Feb 2014 00:12:29 -0500
Subject: [PATCH 074/772] Only display equals sign in summary for required options.
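
For illustration, the new rule boils down to this standalone sketch (the
option dicts below are invented for the example; ansible-doc reads the real
ones from each module's parsed DOCUMENTATION):

    # append '=' to the option name only when the option is required
    def snippet_line(name, opt):
        s = name + "=" if opt.get('required', False) else name
        desc = "".join(opt.get('description', []))
        return "      %-20s # %s" % (s, desc)

    print snippet_line('name', {'required': True, 'description': ['package name']})
    print snippet_line('state', {'required': False, 'description': ['desired state']})

This renders 'name=' for the required option but a bare 'state' for the
optional one, matching the "= is mandatory" convention in the man-page view.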
--- bin/ansible-doc | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/bin/ansible-doc b/bin/ansible-doc index 96703d9bc1f..a77fff81302 100755 --- a/bin/ansible-doc +++ b/bin/ansible-doc @@ -149,7 +149,12 @@ def get_snippet_text(doc): for o in sorted(doc['options'].keys()): opt = doc['options'][o] desc = tty_ify("".join(opt['description'])) - s = o + "=" + + if opt.get('required', False): + s = o + "=" + else: + s = o + text.append(" %-20s # %s" % (s, desc)) text.append('') From c427a829a07dbe1390a9ec1d81c8ae7534ed8455 Mon Sep 17 00:00:00 2001 From: Peter Gehres Date: Thu, 13 Feb 2014 22:47:14 -0800 Subject: [PATCH 075/772] Adding better failure output for chkconfig failures rather than just saying that the service name is unknown --- library/system/service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/service b/library/system/service index 2e26a47b636..732d868c786 100644 --- a/library/system/service +++ b/library/system/service @@ -577,7 +577,7 @@ class LinuxService(Service): self.execute_command("%s --add %s" % (self.enable_cmd, self.name)) (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name)) if not self.name in out: - self.module.fail_json(msg="unknown service name") + self.module.fail_json(msg="service %s does not support chkconfig" % self.name) state = out.split()[-1] if self.enable and ( "3:on" in out and "5:on" in out ): return From 899811f339ee585997ab734793651264434f0b74 Mon Sep 17 00:00:00 2001 From: Bruce Pennypacker Date: Fri, 14 Feb 2014 20:16:58 +0000 Subject: [PATCH 076/772] Added wait_timeout parameter --- library/cloud/ec2_elb | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/library/cloud/ec2_elb b/library/cloud/ec2_elb index c6f4a72b0e1..fe5260bb16b 100644 --- a/library/cloud/ec2_elb +++ b/library/cloud/ec2_elb @@ -82,6 +82,12 @@ options: choices: ["yes", "no"] aliases: [] version_added: "1.5" + wait_timeout: + description: + - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no. 
+ required: false
+ default: 0
+ version_added: "1.5"
 """

@@ -133,7 +139,7 @@ class ElbManager:
 self.lbs = self._get_instance_lbs(ec2_elbs)
 self.changed = False

- def deregister(self, wait):
+ def deregister(self, wait, timeout):
 """De-register the instance from all ELBs and wait for the ELB
 to report it out-of-service"""

@@ -146,13 +152,13 @@ class ElbManager:
 return

 if wait:
- self._await_elb_instance_state(lb, 'OutOfService', initial_state)
+ self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)
 else:
 # We cannot assume no change was made if we don't wait
 # to find out
 self.changed = True

- def register(self, wait, enable_availability_zone):
+ def register(self, wait, enable_availability_zone, timeout):
 """Register the instance for all ELBs and wait for the ELB
 to report the instance in-service"""
 for lb in self.lbs:
@@ -165,7 +171,7 @@ class ElbManager:
 lb.register_instances([self.instance_id])

 if wait:
- self._await_elb_instance_state(lb, 'InService', initial_state)
+ self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
 else:
 # We cannot assume no change was made if we don't wait
 # to find out
@@ -195,10 +201,12 @@ class ElbManager:
 # lb.availability_zones
 return instance.placement in lb.availability_zones

- def _await_elb_instance_state(self, lb, awaited_state, initial_state):
+ def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
 """Wait for an ELB to change state
 lb: load balancer
 awaited_state : state to poll for (string)"""
+
+ wait_timeout = time.time() + timeout
 while True:
 instance_state = self._get_instance_health(lb)

@@ -217,7 +225,8 @@ class ElbManager:
 # If it's pending, we'll skip further checks and continue waiting
 pass
 elif (awaited_state == 'InService'
- and instance_state.reason_code == "Instance"):
+ and instance_state.reason_code == "Instance"
+ and time.time() >= wait_timeout):
 # If the reason_code for the instance being out of service is
 # "Instance" this indicates a failure state, e.g. the instance
 # has failed a health check or the ELB does not have the
@@ -303,7 +312,8 @@ def main():
 ec2_access_key={'default': None, 'aliases': ['aws_access_key', 'access_key']},
 region={'default': None, 'required': False, 'aliases':['aws_region', 'ec2_region'], 'choices':AWS_REGIONS},
 enable_availability_zone={'default': True, 'required': False, 'choices': BOOLEANS, 'type': 'bool'},
- wait={'required': False, 'choices': BOOLEANS, 'default': True, 'type': 'bool'}
+ wait={'required': False, 'choices': BOOLEANS, 'default': True, 'type': 'bool'},
+ wait_timeout={'required': False, 'default': 0, 'type': 'int'}
 )
 )

@@ -315,6 +325,7 @@ def main():
 region = module.params['region']
 wait = module.params['wait']
 enable_availability_zone = module.params['enable_availability_zone']
+ timeout = module.params['wait_timeout']

 if module.params['state'] == 'present' and 'ec2_elbs' not in module.params:
 module.fail_json(msg="ELBs are required for registration")
@@ -330,9 +341,9 @@ def main():
 module.fail_json(msg=msg)

 if module.params['state'] == 'present':
- elb_man.register(wait, enable_availability_zone)
+ elb_man.register(wait, enable_availability_zone, timeout)
 elif module.params['state'] == 'absent':
- elb_man.deregister(wait)
+ elb_man.deregister(wait, timeout)

 ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
 ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
From a5434a987a4d92e7355c8d0ee267d2a5738c5896 Mon Sep 17 00:00:00 2001
From: cgtx
Date: Tue, 18 Feb 2014 08:54:13 -0600
Subject: [PATCH 077/772] use correct permission mode when installing

---
 packaging/arch/PKGBUILD | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/packaging/arch/PKGBUILD b/packaging/arch/PKGBUILD
index 6f5c587ea94..916ecb6cba1 100644
--- a/packaging/arch/PKGBUILD
+++ b/packaging/arch/PKGBUILD
@@ -32,19 +32,21 @@ build() {
 package() {
 cd $pkgname

- mkdir -p "$pkgdir/usr/share/ansible"
+ install -dm755 $pkgdir/usr/share/ansible
 cp -dpr --no-preserve=ownership ./library/* "$pkgdir/usr/share/ansible/"
 cp -dpr --no-preserve=ownership ./examples "$pkgdir/usr/share/ansible"

 python2 setup.py install -O1 --root="$pkgdir"

- install -D examples/ansible.cfg "$pkgdir/etc/ansible/ansible.cfg"
+ install -Dm644 examples/ansible.cfg $pkgdir/etc/ansible/ansible.cfg
+ install -dm755 $pkgdir/etc/ansible/group_vars
+ install -dm755 $pkgdir/etc/ansible/host_vars

- install -D README.md "$pkgdir/usr/share/doc/ansible/README.md"
- install -D COPYING "$pkgdir/usr/share/doc/ansible/COPYING"
- install -D CHANGELOG.md "$pkgdir/usr/share/doc/ansible/CHANGELOG.md"
+ install -Dm644 README.md $pkgdir/usr/share/doc/ansible/README.md
+ install -Dm644 COPYING $pkgdir/usr/share/doc/ansible/COPYING
+ install -Dm644 CHANGELOG.md $pkgdir/usr/share/doc/ansible/CHANGELOG.md

- mkdir -p "$pkgdir/usr/share/man/man{1,3}"
+ install -dm755 ${pkgdir}/usr/share/man/man{1,3}
 cp -dpr --no-preserve=ownership docs/man/man1/*.1 "$pkgdir/usr/share/man/man1"
 cp -dpr --no-preserve=ownership docs/man/man3/*.3 "$pkgdir/usr/share/man/man3"
 }
From 5776c95497b7a0795ca26fc19188a186a43b6a2d Mon Sep 17 00:00:00 2001
From: "C. Morgan Hamill"
Date: Tue, 18 Feb 2014 11:11:18 -0500
Subject: [PATCH 078/772] Add 'vgoptions' parameter to lvg module.

Allows specifying extra flags to pass to `vgcreate`.
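
In practice the option string is simply whitespace-split and spliced into
the vgcreate argument list ahead of the existing flags. A standalone
illustration of that handling (the flag and device values here are invented
for the example):

    # how a vgoptions string becomes extra vgcreate arguments
    vgoptions = '--autobackup n'
    pesize, vg, dev_string = 4, 'vg.services', '/dev/sdb1'
    cmd = ['vgcreate'] + vgoptions.split() + ['-s', str(pesize), vg, dev_string]
    print cmd
    # ['vgcreate', '--autobackup', 'n', '-s', '4', 'vg.services', '/dev/sdb1']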
--- library/system/lvg | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/library/system/lvg b/library/system/lvg index 545ab1df5db..465b8610232 100644 --- a/library/system/lvg +++ b/library/system/lvg @@ -41,6 +41,12 @@ options: - The size of the physical extent in megabytes. Must be a power of 2. default: 4 required: false + vgoptions: + description: + - Additional options to pass to C(vgcreate) when creating the volume group. + default: null + required: false + version_added: "1.5" state: choices: [ "present", "absent" ] default: present @@ -99,6 +105,7 @@ def main(): vg=dict(required=True), pvs=dict(type='list'), pesize=dict(type='int', default=4), + vgoptions=dict(), state=dict(choices=["absent", "present"], default='present'), force=dict(type='bool', default='no'), ), @@ -109,6 +116,7 @@ def main(): state = module.params['state'] force = module.boolean(module.params['force']) pesize = module.params['pesize'] + vgoptions = module.params.get('vgoptions', '').split() if module.params['pvs']: dev_string = ' '.join(module.params['pvs']) @@ -168,7 +176,7 @@ def main(): else: module.fail_json(msg="Creating physical volume '%s' failed"%current_dev, rc=rc, err=err) vgcreate_cmd = module.get_bin_path('vgcreate') - rc,_,err = module.run_command("%s -s %s %s %s"%(vgcreate_cmd, pesize, vg, dev_string)) + rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg, dev_string]) if rc == 0: changed = True else: From f1aee1cfaf006071d1a48951b59d248d233f85e3 Mon Sep 17 00:00:00 2001 From: cgtx Date: Tue, 18 Feb 2014 15:56:11 -0600 Subject: [PATCH 079/772] remove optional vars folders --- packaging/arch/PKGBUILD | 2 -- 1 file changed, 2 deletions(-) diff --git a/packaging/arch/PKGBUILD b/packaging/arch/PKGBUILD index 916ecb6cba1..0df907ed28d 100644 --- a/packaging/arch/PKGBUILD +++ b/packaging/arch/PKGBUILD @@ -39,8 +39,6 @@ package() { python2 setup.py install -O1 --root="$pkgdir" install -Dm644 examples/ansible.cfg $pkgdir/etc/ansible/ansible.cfg - install -dm755 $pkgdir/etc/ansible/group_vars - install -dm755 $pkgdir/etc/ansible/host_vars install -Dm644 README.md $pkgdir/usr/share/doc/ansible/README.md install -Dm644 COPYING $pkgdir/usr/share/doc/ansible/COPYING From 01e1991baaadbf7f130949d8334aa3d701b8cf81 Mon Sep 17 00:00:00 2001 From: Jan-Piet Mens Date: Wed, 19 Feb 2014 14:53:45 +0100 Subject: [PATCH 080/772] module update: mqtt notification now uses Paho as mosquitto.py being deprecated --- library/notification/mqtt | 70 ++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 42 deletions(-) diff --git a/library/notification/mqtt b/library/notification/mqtt index d00307018dc..d701bd9348a 100644 --- a/library/notification/mqtt +++ b/library/notification/mqtt @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2013, Jan-Piet Mens +# (c) 2013, 2014, Jan-Piet Mens # # This file is part of Ansible # @@ -80,7 +80,7 @@ options: requirements: [ mosquitto ] notes: - This module requires a connection to an MQTT broker such as Mosquitto - U(http://mosquitto.org) and the C(mosquitto) Python module (U(http://mosquitto.org/python)). + U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)). author: Jan-Piet Mens ''' @@ -97,34 +97,12 @@ EXAMPLES = ''' # MQTT module support methods. 
# -HAS_MOSQUITTO = True +HAS_PAHOMQTT = True try: import socket - import mosquitto + import paho.mqtt.publish as mqtt except ImportError: - HAS_MOSQUITTO = False -import os - -def publish(module, topic, payload, server='localhost', port='1883', qos='0', - client_id='', retain=False, username=None, password=None): - '''Open connection to MQTT broker and publish the topic''' - - mqttc = mosquitto.Mosquitto(client_id, clean_session=True) - - if username is not None and password is not None: - mqttc.username_pw_set(username, password) - - rc = mqttc.connect(server, int(port), 5) - if rc != 0: - module.fail_json(msg="unable to connect to MQTT broker") - - mqttc.publish(topic, payload, int(qos), retain) - rc = mqttc.loop() - if rc != 0: - module.fail_json(msg="unable to send to MQTT broker") - - mqttc.disconnect() - + HAS_PAHOMQTT = False # =========================================== # Main @@ -132,10 +110,6 @@ def publish(module, topic, payload, server='localhost', port='1883', qos='0', def main(): - if not HAS_MOSQUITTO: - module.fail_json(msg="mosquitto is not installed") - - module = AnsibleModule( argument_spec=dict( server = dict(default = 'localhost'), @@ -151,15 +125,18 @@ def main(): supports_check_mode=True ) - server = module.params["server"] - port = module.params["port"] - topic = module.params["topic"] - payload = module.params["payload"] - client_id = module.params["client_id"] - qos = module.params["qos"] - retain = module.params["retain"] - username = module.params["username"] - password = module.params["password"] + if not HAS_PAHOMQTT: + module.fail_json(msg="Paho MQTT is not installed") + + server = module.params.get("server", 'localhost') + port = module.params.get("port", 1883) + topic = module.params.get("topic") + payload = module.params.get("payload") + client_id = module.params.get("client_id", '') + qos = int(module.params.get("qos", 0)) + retain = module.params.get("retain") + username = module.params.get("username", None) + password = module.params.get("password", None) if client_id is None: client_id = "%s_%s" % (socket.getfqdn(), os.getpid()) @@ -167,9 +144,18 @@ def main(): if payload and payload == 'None': payload = None + auth=None + if username is not None: + auth = { 'username' : username, 'password' : password } + try: - publish(module, topic, payload, server, port, qos, client_id, retain, - username, password) + rc = mqtt.single(topic, payload, + qos=qos, + retain=retain, + client_id=client_id, + hostname=server, + port=port, + auth=auth) except Exception, e: module.fail_json(msg="unable to publish to MQTT broker %s" % (e)) From f7c3847391fbe93a6bad32a9a971ac18cdf56a51 Mon Sep 17 00:00:00 2001 From: Daniel Jaouen Date: Wed, 19 Feb 2014 14:48:59 -0500 Subject: [PATCH 081/772] Add module homebrew_cask --- library/packaging/homebrew_cask | 511 ++++++++++++++++++++++++++++++++ 1 file changed, 511 insertions(+) create mode 100644 library/packaging/homebrew_cask diff --git a/library/packaging/homebrew_cask b/library/packaging/homebrew_cask new file mode 100644 index 00000000000..20241f2e5cd --- /dev/null +++ b/library/packaging/homebrew_cask @@ -0,0 +1,511 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Daniel Jaouen +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see .
+
+DOCUMENTATION = '''
+---
+module: homebrew_cask
+author: Daniel Jaouen
+short_description: Install/uninstall Homebrew casks.
+description:
+ - Manages Homebrew casks.
+version_added: "1.5"
+options:
+ name:
+ description:
+ - name of cask to install/remove
+ required: true
+ state:
+ description:
+ - state of the cask
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+'''
+EXAMPLES = '''
+- homebrew_cask: name=alfred state=present
+- homebrew_cask: name=alfred state=absent
+'''
+
+import os.path
+import re
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewCaskException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group(s):
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
+ return re.compile(group)
+# /utils ------------------------------------------------------------------ }}}
+
+
+class HomebrewCask(object):
+ '''A class to manage Homebrew casks.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ - # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ - # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_CASK_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ - # dashes
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
+ INVALID_CASK_REGEX = _create_regex_group(VALID_CASK_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, basestring):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, basestring)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_cask(cls, cask):
+ '''A valid cask is either None or alphanumeric characters and dashes.'''
+
+ if cask is None:
+ return True
+
+ return (
+ isinstance(cask, basestring)
+ and not cls.INVALID_CASK_REGEX.search(cask)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ ''' + A valid state is one of: + - installed + - absent + ''' + + if state is None: + return True + else: + return ( + isinstance(state, basestring) + and state.lower() in ( + 'installed', + 'absent', + ) + ) + + @classmethod + def valid_module(cls, module): + '''A valid module is an instance of AnsibleModule.''' + + return isinstance(module, AnsibleModule) + # /class validations ------------------------------------------- }}} + + # class properties --------------------------------------------- {{{ + @property + def module(self): + return self._module + + @module.setter + def module(self, module): + if not self.valid_module(module): + self._module = None + self.failed = True + self.message = 'Invalid module: {0}.'.format(module) + raise HomebrewCaskException(self.message) + + else: + self._module = module + return module + + @property + def path(self): + return self._path + + @path.setter + def path(self, path): + if not self.valid_path(path): + self._path = [] + self.failed = True + self.message = 'Invalid path: {0}.'.format(path) + raise HomebrewCaskException(self.message) + + else: + if isinstance(path, basestring): + self._path = path.split(':') + else: + self._path = path + + return path + + @property + def brew_path(self): + return self._brew_path + + @brew_path.setter + def brew_path(self, brew_path): + if not self.valid_brew_path(brew_path): + self._brew_path = None + self.failed = True + self.message = 'Invalid brew_path: {0}.'.format(brew_path) + raise HomebrewCaskException(self.message) + + else: + self._brew_path = brew_path + return brew_path + + @property + def params(self): + return self._params + + @params.setter + def params(self, params): + self._params = self.module.params + return self._params + + @property + def current_cask(self): + return self._current_cask + + @current_cask.setter + def current_cask(self, cask): + if not self.valid_cask(cask): + self._current_cask = None + self.failed = True + self.message = 'Invalid cask: {0}.'.format(cask) + raise HomebrewCaskException(self.message) + + else: + self._current_cask = cask + return cask + # /class properties -------------------------------------------- }}} + + def __init__(self, module, path=None, casks=None, state=None): + self._setup_status_vars() + self._setup_instance_vars(module=module, path=path, casks=casks, + state=state) + + self._prep() + + # prep --------------------------------------------------------- {{{ + def _setup_status_vars(self): + self.failed = False + self.changed = False + self.changed_count = 0 + self.unchanged_count = 0 + self.message = '' + + def _setup_instance_vars(self, **kwargs): + for key, val in kwargs.iteritems(): + setattr(self, key, val) + + def _prep(self): + self._prep_path() + self._prep_brew_path() + + def _prep_path(self): + if not self.path: + self.path = ['/usr/local/bin'] + + def _prep_brew_path(self): + if not self.module: + self.brew_path = None + self.failed = True + self.message = 'AnsibleModule not set.' + raise HomebrewCaskException(self.message) + + self.brew_path = self.module.get_bin_path( + 'brew', + required=True, + opt_dirs=self.path, + ) + if not self.brew_path: + self.brew_path = None + self.failed = True + self.message = 'Unable to locate homebrew executable.' 
+ raise HomebrewCaskException('Unable to locate homebrew executable.') + + return self.brew_path + + def _status(self): + return (self.failed, self.changed, self.message) + # /prep -------------------------------------------------------- }}} + + def run(self): + try: + self._run() + except HomebrewCaskException: + pass + + if not self.failed and (self.changed_count + self.unchanged_count > 1): + self.message = "Changed: %d, Unchanged: %d" % ( + self.changed_count, + self.unchanged_count, + ) + (failed, changed, message) = self._status() + + return (failed, changed, message) + + # checks ------------------------------------------------------- {{{ + def _current_cask_is_installed(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + cmd = [self.brew_path, 'cask', 'list'] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + casks = [cask_.strip() for cask_ in out.split('\n') if cask_.strip()] + return self.current_cask in casks + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + # /checks ------------------------------------------------------ }}} + + # commands ----------------------------------------------------- {{{ + def _run(self): + if self.state == 'installed': + return self._install_casks() + elif self.state == 'absent': + return self._uninstall_casks() + + if self.command: + return self._command() + + # updated -------------------------------- {{{ + def _update_homebrew(self): + rc, out, err = self.module.run_command([ + self.brew_path, + 'update', + ]) + if rc == 0: + if out and isinstance(out, basestring): + already_updated = any( + re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) + for s in out.split('\n') + if s + ) + if not already_updated: + self.changed = True + self.message = 'Homebrew updated successfully.' + else: + self.message = 'Homebrew already up-to-date.' 
+ + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + # /updated ------------------------------- }}} + + # installed ------------------------------ {{{ + def _install_current_cask(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if self._current_cask_is_installed(): + self.unchanged_count += 1 + self.message = 'Cask already installed: {0}'.format( + self.current_cask, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Cask would be installed: {0}'.format( + self.current_cask + ) + raise HomebrewCaskException(self.message) + + cmd = [opt + for opt in (self.brew_path, 'cask', 'install', self.current_cask) + if opt] + + rc, out, err = self.module.run_command(cmd) + + if self._current_cask_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Cask installed: {0}'.format(self.current_cask) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + + def _install_casks(self): + for cask in self.casks: + self.current_cask = cask + self._install_current_cask() + + return True + # /installed ----------------------------- }}} + + # uninstalled ---------------------------- {{{ + def _uninstall_current_cask(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if not self._current_cask_is_installed(): + self.unchanged_count += 1 + self.message = 'Cask already uninstalled: {0}'.format( + self.current_cask, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Cask would be uninstalled: {0}'.format( + self.current_cask + ) + raise HomebrewCaskException(self.message) + + cmd = [opt + for opt in (self.brew_path, 'cask', 'uninstall', self.current_cask) + if opt] + + rc, out, err = self.module.run_command(cmd) + + if not self._current_cask_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Cask uninstalled: {0}'.format(self.current_cask) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + + def _uninstall_casks(self): + for cask in self.casks: + self.current_cask = cask + self._uninstall_current_cask() + + return True + # /uninstalled ----------------------------- }}} + # /commands ---------------------------------------------------- }}} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(aliases=["cask"], required=False), + path=dict(required=False), + state=dict( + default="present", + choices=[ + "present", "installed", + "absent", "removed", "uninstalled", + ], + ), + ), + supports_check_mode=True, + ) + p = module.params + + if p['name']: + casks = p['name'].split(',') + else: + casks = None + + path = p['path'] + if path: + path = path.split(':') + else: + path = ['/usr/local/bin'] + + state = p['state'] + if state in ('present', 'installed'): + state = 'installed' + if state in ('absent', 'removed', 'uninstalled'): + state = 'absent' + + brew_cask = HomebrewCask(module=module, path=path, casks=casks, + state=state) + (failed, changed, message) = brew_cask.run() + if failed: + module.fail_json(msg=message) + else: + module.exit_json(changed=changed, msg=message) + +# this is magic, see 
lib/ansible/module_common.py +#<> +main() From 8a612ba5d42444bc02a3c8b05cdc4bd77aa0d201 Mon Sep 17 00:00:00 2001 From: Daniel Jaouen Date: Wed, 19 Feb 2014 18:49:25 -0500 Subject: [PATCH 082/772] Handle homebrew_cask "nothing to list" corner case. --- library/packaging/homebrew_cask | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/library/packaging/homebrew_cask b/library/packaging/homebrew_cask index 20241f2e5cd..96fd5cfef1c 100644 --- a/library/packaging/homebrew_cask +++ b/library/packaging/homebrew_cask @@ -327,7 +327,9 @@ class HomebrewCask(object): cmd = [self.brew_path, 'cask', 'list'] rc, out, err = self.module.run_command(cmd) - if rc == 0: + if 'nothing to list' in out: + return True + elif rc == 0: casks = [cask_.strip() for cask_ in out.split('\n') if cask_.strip()] return self.current_cask in casks else: From 55f636675c8dbc59e9ffe3d2c2134ce2f5cfe43b Mon Sep 17 00:00:00 2001 From: Daniel Jaouen Date: Wed, 19 Feb 2014 18:57:36 -0500 Subject: [PATCH 083/772] homebrew_cask: return False instead of True when nothing to list. --- library/packaging/homebrew_cask | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/homebrew_cask b/library/packaging/homebrew_cask index 96fd5cfef1c..ec9c47ecfb9 100644 --- a/library/packaging/homebrew_cask +++ b/library/packaging/homebrew_cask @@ -328,7 +328,7 @@ class HomebrewCask(object): rc, out, err = self.module.run_command(cmd) if 'nothing to list' in out: - return True + return False elif rc == 0: casks = [cask_.strip() for cask_ in out.split('\n') if cask_.strip()] return self.current_cask in casks From 911a3930a62adf4ae83e692c90dd306921c9336b Mon Sep 17 00:00:00 2001 From: Daniel Jaouen Date: Wed, 19 Feb 2014 18:59:33 -0500 Subject: [PATCH 084/772] homebrew_cask: check err instead of out for "nothing to list". --- library/packaging/homebrew_cask | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/homebrew_cask b/library/packaging/homebrew_cask index ec9c47ecfb9..9954da47a26 100644 --- a/library/packaging/homebrew_cask +++ b/library/packaging/homebrew_cask @@ -327,7 +327,7 @@ class HomebrewCask(object): cmd = [self.brew_path, 'cask', 'list'] rc, out, err = self.module.run_command(cmd) - if 'nothing to list' in out: + if 'nothing to list' in err: return False elif rc == 0: casks = [cask_.strip() for cask_ in out.split('\n') if cask_.strip()] From 02694b80de74c5d0c4558ffa432270e66292481c Mon Sep 17 00:00:00 2001 From: Daniel Jaouen Date: Wed, 19 Feb 2014 14:46:44 -0500 Subject: [PATCH 085/772] Update homebrew module. 
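
One building block worth noting: the rewrite validates package names, paths,
and the brew binary path against whitelist regexes produced by a small
helper. A standalone sketch of the idea behind that helper (simplified and
renamed here; the real path whitelists also allow spaces, colons, and
os.path.sep):

    import re

    def create_regex_group(s):
        # take the first token of each non-empty line (text after '#' is a
        # comment) and build a class matching any character *outside* the
        # whitelist
        lines = (line.strip() for line in s.split('\n') if line.strip())
        chars = filter(None, (line.split('#')[0].strip() for line in lines))
        return re.compile(r'[^' + r''.join(chars) + r']')

    INVALID_PACKAGE_REGEX = create_regex_group(r'''
        \w    # alphanumeric characters (i.e., [a-zA-Z0-9_])
        -     # dashes
    ''')

    print bool(INVALID_PACKAGE_REGEX.search('git-flow'))    # False: name is clean
    print bool(INVALID_PACKAGE_REGEX.search('foo; rm -rf')) # True: rejected

A package argument is accepted only when the "invalid" regex finds nothing,
which keeps shell metacharacters out of the commands the module runs.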
--- library/packaging/homebrew | 794 ++++++++++++++++++++++++++++++++----- 1 file changed, 700 insertions(+), 94 deletions(-) diff --git a/library/packaging/homebrew b/library/packaging/homebrew index ab1362acf1d..feea2dc8e03 100644 --- a/library/packaging/homebrew +++ b/library/packaging/homebrew @@ -2,6 +2,8 @@ # -*- coding: utf-8 -*- # (c) 2013, Andrew Dunham +# (c) 2013, Daniel Jaouen +# # Based on macports (Jimmy Tang ) # # This module is free software: you can redistribute it and/or modify @@ -24,7 +26,7 @@ author: Andrew Dunham short_description: Package manager for Homebrew description: - Manages Homebrew packages -version_added: "1.4" +version_added: "1.1" options: name: description: @@ -33,7 +35,7 @@ options: state: description: - state of the package - choices: [ 'present', 'absent' ] + choices: [ 'head', 'latest', 'installed', 'linked', 'uninstalled' ] required: false default: present update_homebrew: @@ -42,135 +44,739 @@ options: required: false default: "no" choices: [ "yes", "no" ] - install_options: - description: - - options flags to install a package - required: false - default: null notes: [] ''' EXAMPLES = ''' - homebrew: name=foo state=present - homebrew: name=foo state=present update_homebrew=yes +- homebrew: name=foo state=latest update_homebrew=yes +- homebrew: update_homebrew=yes upgrade=yes +- homebrew: name=foo state=head +- homebrew: name=foo state=linked - homebrew: name=foo state=absent - homebrew: name=foo,bar state=absent -- homebrew: name=foo state=present install_options=with-baz,enable-debug ''' +import os.path +import re + + +# exceptions -------------------------------------------------------------- {{{ +class HomebrewException(Exception): + pass +# /exceptions ------------------------------------------------------------- }}} + + +# utils ------------------------------------------------------------------- {{{ +def _create_regex_group(s): + lines = (line.strip() for line in s.split('\n') if line.strip()) + chars = filter(None, (line.split('#')[0].strip() for line in lines)) + group = r'[^' + r''.join(chars) + r']' + return re.compile(group) +# /utils ------------------------------------------------------------------ }}} + + +class Homebrew(object): + '''A class to manage Homebrew packages.''' + + # class regexes ------------------------------------------------ {{{ + VALID_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + : # colons + {sep} # the OS-specific path separator + - # dashes + '''.format(sep=os.path.sep) + + VALID_BREW_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + {sep} # the OS-specific path separator + - # dashes + '''.format(sep=os.path.sep) + + VALID_PACKAGE_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + - # dashes + ''' + + INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS) + INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS) + INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS) + # /class regexes ----------------------------------------------- }}} + + # class validations -------------------------------------------- {{{ + @classmethod + def valid_path(cls, path): + ''' + `path` must be one of: + - list of paths + - a string containing only: + - alphanumeric characters + - dashes + - spaces + - colons + - os.path.sep + ''' + + if isinstance(path, basestring): + return not cls.INVALID_PATH_REGEX.search(path) + + try: + iter(path) + except TypeError: + return False + else: + paths = path + return 
all(cls.valid_brew_path(path_) for path_ in paths) + + @classmethod + def valid_brew_path(cls, brew_path): + ''' + `brew_path` must be one of: + - None + - a string containing only: + - alphanumeric characters + - dashes + - spaces + - os.path.sep + ''' + + if brew_path is None: + return True -def update_homebrew(module, brew_path): - """ Updates packages list. """ - - rc, out, err = module.run_command("%s update" % brew_path) + return ( + isinstance(brew_path, basestring) + and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) + ) - if rc != 0: - module.fail_json(msg="could not update homebrew") + @classmethod + def valid_package(cls, package): + '''A valid package is either None or alphanumeric.''' + if package is None: + return True -def query_package(module, brew_path, name, state="present"): - """ Returns whether a package is installed or not. """ + return ( + isinstance(package, basestring) + and not cls.INVALID_PACKAGE_REGEX.search(package) + ) + + @classmethod + def valid_state(cls, state): + ''' + A valid state is one of: + - None + - installed + - upgraded + - head + - linked + - absent + ''' + + if state is None: + return True + else: + return ( + isinstance(state, basestring) + and state.lower() in ( + 'installed', + 'upgraded', + 'head', + 'linked', + 'absent', + ) + ) + + @classmethod + def valid_module(cls, module): + '''A valid module is an instance of AnsibleModule.''' + + return isinstance(module, AnsibleModule) + + # /class validations ------------------------------------------- }}} + + # class properties --------------------------------------------- {{{ + @property + def module(self): + return self._module + + @module.setter + def module(self, module): + if not self.valid_module(module): + self._module = None + self.failed = True + self.message = 'Invalid module: {0}.'.format(module) + raise HomebrewException(self.message) + + else: + self._module = module + return module + + @property + def path(self): + return self._path + + @path.setter + def path(self, path): + if not self.valid_path(path): + self._path = [] + self.failed = True + self.message = 'Invalid path: {0}.'.format(path) + raise HomebrewException(self.message) + + else: + if isinstance(path, basestring): + self._path = path.split(':') + else: + self._path = path + + return path + + @property + def brew_path(self): + return self._brew_path + + @brew_path.setter + def brew_path(self, brew_path): + if not self.valid_brew_path(brew_path): + self._brew_path = None + self.failed = True + self.message = 'Invalid brew_path: {0}.'.format(brew_path) + raise HomebrewException(self.message) + + else: + self._brew_path = brew_path + return brew_path + + @property + def params(self): + return self._params + + @params.setter + def params(self, params): + self._params = self.module.params + return self._params + + @property + def current_package(self): + return self._current_package + + @current_package.setter + def current_package(self, package): + if not self.valid_package(package): + self._current_package = None + self.failed = True + self.message = 'Invalid package: {0}.'.format(package) + raise HomebrewException(self.message) + + else: + self._current_package = package + return package + # /class properties -------------------------------------------- }}} + + def __init__(self, module, path=None, packages=None, state=None, + update_homebrew=False, ): + self._setup_status_vars() + self._setup_instance_vars(module=module, path=path, packages=packages, + state=state, update_homebrew=update_homebrew, ) + + self._prep() + + 
# prep --------------------------------------------------------- {{{ + def _setup_status_vars(self): + self.failed = False + self.changed = False + self.changed_count = 0 + self.unchanged_count = 0 + self.message = '' + + def _setup_instance_vars(self, **kwargs): + for key, val in kwargs.iteritems(): + setattr(self, key, val) + + def _prep(self): + self._prep_path() + self._prep_brew_path() + + def _prep_path(self): + if not self.path: + self.path = ['/usr/local/bin'] + + def _prep_brew_path(self): + if not self.module: + self.brew_path = None + self.failed = True + self.message = 'AnsibleModule not set.' + raise HomebrewException(self.message) + + self.brew_path = self.module.get_bin_path( + 'brew', + required=True, + opt_dirs=self.path, + ) + if not self.brew_path: + self.brew_path = None + self.failed = True + self.message = 'Unable to locate homebrew executable.' + raise HomebrewException('Unable to locate homebrew executable.') + + return self.brew_path + + def _status(self): + return (self.failed, self.changed, self.message) + # /prep -------------------------------------------------------- }}} + + def run(self): + try: + self._run() + except HomebrewException: + pass + + if not self.failed and (self.changed_count + self.unchanged_count > 1): + self.message = "Changed: %d, Unchanged: %d" % ( + self.changed_count, + self.unchanged_count, + ) + (failed, changed, message) = self._status() + + return (failed, changed, message) + + # checks ------------------------------------------------------- {{{ + def _current_package_is_installed(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + rc, out, err = self.module.run_command( + "{brew_path} list -m1 | grep -q '^{package}$'".format( + brew_path=self.brew_path, + package=self.current_package, + ) + ) - if state == "present": - rc, out, err = module.run_command("%s list %s" % (brew_path, name)) if rc == 0: return True + else: + return False + + def _outdated_packages(self): + rc, out, err = self.module.run_command([ + self.brew_path, + 'outdated', + ]) + return [line.split(' ')[0].strip() for line in out.split('\n') if line] + + def _current_package_is_outdated(self): + if not self.valid_package(self.current_package): + return False + + return self.current_package in self._outdated_packages() + + def _current_package_is_installed_from_head(self): + if not Homebrew.valid_package(self.current_package): + return False + elif not self._current_package_is_installed(): + return False + + rc, out, err = self.module.run_command([ + self.brew_path, + 'info', + self.current_package, + ]) + + try: + version_info = [line for line in out.split('\n') if line][0] + except IndexError: + return False + + return version_info.split(' ')[-1] == 'HEAD' + # /checks ------------------------------------------------------ }}} + + # commands ----------------------------------------------------- {{{ + def _run(self): + if self.update_homebrew: + self._update_homebrew() + + if self.packages: + if self.state == 'installed': + return self._install_packages() + elif self.state == 'upgraded': + return self._upgrade_packages() + elif self.state == 'head': + return self._install_packages() + # elif self.state == 'linked': + # return self._linked() + elif self.state == 'absent': + return self._uninstall_packages() + + # updated -------------------------------- {{{ + def _update_homebrew(self): + rc, out, err = self.module.run_command([ + 
self.brew_path, + 'update', + ]) + if rc == 0: + if out and isinstance(out, basestring): + already_updated = any( + re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) + for s in out.split('\n') + if s + ) + if not already_updated: + self.changed = True + self.message = 'Homebrew updated successfully.' + else: + self.message = 'Homebrew already up-to-date.' - return False - - -def remove_packages(module, brew_path, packages): - """ Uninstalls one or more packages if installed. """ - - removed_count = 0 - - # Using a for loop incase of error, we can report the package that failed - for package in packages: - # Query the package first, to see if we even need to remove. - if not query_package(module, brew_path, package): - continue - - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = module.run_command([brew_path, 'remove', package]) - - if query_package(module, brew_path, package): - module.fail_json(msg="failed to remove %s: %s" % (package, out.strip())) - - removed_count += 1 - - if removed_count > 0: - module.exit_json(changed=True, msg="removed %d package(s)" % removed_count) - - module.exit_json(changed=False, msg="package(s) already absent") + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + # /updated ------------------------------- }}} + + # installed ------------------------------ {{{ + def _install_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if self._current_package_is_installed(): + self.unchanged_count += 1 + self.message = 'Package already installed: {0}'.format( + self.current_package, + ) + return True + if self.module.check_mode: + self.changed = True + self.message = 'Package would be installed: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) -def install_packages(module, brew_path, packages, options): - """ Installs one or more packages if not already installed. 
""" + if self.state == 'head': + head = '--HEAD' + else: + head = None - installed_count = 0 + cmd = [opt + for opt in (self.brew_path, 'install', self.current_package, head) + if opt] - for package in packages: - if query_package(module, brew_path, package): - continue + rc, out, err = self.module.run_command(cmd) - if module.check_mode: - module.exit_json(changed=True) + if self._current_package_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Package installed: {0}'.format(self.current_package) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _install_packages(self): + for package in self.packages: + self.current_package = package + self._install_current_package() + + return True + # /installed ----------------------------- }}} + + # upgraded ------------------------------- {{{ + def _upgrade_current_package(self): + command = 'upgrade' + + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + command = 'install' + + if self._current_package_is_installed() and not self._current_package_is_outdated(): + self.message = 'Package is already upgraded: {0}'.format( + self.current_package, + ) + self.unchanged_count += 1 + return True - cmd = [brew_path, 'install', package] - if options: - cmd.extend(options) - rc, out, err = module.run_command(cmd) + if self.module.check_mode: + self.changed = True + self.message = 'Package would be upgraded: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + rc, out, err = self.module.run_command([ + self.brew_path, + command, + self.current_package, + ]) + + if not self._current_package_is_outdated(): + self.changed_count += 1 + self.changed = True + self.message = 'Package upgraded: {0}'.format(self.current_package) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _upgrade_all_packages(self): + rc, out, err = self.module.run_command([ + self.brew_path, + 'upgrade', + ]) + if rc == 0: + self.changed = True + self.message = 'All packages upgraded.' 
+ return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _upgrade_packages(self): + if not self.packages: + self._upgrade_all_packages() + else: + for package in self.packages: + self.current_package = package + self._upgrade_current_package() + return True + # /upgraded ------------------------------ }}} + + # uninstalled ---------------------------- {{{ + def _uninstall_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + self.unchanged_count += 1 + self.message = 'Package already uninstalled: {0}'.format( + self.current_package, + ) + return True - if not query_package(module, brew_path, package): - module.fail_json(msg="failed to install %s: '%s' %s" % (package, cmd, out.strip())) + if self.module.check_mode: + self.changed = True + self.message = 'Package would be uninstalled: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) - installed_count += 1 + cmd = [opt + for opt in (self.brew_path, 'uninstall', self.current_package) + if opt] - if installed_count > 0: - module.exit_json(changed=True, msg="installed %d package(s)" % (installed_count,)) + rc, out, err = self.module.run_command(cmd) - module.exit_json(changed=False, msg="package(s) already present") + if not self._current_package_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Package uninstalled: {0}'.format(self.current_package) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) -def generate_options_string(install_options): - if install_options is None: - return None + def _uninstall_packages(self): + for package in self.packages: + self.current_package = package + self._uninstall_current_package() - options = [] + return True + # /uninstalled ----------------------------- }}} + # /commands ---------------------------------------------------- }}} - for option in install_options: - options.append('--%s' % option) - return options +# def link_package(module, brew_path, package): +# """ Links a single homebrew package. """ +# +# failed, changed, msg = False, False, '' +# +# if not a_valid_package(package): +# failed = True +# msg = 'invalid package' +# +# elif not query_package(module, brew_path, package): +# failed = True +# msg = 'not installed' +# +# else: +# if module.check_mode: +# module.exit_json(changed=True) +# +# rc, out, err = module.run_command([ +# brew_path, +# 'link', +# package, +# ]) +# +# if rc: +# failed = True +# msg = out.strip() +# else: +# if err.strip().lower().find('already linked') != -1: +# msg = 'already linked' +# else: +# changed = True +# msg = 'linked' +# +# return (failed, changed, msg) +# +# +# def link_packages(module, brew_path, packages): +# """ Upgrades one or more packages. 
""" +# +# failed, linked, unchanged, msg = False, 0, 0, '' +# +# for package in packages: +# failed, changed, msg = link_package(module, brew_path, package) +# if failed: +# break +# if changed: +# linked += 1 +# else: +# unchanged += 1 +# +# if failed: +# msg = 'installed: %d, unchanged: %d, error: ' + msg +# msg = msg % (linked, unchanged) +# elif linked: +# changed = True +# msg = 'linked: %d, unchanged: %d' % (linked, unchanged) +# else: +# msg = 'linked: %d, unchanged: %d' % (linked, unchanged) +# +# return (failed, changed, msg) +# +# +# def unlink_package(module, brew_path, package): +# """ Unlinks a single homebrew package. """ +# +# failed, changed, msg = False, False, '' +# +# if not a_valid_package(package): +# failed = True +# msg = 'invalid package' +# +# elif not query_package(module, brew_path, package): +# failed = True +# msg = 'not installed' +# +# else: +# if module.check_mode: +# module.exit_json(changed=True) +# +# rc, out, err = module.run_command([ +# brew_path, +# 'unlink', +# package, +# ]) +# +# if rc: +# failed = True +# msg = out.strip() +# else: +# if out.find('0 links') != -1: +# msg = 'already unlinked' +# else: +# changed = True +# msg = 'linked' +# +# return (failed, changed, msg) +# +# +# def unlink_packages(module, brew_path, packages): +# """ Unlinks one or more packages. """ +# +# failed, unlinked, unchanged, msg = False, 0, 0, '' +# +# for package in packages: +# failed, changed, msg = unlink_package(module, brew_path, package) +# if failed: +# break +# if changed: +# unlinked += 1 +# else: +# unchanged += 1 +# +# if failed: +# msg = 'installed: %d, unchanged: %d, error: ' + msg +# msg = msg % (unlinked, unchanged) +# elif unlinked: +# changed = True +# msg = 'unlinked: %d, unchanged: %d' % (unlinked, unchanged) +# else: +# msg = 'unlinked: %d, unchanged: %d' % (unlinked, unchanged) +# +# return (failed, changed, msg) def main(): module = AnsibleModule( - argument_spec = dict( - name = dict(aliases=["pkg"], required=True), - state = dict(default="present", choices=["present", "installed", "absent", "removed"]), - update_homebrew = dict(default="no", aliases=["update-brew"], type='bool'), - install_options = dict(default=None, aliases=["options"], type='list') + argument_spec=dict( + name=dict(aliases=["pkg"], required=False), + path=dict(required=False), + state=dict( + default="present", + choices=[ + "present", "installed", + "latest", "upgraded", "head", + "linked", "unlinked", + "absent", "removed", "uninstalled", + ], + ), + update_homebrew=dict( + default="no", + aliases=["update-brew"], + type='bool', + ), ), - supports_check_mode=True + supports_check_mode=True, ) - - brew_path = module.get_bin_path('brew', True, ['/usr/local/bin']) - p = module.params - if p["update_homebrew"]: - update_homebrew(module, brew_path) - - pkgs = p["name"].split(",") - - if p["state"] in ["present", "installed"]: - opt = generate_options_string(p["install_options"]) - install_packages(module, brew_path, pkgs, opt) - - elif p["state"] in ["absent", "removed"]: - remove_packages(module, brew_path, pkgs) - -# import module snippets -from ansible.module_utils.basic import * - + if p['name']: + packages = p['name'].split(',') + else: + packages = None + + path = p['path'] + if path: + path = path.split(':') + else: + path = ['/usr/local/bin'] + + state = p['state'] + if state in ('present', 'installed', 'head'): + state = 'installed' + if state in ('latest', 'upgraded'): + state = 'upgraded' + if state in ('absent', 'removed', 'uninstalled'): + state = 'absent' + + 
update_homebrew = p['update_homebrew'] + + brew = Homebrew(module=module, path=path, packages=packages, + state=state, update_homebrew=update_homebrew) + (failed, changed, message) = brew.run() + if failed: + module.fail_json(msg=message) + else: + module.exit_json(changed=changed, msg=message) + +# this is magic, see lib/ansible/module_common.py +#<> main() From 91ac44a5bbb7da83b8aacd9f1a5d7726b7fc10b0 Mon Sep 17 00:00:00 2001 From: Daniel Jaouen Date: Wed, 19 Feb 2014 15:03:47 -0500 Subject: [PATCH 086/772] Fix linked/unlinked states. --- library/packaging/homebrew | 206 +++++++++++++++---------------------- 1 file changed, 81 insertions(+), 125 deletions(-) diff --git a/library/packaging/homebrew b/library/packaging/homebrew index feea2dc8e03..dfb82fa8c8a 100644 --- a/library/packaging/homebrew +++ b/library/packaging/homebrew @@ -171,6 +171,7 @@ class Homebrew(object): - upgraded - head - linked + - unlinked - absent ''' @@ -184,6 +185,7 @@ class Homebrew(object): 'upgraded', 'head', 'linked', + 'unlinked', 'absent', ) ) @@ -406,8 +408,10 @@ class Homebrew(object): return self._upgrade_packages() elif self.state == 'head': return self._install_packages() - # elif self.state == 'linked': - # return self._linked() + elif self.state == 'linked': + return self._link_packages() + elif self.state == 'unlinked': + return self._unlink_packages() elif self.state == 'absent': return self._uninstall_packages() @@ -597,131 +601,79 @@ class Homebrew(object): return True # /uninstalled ----------------------------- }}} - # /commands ---------------------------------------------------- }}} + # linked --------------------------------- {{{ + def _link_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) -# def link_package(module, brew_path, package): -# """ Links a single homebrew package. """ -# -# failed, changed, msg = False, False, '' -# -# if not a_valid_package(package): -# failed = True -# msg = 'invalid package' -# -# elif not query_package(module, brew_path, package): -# failed = True -# msg = 'not installed' -# -# else: -# if module.check_mode: -# module.exit_json(changed=True) -# -# rc, out, err = module.run_command([ -# brew_path, -# 'link', -# package, -# ]) -# -# if rc: -# failed = True -# msg = out.strip() -# else: -# if err.strip().lower().find('already linked') != -1: -# msg = 'already linked' -# else: -# changed = True -# msg = 'linked' -# -# return (failed, changed, msg) -# -# -# def link_packages(module, brew_path, packages): -# """ Upgrades one or more packages. """ -# -# failed, linked, unchanged, msg = False, 0, 0, '' -# -# for package in packages: -# failed, changed, msg = link_package(module, brew_path, package) -# if failed: -# break -# if changed: -# linked += 1 -# else: -# unchanged += 1 -# -# if failed: -# msg = 'installed: %d, unchanged: %d, error: ' + msg -# msg = msg % (linked, unchanged) -# elif linked: -# changed = True -# msg = 'linked: %d, unchanged: %d' % (linked, unchanged) -# else: -# msg = 'linked: %d, unchanged: %d' % (linked, unchanged) -# -# return (failed, changed, msg) -# -# -# def unlink_package(module, brew_path, package): -# """ Unlinks a single homebrew package. 
""" -# -# failed, changed, msg = False, False, '' -# -# if not a_valid_package(package): -# failed = True -# msg = 'invalid package' -# -# elif not query_package(module, brew_path, package): -# failed = True -# msg = 'not installed' -# -# else: -# if module.check_mode: -# module.exit_json(changed=True) -# -# rc, out, err = module.run_command([ -# brew_path, -# 'unlink', -# package, -# ]) -# -# if rc: -# failed = True -# msg = out.strip() -# else: -# if out.find('0 links') != -1: -# msg = 'already unlinked' -# else: -# changed = True -# msg = 'linked' -# -# return (failed, changed, msg) -# -# -# def unlink_packages(module, brew_path, packages): -# """ Unlinks one or more packages. """ -# -# failed, unlinked, unchanged, msg = False, 0, 0, '' -# -# for package in packages: -# failed, changed, msg = unlink_package(module, brew_path, package) -# if failed: -# break -# if changed: -# unlinked += 1 -# else: -# unchanged += 1 -# -# if failed: -# msg = 'installed: %d, unchanged: %d, error: ' + msg -# msg = msg % (unlinked, unchanged) -# elif unlinked: -# changed = True -# msg = 'unlinked: %d, unchanged: %d' % (unlinked, unchanged) -# else: -# msg = 'unlinked: %d, unchanged: %d' % (unlinked, unchanged) -# -# return (failed, changed, msg) + if not self._current_package_is_installed(): + self.failed = True + self.message = 'Package not installed: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be linked: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + cmd = [opt + for opt in (self.brew_path, 'link', self.current_package) + if opt] + + rc, out, err = self.module.run_command(cmd) + self.changed_count += 1 + self.changed = True + self.message = 'Package linked: {0}'.format(self.current_package) + + def _link_packages(self): + for package in self.packages: + self.current_package = package + self._link_current_package() + + return True + # /linked -------------------------------- }}} + + # unlinked ------------------------------- {{{ + def _unlink_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + self.failed = True + self.message = 'Package not installed: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be unlinked: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + cmd = [opt + for opt in (self.brew_path, 'unlink', self.current_package) + if opt] + + rc, out, err = self.module.run_command(cmd) + self.changed_count += 1 + self.changed = True + self.message = 'Package unlinked: {0}'.format(self.current_package) + + def _unlink_packages(self): + for package in self.packages: + self.current_package = package + self._unlink_current_package() + + return True + # /unlinked ------------------------------ }}} + # /commands ---------------------------------------------------- }}} def main(): @@ -764,6 +716,10 @@ def main(): state = 'installed' if state in ('latest', 'upgraded'): state = 'upgraded' + if state == 'linked': + state = 'linked' + if state == 'unlinked': + state = 'unlinked' if state in ('absent', 'removed', 'uninstalled'): state = 'absent' From 097ce843366cbf7dfd1a413e27266de15dbd4204 Mon Sep 17 00:00:00 2001 From: 
Daniel Jaouen Date: Wed, 19 Feb 2014 15:15:37 -0500 Subject: [PATCH 087/772] Fix homebrew linked/unlinked output. --- library/packaging/homebrew | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/library/packaging/homebrew b/library/packaging/homebrew index dfb82fa8c8a..9609ce4068b 100644 --- a/library/packaging/homebrew +++ b/library/packaging/homebrew @@ -626,9 +626,17 @@ class Homebrew(object): if opt] rc, out, err = self.module.run_command(cmd) - self.changed_count += 1 - self.changed = True - self.message = 'Package linked: {0}'.format(self.current_package) + + if rc == 0: + self.changed_count += 1 + self.changed = True + self.message = 'Package linked: {0}'.format(self.current_package) + + return True + else: + self.failed = True + self.message = 'Package could not be linked: {0}.'.format(self.current_package) + raise HomebrewException(self.message) def _link_packages(self): for package in self.packages: @@ -662,9 +670,17 @@ class Homebrew(object): if opt] rc, out, err = self.module.run_command(cmd) - self.changed_count += 1 - self.changed = True - self.message = 'Package unlinked: {0}'.format(self.current_package) + + if rc == 0: + self.changed_count += 1 + self.changed = True + self.message = 'Package unlinked: {0}'.format(self.current_package) + + return True + else: + self.failed = True + self.message = 'Package could not be unlinked: {0}.'.format(self.current_package) + raise HomebrewException(self.message) def _unlink_packages(self): for package in self.packages: From 155f6e4dd3d597253e787c5eb191c6bde5d3dab8 Mon Sep 17 00:00:00 2001 From: Daniel Jaouen Date: Wed, 19 Feb 2014 17:06:58 -0500 Subject: [PATCH 088/772] Update homebrew documentation. --- library/packaging/homebrew | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/library/packaging/homebrew b/library/packaging/homebrew index 9609ce4068b..dcd2c048a98 100644 --- a/library/packaging/homebrew +++ b/library/packaging/homebrew @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: homebrew -author: Andrew Dunham +author: Andrew Dunham and Daniel Jaouen short_description: Package manager for Homebrew description: - Manages Homebrew packages @@ -35,7 +35,7 @@ options: state: description: - state of the package - choices: [ 'head', 'latest', 'installed', 'linked', 'uninstalled' ] + choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'uninstalled' ] required: false default: present update_homebrew: @@ -44,6 +44,11 @@ options: required: false default: "no" choices: [ "yes", "no" ] + install_options: + description: + - options flags to install a package + required: false + default: null notes: [] ''' EXAMPLES = ''' @@ -55,6 +60,7 @@ EXAMPLES = ''' - homebrew: name=foo state=linked - homebrew: name=foo state=absent - homebrew: name=foo,bar state=absent +- homebrew: name=foo state=present install_options=with-baz,enable-debug ''' import os.path From e3a39837e17d78467469cfb1ba4af4712aa88158 Mon Sep 17 00:00:00 2001 From: Daniel Jaouen Date: Wed, 19 Feb 2014 17:40:26 -0500 Subject: [PATCH 089/772] Add back homebrew `install_options` parameter. 
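The diff below threads the restored `install_options` list into every brew invocation by concatenating argv fragments and filtering out empty entries. As a rough standalone sketch of that pattern (the function name and example flags here are illustrative, not taken from the patch):

```python
def build_brew_command(brew_path, subcommand, install_options, package=None, head=None):
    # Optional pieces (e.g. head=None) simply drop out of the final argv,
    # which is what the `if opt` filter in the patch achieves.
    opts = [brew_path, subcommand] + list(install_options) + [package, head]
    return [opt for opt in opts if opt]

print(build_brew_command('/usr/local/bin/brew', 'install',
                         ['--with-baz', '--enable-debug'], package='foo'))
# ['/usr/local/bin/brew', 'install', '--with-baz', '--enable-debug', 'foo']
```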
--- library/packaging/homebrew | 81 +++++++++++++++++++++++++------------- 1 file changed, 53 insertions(+), 28 deletions(-) diff --git a/library/packaging/homebrew b/library/packaging/homebrew index dcd2c048a98..dea962159a9 100644 --- a/library/packaging/homebrew +++ b/library/packaging/homebrew @@ -284,10 +284,13 @@ class Homebrew(object): # /class properties -------------------------------------------- }}} def __init__(self, module, path=None, packages=None, state=None, - update_homebrew=False, ): + update_homebrew=False, install_options=None): + if not install_options: + install_options = list() self._setup_status_vars() self._setup_instance_vars(module=module, path=path, packages=packages, - state=state, update_homebrew=update_homebrew, ) + state=state, update_homebrew=update_homebrew, + install_options=install_options, ) self._prep() @@ -473,10 +476,12 @@ class Homebrew(object): else: head = None - cmd = [opt - for opt in (self.brew_path, 'install', self.current_package, head) - if opt] - + opts = ( + [self.brew_path, 'install'] + + self.install_options + + [self.current_package, head] + ) + cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if self._current_package_is_installed(): @@ -523,11 +528,13 @@ class Homebrew(object): ) raise HomebrewException(self.message) - rc, out, err = self.module.run_command([ - self.brew_path, - command, - self.current_package, - ]) + opts = ( + [self.brew_path, command] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) if not self._current_package_is_outdated(): self.changed_count += 1 @@ -540,10 +547,13 @@ class Homebrew(object): raise HomebrewException(self.message) def _upgrade_all_packages(self): - rc, out, err = self.module.run_command([ - self.brew_path, - 'upgrade', - ]) + opts = ( + [self.brew_path, 'upgrade'] + + self.install_options + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + if rc == 0: self.changed = True self.message = 'All packages upgraded.' 
@@ -584,10 +594,12 @@ class Homebrew(object): ) raise HomebrewException(self.message) - cmd = [opt - for opt in (self.brew_path, 'uninstall', self.current_package) - if opt] - + opts = ( + [self.brew_path, 'uninstall'] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if not self._current_package_is_installed(): @@ -627,10 +639,12 @@ class Homebrew(object): ) raise HomebrewException(self.message) - cmd = [opt - for opt in (self.brew_path, 'link', self.current_package) - if opt] - + opts = ( + [self.brew_path, 'link'] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if rc == 0: @@ -671,10 +685,12 @@ class Homebrew(object): ) raise HomebrewException(self.message) - cmd = [opt - for opt in (self.brew_path, 'unlink', self.current_package) - if opt] - + opts = ( + [self.brew_path, 'unlink'] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if rc == 0: @@ -717,6 +733,11 @@ def main(): aliases=["update-brew"], type='bool', ), + install_options=dict( + default=None, + aliases=['options'], + type='list', + ) ), supports_check_mode=True, ) @@ -746,9 +767,13 @@ def main(): state = 'absent' update_homebrew = p['update_homebrew'] + p['install_options'] = p['install_options'] or [] + install_options = ['--{0}'.format(install_option) + for install_option in p['install_options']] brew = Homebrew(module=module, path=path, packages=packages, - state=state, update_homebrew=update_homebrew) + state=state, update_homebrew=update_homebrew, + install_options=install_options) (failed, changed, message) = brew.run() if failed: module.fail_json(msg=message) From 6748ef121b2fb097e5a4c8b7b65f42fc6d6df38d Mon Sep 17 00:00:00 2001 From: Daniel Jaouen Date: Wed, 19 Feb 2014 17:43:09 -0500 Subject: [PATCH 090/772] Update homebrew documentation. --- library/packaging/homebrew | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/homebrew b/library/packaging/homebrew index dea962159a9..33b2ab62497 100644 --- a/library/packaging/homebrew +++ b/library/packaging/homebrew @@ -35,7 +35,7 @@ options: state: description: - state of the package - choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'uninstalled' ] + choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'unlinked' ] required: false default: present update_homebrew: From 498c8e8d5782cbffb77422e14b3967e4392f7a30 Mon Sep 17 00:00:00 2001 From: Dale Sedivec Date: Wed, 19 Feb 2014 20:38:21 -0600 Subject: [PATCH 091/772] Fix mistaken double backslash The module helper function run_command was appending a literal backslash followed by 'n' to the stdin of a command it runs unless you called it with binary_data=True (not the default). I have changed it to what I expect was the intent, to append an actual line feed to stdin. 
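The distinction is easy to demonstrate in isolation (illustrative snippet, not part of the patch): in Python source, '\\n' is two characters -- a backslash followed by the letter n -- while '\n' is a single line feed, which is what communicate() needs to terminate the final line of stdin.

```python
data = 'stdin payload'

buggy = data + '\\n'   # appends two characters: backslash, then 'n'
fixed = data + '\n'    # appends one character: an actual line feed

print(len(buggy) - len(data))  # 2
print(len(fixed) - len(data))  # 1
print(repr(buggy[-2:]))        # '\\n'
print(repr(fixed[-1]))         # '\n'
```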
--- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 3be407fe707..938595c5604 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1039,7 +1039,7 @@ class AnsibleModule(object): if data: if not binary_data: - data += '\\n' + data += '\n' out, err = cmd.communicate(input=data) rc = cmd.returncode except (OSError, IOError), e: From 8131feaadeb6f05eb6651d254f1a9bcf4820637e Mon Sep 17 00:00:00 2001 From: Matt Jeffery Date: Thu, 20 Feb 2014 16:50:09 +0000 Subject: [PATCH 092/772] Normalise the module name when comparing against the module names in /proc/modules. --- library/system/modprobe | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/library/system/modprobe b/library/system/modprobe index 82ca86b9bd5..864952ae5b0 100644 --- a/library/system/modprobe +++ b/library/system/modprobe @@ -60,8 +60,9 @@ def main(): try: modules = open('/proc/modules') present = False + module_name = args['name'].replace('-', '_') + ' ' for line in modules: - if line.startswith(args['name'] + ' '): + if line.startswith(module_name): present = True break modules.close() From bf79833c9d4c878352c671aa0a6d29cb3f4d9840 Mon Sep 17 00:00:00 2001 From: Gianluca Borello Date: Thu, 20 Feb 2014 22:17:55 -0800 Subject: [PATCH 093/772] Make sure droplet deletion works even when idempotency is achieved using 'name' and 'unique_name' rather than 'id' --- library/cloud/digital_ocean | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/digital_ocean b/library/cloud/digital_ocean index a6721a55da1..67a2dadb4db 100644 --- a/library/cloud/digital_ocean +++ b/library/cloud/digital_ocean @@ -348,7 +348,7 @@ def core(module): elif state in ('absent', 'deleted'): # First, try to find a droplet by id. - droplet = Droplet.find(id=getkeyordie('id')) + droplet = Droplet.find(module.params['id']) # If we couldn't find the droplet and the user is allowing unique # hostnames, then check to see if a droplet with the specified From 8d507f79b9bf1b4ff9bf97c3c445e272cf745578 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Lalinsk=C3=BD?= Date: Sat, 22 Feb 2014 15:33:54 +0100 Subject: [PATCH 094/772] Don't require pycurl in apt_repository when it's not actually needed --- library/packaging/apt_repository | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/library/packaging/apt_repository b/library/packaging/apt_repository index fc47be7915c..bf786a29a79 100644 --- a/library/packaging/apt_repository +++ b/library/packaging/apt_repository @@ -28,9 +28,10 @@ short_description: Add and remove APT repositores description: - Add or remove an APT repositories in Ubuntu and Debian. notes: - - This module works on Debian and Ubuntu and requires C(python-apt) and C(python-pycurl) packages. + - This module works on Debian and Ubuntu and requires C(python-apt). - This module supports Debian Squeeze (version 6) as well as its successors. - This module treats Debian and Ubuntu distributions separately. So PPA could be installed only on Ubuntu machines. + Adding PPA repositories requires C(python-pycurl). options: repo: required: true @@ -273,6 +274,8 @@ class UbuntuSourcesList(SourcesList): def _get_ppa_info(self, owner_name, ppa_name): # we can not use urllib2 here as it does not do cert verification + if not HAVE_PYCURL: + module.fail_json(msg='Could not import python modules: pycurl. 
Please install python-pycurl package.') lp_api = self.LP_API % (owner_name, ppa_name) return self._get_ppa_info_curl(lp_api) @@ -343,9 +346,6 @@ def main(): if not HAVE_PYTHON_APT: module.fail_json(msg='Could not import python modules: apt_pkg. Please install python-apt package.') - if not HAVE_PYCURL: - module.fail_json(msg='Could not import python modules: pycurl. Please install python-pycurl package.') - repo = module.params['repo'] state = module.params['state'] update_cache = module.params['update_cache'] From dd6f2e6bfa9d251db95db06fb174faf0e2ccb9e1 Mon Sep 17 00:00:00 2001 From: Veeti Paananen Date: Sun, 23 Feb 2014 18:06:46 +0200 Subject: [PATCH 095/772] Show the offending file name with the include + with_items error --- lib/ansible/playbook/play.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 93b95049987..626c7136a36 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -498,7 +498,11 @@ class Play(object): include_vars = {} for k in x: if k.startswith("with_"): - utils.deprecated("include + with_items is a removed deprecated feature", "1.5", removed=True) + if original_file: + offender = " (in %s)" % original_file + else: + offender = "" + utils.deprecated("include + with_items is a removed deprecated feature" + offender, "1.5", removed=True) elif k.startswith("when_"): utils.deprecated("\"when_:\" is a removed deprecated feature, use the simplified 'when:' conditional directly", None, removed=True) elif k == 'when': From 467914e3c339fa5cce2a9442875a0999a4da2555 Mon Sep 17 00:00:00 2001 From: "Dustin C. Hatch" Date: Tue, 25 Feb 2014 20:08:57 -0600 Subject: [PATCH 096/772] filesystem: Ignore blkid cache Sometimes, `blkid` will incorrectly return no information about a block device, even if it exists and has a valid filesystem. This causes the *filesystem* module to fail if *force=no*. Instructing `blkid` to use `/dev/null` as a cache file will force it to rescan the block device on each run, making results more consistent. Signed-off-by: Dustin C. 
Hatch --- library/system/filesystem | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/filesystem b/library/system/filesystem index 698c71d4534..46e798f6e81 100644 --- a/library/system/filesystem +++ b/library/system/filesystem @@ -79,7 +79,7 @@ def main(): cmd = module.get_bin_path('blkid', required=True) - rc,raw_fs,err = module.run_command("%s -o value -s TYPE %s" % (cmd, dev)) + rc,raw_fs,err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev)) fs = raw_fs.strip() From 02477eef69861bd3953689d2f89d88f03a5a468b Mon Sep 17 00:00:00 2001 From: Gareth Armstrong Date: Thu, 27 Feb 2014 14:52:56 +0100 Subject: [PATCH 097/772] Fix issue 5621, rpm_key doesn't work for el5 --- library/packaging/rpm_key | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/library/packaging/rpm_key b/library/packaging/rpm_key index 82532477348..41ae5977d43 100644 --- a/library/packaging/rpm_key +++ b/library/packaging/rpm_key @@ -131,7 +131,9 @@ class RpmKey: def normalize_keyid(self, keyid): """Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is lowercase""" ret = keyid.strip().lower() - if ret.startswith(('0x', '0X')): + if ret.startswith('0x'): + return ret[2:] + elif ret.startswith('0X'): return ret[2:] else: return ret @@ -141,9 +143,9 @@ class RpmKey: stdout, stderr = self.execute_command([gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', '--list-packets', keyfile]) for line in stdout.splitlines(): line = line.strip() - if line.startswith('keyid:'): + if line.startswith(':signature packet:'): # We want just the last 8 characters of the keyid - keyid = line.split(':')[1].strip()[8:] + keyid = line.split()[-1].strip()[8:] return keyid self.json_fail(msg="Unexpected gpg output") From cb3c6417ddc90e97dc9d292787d0b5e79a5c5075 Mon Sep 17 00:00:00 2001 From: Ashok Raja R Date: Fri, 28 Feb 2014 11:28:38 +0530 Subject: [PATCH 098/772] pause plugin doesn't flush raw_input prompt ##### Issue Type: Bugfix Pull Request ##### Ansible Version: ansible 1.4.3 ##### Environment: N/A ##### Summary: We are using a wrapper python script to run ansible-playbook. We use subprocess to execute and print the stdout as and when its written. Problem is when we use pause it doesn't display the prompt string as raw_input does not flush stdout before reading from stdin. It looks like a dirty fix to add "\n" to the prompt string but i don't see any other way to over come this. If anyone else have a better fix please do propose/suggest. ##### Steps To Reproduce: ```yaml #File: test_play.yml - name: Test hosts: $nodes gather_facts: false tasks: - name: Waiting for User local_action: pause prompt="Do you want to continue (yes/no)? " ``` ```python #!/usr/bin/env python #File: test.py import shlex, subprocess def run_process(process): process = process.encode("utf-8") command = shlex.split(process) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) for line in iter(p.stdout.readline, b''): print line, cmd = "/usr/bin/python -u /usr/bin/ansible-playbook -i hosts.txt test_play.yml -e 'nodes=local'" run_process(cmd) ``` ``` shell $ python test.py ``` ##### Expected Results: ``` PLAY [Test] ******************************************************************* TASK: [Waiting for User] ****************************************************** [localhost] Do you want to continue (yes/no)? 
: ``` ##### Actual Results: ``` PLAY [Test] ******************************************************************* TASK: [Waiting for User] ****************************************************** [localhost] ``` --- lib/ansible/runner/action_plugins/pause.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/action_plugins/pause.py b/lib/ansible/runner/action_plugins/pause.py index 8aaa87f454e..c6a06dcd7cd 100644 --- a/lib/ansible/runner/action_plugins/pause.py +++ b/lib/ansible/runner/action_plugins/pause.py @@ -77,11 +77,11 @@ class ActionModule(object): # Is 'prompt' a key in 'args'? elif 'prompt' in args: self.pause_type = 'prompt' - self.prompt = "[%s]\n%s: " % (hosts, args['prompt']) + self.prompt = "[%s]\n%s:\n" % (hosts, args['prompt']) # Is 'args' empty, then this is the default prompted pause elif len(args.keys()) == 0: self.pause_type = 'prompt' - self.prompt = "[%s]\nPress enter to continue: " % hosts + self.prompt = "[%s]\nPress enter to continue:\n" % hosts # I have no idea what you're trying to do. But it's so wrong. else: raise ae("invalid pause type given. must be one of: %s" % \ From 1280269866b46b8f769187804978759c954ac809 Mon Sep 17 00:00:00 2001 From: Andy Trevorah Date: Fri, 28 Feb 2014 16:39:06 +0000 Subject: [PATCH 099/772] apt_repository: autoinstall python-apt if not available --- library/packaging/apt_repository | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/library/packaging/apt_repository b/library/packaging/apt_repository index 4587d90ba78..34cdc6f4c96 100644 --- a/library/packaging/apt_repository +++ b/library/packaging/apt_repository @@ -366,7 +366,13 @@ def main(): ) if not HAVE_PYTHON_APT: - module.fail_json(msg='Could not import python modules: apt_pkg. Please install python-apt package.') + try: + module.run_command('apt-get update && apt-get install python-apt -y -q') + global apt, apt_pkg + import apt + import apt_pkg + except: + module.fail_json(msg='Could not import python modules: apt, apt_pkg. Please install python-apt package.') if not HAVE_PYCURL: module.fail_json(msg='Could not import python modules: pycurl. Please install python-pycurl package.') From fa3d84d59e9f4568ddbec50c3c2c2368f1242577 Mon Sep 17 00:00:00 2001 From: Eugene Brevdo Date: Fri, 28 Feb 2014 11:56:42 -0800 Subject: [PATCH 100/772] Tiny bugfix in ec2_vol documentation. --- library/cloud/ec2_vol | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2_vol b/library/cloud/ec2_vol index 511bdd0cea5..49fd55eaba3 100644 --- a/library/cloud/ec2_vol +++ b/library/cloud/ec2_vol @@ -52,7 +52,7 @@ options: required: false default: null aliases: [] - name: + id: description: - volume id if you wish to attach an existing volume (requires instance) required: false From 6221a2740f5c3023c817d13e4a564f301ed3bc73 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 28 Feb 2014 14:17:07 -0600 Subject: [PATCH 101/772] Updating files for new upstream release 1.5.0 --- CHANGELOG.md | 2 +- RELEASES.txt | 2 +- packaging/debian/changelog | 4 ++-- packaging/rpm/ansible.spec | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a97e55a484..1ff78020e64 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,7 @@ Ansible Changes By Release ========================== -## 1.5 "Love Walks In" - Release pending! 
+## 1.5 "Love Walks In" - February 28, 2014 Major features/changes: diff --git a/RELEASES.txt b/RELEASES.txt index 6038f9a764f..63358298818 100644 --- a/RELEASES.txt +++ b/RELEASES.txt @@ -2,7 +2,7 @@ Ansible Releases at a Glance ============================ 1.6 "The Cradle Will Rock" - NEXT -1.5 "Love Walks In" -------- PENDING +1.5 "Love Walks In" -------- 02-28-2014 1.4.5 "Could This Be Magic?" - 02-12-2014 1.4.4 "Could This Be Magic?" - 01-06-2014 1.4.3 "Could This Be Magic?" - 12-20-2013 diff --git a/packaging/debian/changelog b/packaging/debian/changelog index a29f156753b..c009bebb376 100644 --- a/packaging/debian/changelog +++ b/packaging/debian/changelog @@ -1,8 +1,8 @@ ansible (1.5) unstable; urgency=low - * 1.5 release (PENDING) + * 1.5 release - -- Michael DeHaan Wed, 27 November 2013 15:00:02 -0500 + -- Michael DeHaan Fri, 28 February 2014 -0500 ansible (1.4.5) unstable; urgency=low diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec index 04561b3a5de..c067bbe42e9 100644 --- a/packaging/rpm/ansible.spec +++ b/packaging/rpm/ansible.spec @@ -102,8 +102,8 @@ rm -rf %{buildroot} %changelog -* Thu Feb 13 2014 Michael DeHaan - 1.5-0 -* (PENDING) +* Fri Feb 28 2014 Michael DeHaan - 1.5-0 +- Release 1.5.0 * Wed Feb 12 2014 Michael DeHaan - 1.4.5 * Release 1.4.5 From ce5939c507bc0c1595d88891fe84c457d8099814 Mon Sep 17 00:00:00 2001 From: Joshua Conner Date: Fri, 28 Feb 2014 18:05:52 -0800 Subject: [PATCH 102/772] nova_compute: fix for partial match b/w params['name'] and an existing name When there is an Openstack instance that has a name that's a partial match for module.params['name'], but a server with name module.params['name'] doesn't yet exist, this module would fail with a list index out of bounds error. This fixes that by filtering by exact name and only then getting the server from the list if the list is still not empty. 
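The failure is straightforward to reproduce without an OpenStack endpoint (the class and server names below are made up for illustration):

```python
class FakeServer(object):
    def __init__(self, name):
        self.name = name

# A substring-matching list API can return only partial matches.
servers = [FakeServer('web-old'), FakeServer('web-staging')]

# Old logic: raises IndexError because there is no exact match.
#   server = [x for x in servers if x.name == 'web'][0]

# Patched logic: filter strictly, then guard against an empty result.
servers = [x for x in servers if x.name == 'web']
server = servers[0] if servers else None
print(server)  # None
```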
--- library/cloud/nova_compute | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/library/cloud/nova_compute b/library/cloud/nova_compute index af693229333..6e1730ed3b7 100644 --- a/library/cloud/nova_compute +++ b/library/cloud/nova_compute @@ -193,7 +193,11 @@ def _get_server_state(module, nova): try: servers = nova.servers.list(True, {'name': module.params['name']}) if servers: - server = [x for x in servers if x.name == module.params['name']][0] + # the {'name': module.params['name']} will also return servers + # with names that partially match the server name, so we have to + # strictly filter here + servers = [x for x in servers if x.name == module.params['name']] + server = servers[0] if servers else None except Exception, e: module.fail_json(msg = "Error in getting the server list: %s" % e.message) if server and module.params['state'] == 'present': From c770462fad3d14c22153dd4b2585a5175cf42071 Mon Sep 17 00:00:00 2001 From: Gareth Rushgrove Date: Sat, 1 Mar 2014 16:06:19 +0000 Subject: [PATCH 103/772] Added modules to manage ec2 autoscaling groups Includes management of Launch Configuration and related Autoscaling Groups --- library/cloud/ec2_asg | 190 ++++++++++++++++++++++++++++++++++++++++++ library/cloud/ec2_lc | 159 +++++++++++++++++++++++++++++++++++ 2 files changed, 349 insertions(+) create mode 100644 library/cloud/ec2_asg create mode 100644 library/cloud/ec2_lc diff --git a/library/cloud/ec2_asg b/library/cloud/ec2_asg new file mode 100644 index 00000000000..0a6fe651baf --- /dev/null +++ b/library/cloud/ec2_asg @@ -0,0 +1,190 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: ec2_asg +short_description: Create or delete AWS Autoscaling Groups +description: + - Can create or delete AWS Autoscaling Groups + - Works with the ec2_lc module to manage Launch Configurations +version_added: "1.0" +requirements: [ "boto" ] +author: Gareth Rushgrove +options: + state: + description: + - register or deregister the instance + required: true + choices: ['present', 'absent'] + group: + description: + - Unique name for group to be created or deleted + required: true + load_balancers: + description: + - List of ELB names to use for the group + required: true + availability_zones: + description: + - List of availability zone names in which to create the group. + required: true + launch_config_name: + description: + - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these. + required: true + min_size: + description: + - Minimum number of instances in group + required: true + max_size: + description: + - Maximum number of instances in group + required: true + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. 
+ required: false + default: None + aliases: ['ec2_secret_key', 'secret_key' ] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: None + aliases: ['ec2_access_key', 'access_key' ] + region: + description: + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. + required: false + aliases: ['aws_region', 'ec2_region'] +""" + +EXAMPLES = ''' +- ec2_asg: > + group_name: special + load_balancers: 'lb1,lb2' + availability_zones: 'eu-west-1a,eu-west-1b' + launch_config_name: 'lc-1' + min_size: 1 + max_size: 10 +''' + +import sys +import time + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +try: + import boto.ec2.autoscale + from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup + from boto.exception import BotoServerError +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + +AWS_REGIONS = ['ap-northeast-1', + 'ap-southeast-1', + 'ap-southeast-2', + 'eu-west-1', + 'sa-east-1', + 'us-east-1', + 'us-west-1', + 'us-west-2'] + + +def create_autoscaling_group(connection, module): + group_name = module.params.get('name') + load_balancers = module.params['load_balancers'] + availability_zones = module.params['availability_zones'] + launch_config_name = module.params.get('launch_config_name') + min_size = module.params.get('min_size') + max_size = module.params.get('max_size') + + launch_configs = connection.get_all_launch_configurations(name=[launch_config_name]) + + ag = AutoScalingGroup( + group_name=group_name, + load_balancers=load_balancers, + availability_zones=availability_zones, + launch_config=launch_configs[0], + min_size=min_size, + max_size=max_size, + connection=connection) + + try: + connection.create_auto_scaling_group(ag) + module.exit_json(changed=True) + except BotoServerError, e: + module.exit_json(changed=False, msg=str(e)) + + +def delete_autoscaling_group(connection, module): + group_name = module.params.get('name') + groups = connection.get_all_groups(names=[group_name]) + if groups: + group = groups[0] + group.shutdown_instances() + + instances = True + while instances: + connection.get_all_groups() + for group in groups: + if group.name == group_name: + if not group.instances: + instances = False + time.sleep(10) + + group.delete() + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, type='str'), + load_balancers = dict(required=True, type='list'), + availability_zones = dict(required=True, type='list'), + launch_config_name = dict(required=True, type='str'), + min_size = dict(required=True, type='int'), + max_size = dict(required=True, type='int'), + state = dict(default='present', choices=['present', 'absent']), + region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), + ec2_url = dict(), + ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True), + ec2_access_key = dict(aliases=['aws_access_key', 'access_key']), + ) + ) + + ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + + state = module.params.get('state') + + try: + connection = boto.ec2.autoscale.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + + if state == 
'present': + create_autoscaling_group(connection, module) + elif state == 'absent': + delete_autoscaling_group(connection, module) + + +main() diff --git a/library/cloud/ec2_lc b/library/cloud/ec2_lc new file mode 100644 index 00000000000..a84449d7d91 --- /dev/null +++ b/library/cloud/ec2_lc @@ -0,0 +1,159 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: ec2_lc +short_description: Create or delete AWS Autoscaling Launch Configurations +description: + - Can create or delete AwS Autoscaling Configurations + - Works with the ec2_asg module to manage Autoscaling Groups +version_added: "1.6" +requirements: [ "boto" ] +author: Gareth Rushgrove +options: + state: + description: + - register or deregister the instance + required: true + choices: ['present', 'absent'] + name: + description: + - Unique name for configuration + required: true + image_id: + description: + - The AMI unique identifier to be used for the group + required: true + key_name: + description: + - The SSH key name to be used for access to managed instances + required: true + security_groups: + description: + - A list of security groups into which instances should be found + required: true + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: None + aliases: ['ec2_secret_key', 'secret_key' ] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: None + aliases: ['ec2_access_key', 'access_key' ] + region: + description: + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. 
+ required: false + aliases: ['aws_region', 'ec2_region'] +""" + +EXAMPLES = ''' +- ec2_lc: > + name: special + image_id: ami-XXX + key_name: default + security_groups: 'group,group2' + +''' + +import sys +import time + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +try: + import boto.ec2.autoscale + from boto.ec2.autoscale import LaunchConfiguration + from boto.exception import BotoServerError +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + +AWS_REGIONS = ['ap-northeast-1', + 'ap-southeast-1', + 'ap-southeast-2', + 'eu-west-1', + 'sa-east-1', + 'us-east-1', + 'us-west-1', + 'us-west-2'] + + +def create_launch_config(connection, module): + name = module.params.get('name') + image_id = module.params.get('image_id') + key_name = module.params.get('key_name') + security_groups = module.params['security_groups'] + + lc = LaunchConfiguration( + name=name, + image_id=image_id, + key_name=key_name, + security_groups=security_groups) + + try: + connection.create_launch_configuration(lc) + module.exit_json(changed=True) + except BotoServerError, e: + module.exit_json(changed=False, msg=str(e)) + + +def delete_launch_config(connection, module): + name = module.params.get('name') + launch_configs = connection.get_all_launch_configurations(name=[name]) + if launch_configs: + launch_configs[0].delete() + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, type='str'), + image_id = dict(required=True, type='str'), + key_name = dict(required=True, type='str'), + security_groups = dict(required=True, type='list'), + state = dict(default='present', choices=['present', 'absent']), + region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), + ec2_url = dict(), + ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True), + ec2_access_key = dict(aliases=['aws_access_key', 'access_key']), + ) + ) + + ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + + try: + connection = boto.ec2.autoscale.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + + state = module.params.get('state') + + if state == 'present': + create_launch_config(connection, module) + elif state == 'absent': + delete_launch_config(connection, module) + +main() From 87265cd4b3f4b51a44208220a66f0b5b04b81b0b Mon Sep 17 00:00:00 2001 From: Gareth Rushgrove Date: Sun, 2 Mar 2014 11:58:22 +0000 Subject: [PATCH 104/772] correct version added --- library/cloud/ec2_asg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2_asg b/library/cloud/ec2_asg index 0a6fe651baf..6aee6f5e152 100644 --- a/library/cloud/ec2_asg +++ b/library/cloud/ec2_asg @@ -21,7 +21,7 @@ short_description: Create or delete AWS Autoscaling Groups description: - Can create or delete AWS Autoscaling Groups - Works with the ec2_lc module to manage Launch Configurations -version_added: "1.0" +version_added: "1.6" requirements: [ "boto" ] author: Gareth Rushgrove options: From a26bc3e2f98e20aafb41aea71bfcb77cafa9f82b Mon Sep 17 00:00:00 2001 From: Gareth Rushgrove Date: Sun, 2 Mar 2014 12:00:50 +0000 Subject: [PATCH 105/772] correct name in documentation and example --- library/cloud/ec2_asg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/library/cloud/ec2_asg b/library/cloud/ec2_asg index 6aee6f5e152..a411bcd4279 100644 --- a/library/cloud/ec2_asg +++ b/library/cloud/ec2_asg @@ -30,7 +30,7 @@ options: - register or deregister the instance required: true choices: ['present', 'absent'] - group: + name: description: - Unique name for group to be created or deleted required: true @@ -75,7 +75,7 @@ options: EXAMPLES = ''' - ec2_asg: > - group_name: special + name: special load_balancers: 'lb1,lb2' availability_zones: 'eu-west-1a,eu-west-1b' launch_config_name: 'lc-1' From 58886533b047d50f8a3a4eefcb081a91dd9d3e8d Mon Sep 17 00:00:00 2001 From: Jake Kreider Date: Sun, 2 Mar 2014 20:45:53 -0600 Subject: [PATCH 106/772] Updated S3 metadata examples --- library/cloud/s3 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/library/cloud/s3 b/library/cloud/s3 index 3ce6e1b0b38..97a0d489813 100644 --- a/library/cloud/s3 +++ b/library/cloud/s3 @@ -105,6 +105,8 @@ EXAMPLES = ''' - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put # PUT/upload with metadata - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip' +# PUT/upload with multiple metadata +- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache' # PUT/upload and do not overwrite remote file (trust local) - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false # Download an object as a string to use else where in your playbook From 7f1989dc9c816366005e320d25096a5b302ee3dc Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 3 Mar 2014 10:12:03 -0600 Subject: [PATCH 107/772] Adding a wait loop to ec2_elb for the initial lb state when registering Fixes #5305 --- library/cloud/ec2_elb | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/library/cloud/ec2_elb b/library/cloud/ec2_elb index ebd90aeda82..c6c61fd199b 100644 --- a/library/cloud/ec2_elb +++ b/library/cloud/ec2_elb @@ -157,7 +157,17 @@ class ElbManager: to report the instance in-service""" for lb in self.lbs: if wait: - initial_state = self._get_instance_health(lb) + tries = 1 + while True: + initial_state = self._get_instance_health(lb) + if initial_state: + break + time.sleep(1) + tries += 1 + # FIXME: this should be configurable, but since it didn't + # wait at all before this is at least better + if tries > 10: + self.module.fail_json(msg='failed to find the initial state of the load balancer') if enable_availability_zone: self._enable_availailability_zone(lb) From 10f36e8c624d86ddbcaff32c97620680a14f64da Mon Sep 17 00:00:00 2001 From: Joshua Conner Date: Mon, 3 Mar 2014 10:55:28 -0800 Subject: [PATCH 108/772] nova_compute: remove ternary statement --- library/cloud/nova_compute | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/library/cloud/nova_compute b/library/cloud/nova_compute index 6e1730ed3b7..e27ec7c0d49 100644 --- a/library/cloud/nova_compute +++ b/library/cloud/nova_compute @@ -197,7 +197,8 @@ def _get_server_state(module, nova): # with names that partially match the server name, so we have to # strictly filter here servers = [x for x in servers if x.name == module.params['name']] - server = servers[0] if servers else None + if servers: + server = servers[0] except Exception, e: module.fail_json(msg = "Error in getting the server list: %s" % e.message) if server and module.params['state'] == 'present': From 635fdcb5334a71b3ab798cbe358b8337f02f9314 
Mon Sep 17 00:00:00 2001 From: Quinn Slack Date: Mon, 3 Mar 2014 19:59:27 -0800 Subject: [PATCH 109/772] Add new cloud/rds DB instance types (db.m3 and db.cr1 families) --- library/cloud/rds | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/cloud/rds b/library/cloud/rds index d0eeaf35ba5..0e2c8ff2131 100644 --- a/library/cloud/rds +++ b/library/cloud/rds @@ -60,7 +60,7 @@ options: required: false default: null aliases: [] - choices: [ 'db.t1.micro', 'db.m1.small', 'db.m1.medium', 'db.m1.large', 'db.m1.xlarge', 'db.m2.xlarge', 'db.m2.2xlarge', 'db.m2.4xlarge' ] + choices: [ 'db.t1.micro', 'db.m1.small', 'db.m1.medium', 'db.m1.large', 'db.m1.xlarge', 'db.m2.xlarge', 'db.m2.2xlarge', 'db.m2.4xlarge', 'db.m3.medium', 'db.m3.large', 'db.m3.xlarge', 'db.m3.2xlarge', 'db.cr1.8xlarge' ] username: description: - Master database username. Used only when command=create. @@ -290,7 +290,7 @@ def main(): source_instance = dict(required=False), db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False), size = dict(required=False), - instance_type = dict(aliases=['type'], choices=['db.t1.micro', 'db.m1.small', 'db.m1.medium', 'db.m1.large', 'db.m1.xlarge', 'db.m2.xlarge', 'db.m2.2xlarge', 'db.m2.4xlarge'], required=False), + instance_type = dict(aliases=['type'], choices=['db.t1.micro', 'db.m1.small', 'db.m1.medium', 'db.m1.large', 'db.m1.xlarge', 'db.m2.xlarge', 'db.m2.2xlarge', 'db.m2.4xlarge', 'db.m3.medium', 'db.m3.large', 'db.m3.xlarge', 'db.m3.2xlarge', 'db.cr1.8xlarge'], required=False), username = dict(required=False), password = dict(no_log=True, required=False), db_name = dict(required=False), From f568140ea51c5d84747112cbe201a0e2a8d6a125 Mon Sep 17 00:00:00 2001 From: willthames Date: Thu, 27 Feb 2014 16:07:11 +1000 Subject: [PATCH 110/772] Allow ec2_tag module to list the tags of an instance Use the list argument to state to just collect the tags of a resource through the AWS API. --- library/cloud/ec2_tag | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/library/cloud/ec2_tag b/library/cloud/ec2_tag index ca5a337646f..c96a8be6f73 100644 --- a/library/cloud/ec2_tag +++ b/library/cloud/ec2_tag @@ -19,7 +19,7 @@ DOCUMENTATION = ''' module: ec2_tag short_description: create and remove tag(s) to ec2 resources. description: - - Creates and removes tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto. + - Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto. version_added: "1.3" options: resource: @@ -30,7 +30,7 @@ options: aliases: [] state: description: - - Whether the tags should be present or absent on the resource. + - Whether the tags should be present or absent on the resource. Use list to interrogate the tags of an instance. 
required: false default: present choices: ['present', 'absent'] @@ -115,14 +115,14 @@ def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( resource = dict(required=True), - tags = dict(required=True), - state = dict(default='present', choices=['present', 'absent']), + tags = dict(), + state = dict(default='present', choices=['present', 'absent', 'list']), ) ) module = AnsibleModule(argument_spec=argument_spec) resource = module.params.get('resource') - tags = module.params['tags'] + tags = module.params.get('tags') state = module.params.get('state') ec2 = ec2_connect(module) @@ -140,6 +140,8 @@ def main(): tagdict[tag.name] = tag.value if state == 'present': + if not tags: + module.fail_json(msg="tags argument is required when state is present") if set(tags.items()).issubset(set(tagdict.items())): module.exit_json(msg="Tags already exists in %s." %resource, changed=False) else: @@ -151,6 +153,8 @@ def main(): module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True) if state == 'absent': + if not tags: + module.fail_json(msg="tags argument is required when state is absent") for (key, value) in set(tags.items()): if (key, value) not in set(tagdict.items()): baddict[key] = value @@ -162,10 +166,9 @@ def main(): tagger = ec2.delete_tags(resource, dictremove) gettags = ec2.get_all_tags(filters=filters) module.exit_json(msg="Tags %s removed for resource %s." % (dictremove,resource), changed=True) - -# print json.dumps({ -# "current_resource_tags": gettags, -# }) + + if state == 'list': + module.exit_json(changed=False, **tagdict) sys.exit(0) # import module snippets From 5c589af19130179bc5cbb15aa9cc08147effe40a Mon Sep 17 00:00:00 2001 From: Aaron Bassett Date: Tue, 4 Mar 2014 15:58:12 -0500 Subject: [PATCH 111/772] fix missing stdin in _parallel_runner --- lib/ansible/runner/__init__.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index dfcd0487925..a69f0f24e5e 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1075,9 +1075,17 @@ class Runner(object): job_queue.put(host) result_queue = manager.Queue() + try: + fileno = sys.stdin.fileno() + except ValueError: + fileno = None + workers = [] for i in range(self.forks): - new_stdin = os.fdopen(os.dup(sys.stdin.fileno())) + if fileno is not None: + new_stdin = os.fdopen(os.dup(fileno)) + else: + new_stdin = None prc = multiprocessing.Process(target=_executor_hook, args=(job_queue, result_queue, new_stdin)) prc.start() From 49bd8b0b350af32a1667ec8f28e3736ef1e1a400 Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Tue, 4 Mar 2014 18:31:49 -0600 Subject: [PATCH 112/772] Fix inventory for test_dir_inventory It came up that fixing this unit test may relate to another ticket that is open. This work allows us to uncomment this unit test by fixing how we pars variables allowing a quoted variable to contain a '#'. Work also went into cleaning up some of the test data to clarify what was working. Lastly work went into cleaning up formatting so that the code is easily read. 
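The quoting rule the parser change implements can be sketched on its own (a simplified toy helper, not the code from the diff below):

```python
def strip_inline_comment(line):
    # Keep a '#' when it sits between an opening ="/=' and the fragment
    # holding the closing quote; otherwise treat it as a comment marker.
    parts = line.split('#')
    kept = parts[0]
    for nxt in parts[1:]:
        if ('="' in kept and '"' in nxt) or ("='" in kept and "'" in nxt):
            kept = kept + '#' + nxt   # '#' was inside a quoted value
        else:
            break                     # a real comment starts here
    return kept.strip()

print(strip_inline_comment('host1 color="red#blue"  # trailing comment'))
# host1 color="red#blue"
print(strip_inline_comment('host2  # plain comment'))
# host2
```

Like the real implementation, this only looks for quote characters around each '#', so it is a heuristic rather than a full tokenizer.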
--- lib/ansible/inventory/ini.py | 73 ++++++--- test/units/TestInventory.py | 142 +++++++++--------- .../inventory_test_data/inventory_dir/0hosts | 2 +- .../inventory_dir/3comments | 5 +- 4 files changed, 130 insertions(+), 92 deletions(-) diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py index c50fae61164..b26fb5a20fe 100644 --- a/lib/ansible/inventory/ini.py +++ b/lib/ansible/inventory/ini.py @@ -27,6 +27,7 @@ import shlex import re import ast + class InventoryParser(object): """ Host inventory for ansible. @@ -47,7 +48,6 @@ class InventoryParser(object): self._parse_group_variables() return self.groups - # [webservers] # alpha # beta:2345 @@ -65,9 +65,36 @@ class InventoryParser(object): active_group_name = 'ungrouped' for line in self.lines: - line = line.split("#")[0].strip() + + # Split off any comments that are not contained in a variable. + if "#" in line: + split_line = line.split("#") + instances = len(split_line) - 1 + if instances > 0: + marker = 0 + while marker < instances: + if ("=\"" in split_line[marker] and "\"" in split_line[marker + 1]) or ( + "='" in split_line[marker] and "'" in split_line[marker + 1]): + marker += 1 + else: + if marker == 0: + line = split_line[marker] + else: + # We have multiple fragments that we need to combine back together. + # rekram is us reversing that work we did with marker. + rekram = 0 + new_line = split_line[rekram] + while marker > rekram: + rekram += 1 + new_line = new_line + "#" + split_line[rekram] + line = new_line + break + + # Clean up the end of the line. + line = line.strip() + if line.startswith("[") and line.endswith("]"): - active_group_name = line.replace("[","").replace("]","") + active_group_name = line.replace("[", "").replace("]", "") if line.find(":vars") != -1 or line.find(":children") != -1: active_group_name = active_group_name.rsplit(":", 1)[0] if active_group_name not in self.groups: @@ -95,20 +122,18 @@ class InventoryParser(object): if hostname.count(".") == 1: (hostname, port) = hostname.rsplit(".", 1) elif (hostname.find("[") != -1 and - hostname.find("]") != -1 and - hostname.find(":") != -1 and - (hostname.rindex("]") < hostname.rindex(":")) or - (hostname.find("]") == -1 and hostname.find(":") != -1)): - (hostname, port) = hostname.rsplit(":", 1) + hostname.find("]") != -1 and + hostname.find(":") != -1 and + (hostname.rindex("]") < hostname.rindex(":")) or + (hostname.find("]") == -1 and hostname.find(":") != -1)): + (hostname, port) = hostname.rsplit(":", 1) - hostnames = [] if detect_range(hostname): hostnames = expand_hostname_range(hostname) else: hostnames = [hostname] for hn in hostnames: - host = None if hn in self.hosts: host = self.hosts[hn] else: @@ -119,15 +144,24 @@ class InventoryParser(object): if t.startswith('#'): break try: - (k,v) = t.split("=", 1) + (k, v) = t.split("=", 1) except ValueError, e: raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e))) - try: - host.set_variable(k,ast.literal_eval(v)) - except: - # most likely a string that literal_eval - # doesn't like, so just set it - host.set_variable(k,v) + # I am not sure where a variable with a hash needs to be evaluated via ast. + # If an instance comes up this is the condition we need to modify. + if "#" in v: + host.set_variable(k, v) + else: + try: + host.set_variable(k, ast.literal_eval(v)) + # Using explicit exceptions. + # Likely a string that literal_eval does not like. We wil then just set it. + except ValueError: + # For some reason this was thought to be malformed. 
+ host.set_variable(k, v) + except SyntaxError: + # Is this a hash with an equals at the end? + host.set_variable(k, v) self.groups[active_group_name].add_host(host) # [southeast:children] @@ -142,7 +176,7 @@ class InventoryParser(object): if line is None or line == '': continue if line.startswith("[") and line.find(":children]") != -1: - line = line.replace("[","").replace(":children]","") + line = line.replace("[", "").replace(":children]", "") group = self.groups.get(line, None) if group is None: group = self.groups[line] = Group(name=line) @@ -157,7 +191,6 @@ class InventoryParser(object): else: group.add_child_group(kid_group) - # [webservers:vars] # http_port=1234 # maxRequestsPerChild=200 @@ -167,7 +200,7 @@ class InventoryParser(object): for line in self.lines: line = line.strip() if line.startswith("[") and line.find(":vars]") != -1: - line = line.replace("[","").replace(":vars]","") + line = line.replace("[", "").replace(":vars]", "") group = self.groups.get(line, None) if group is None: raise errors.AnsibleError("can't add vars to undefined group: %s" % line) diff --git a/test/units/TestInventory.py b/test/units/TestInventory.py index 2ae6256e62b..6146ec8b9ff 100644 --- a/test/units/TestInventory.py +++ b/test/units/TestInventory.py @@ -5,6 +5,7 @@ from nose.tools import raises from ansible import errors from ansible.inventory import Inventory + class TestInventory(unittest.TestCase): def setUp(self): @@ -49,14 +50,14 @@ class TestInventory(unittest.TestCase): def dir_inventory(self): return Inventory(self.inventory_dir) - all_simple_hosts=['jupiter', 'saturn', 'zeus', 'hera', - 'cerberus001','cerberus002','cerberus003', - 'cottus99', 'cottus100', - 'poseidon', 'thor', 'odin', 'loki', - 'thrudgelmir0', 'thrudgelmir1', 'thrudgelmir2', - 'thrudgelmir3', 'thrudgelmir4', 'thrudgelmir5', - 'Hotep-a', 'Hotep-b', 'Hotep-c', - 'BastC', 'BastD', 'neptun', ] + all_simple_hosts = ['jupiter', 'saturn', 'zeus', 'hera', + 'cerberus001', 'cerberus002', 'cerberus003', + 'cottus99', 'cottus100', + 'poseidon', 'thor', 'odin', 'loki', + 'thrudgelmir0', 'thrudgelmir1', 'thrudgelmir2', + 'thrudgelmir3', 'thrudgelmir4', 'thrudgelmir5', + 'Hotep-a', 'Hotep-b', 'Hotep-c', + 'BastC', 'BastD', 'neptun', ] ##################################### ### Empty inventory format tests @@ -93,36 +94,36 @@ class TestInventory(unittest.TestCase): inventory = self.simple_inventory() hosts = inventory.list_hosts("norse") - expected_hosts=['thor', 'odin', 'loki'] + expected_hosts = ['thor', 'odin', 'loki'] assert sorted(hosts) == sorted(expected_hosts) def test_simple_ungrouped(self): inventory = self.simple_inventory() hosts = inventory.list_hosts("ungrouped") - expected_hosts=['jupiter', 'saturn', - 'thrudgelmir0', 'thrudgelmir1', 'thrudgelmir2', - 'thrudgelmir3', 'thrudgelmir4', 'thrudgelmir5'] + expected_hosts = ['jupiter', 'saturn', + 'thrudgelmir0', 'thrudgelmir1', 'thrudgelmir2', + 'thrudgelmir3', 'thrudgelmir4', 'thrudgelmir5'] assert sorted(hosts) == sorted(expected_hosts) def test_simple_combined(self): inventory = self.simple_inventory() hosts = inventory.list_hosts("norse:greek") - expected_hosts=['zeus', 'hera', 'poseidon', - 'cerberus001','cerberus002','cerberus003', - 'cottus99','cottus100', - 'thor', 'odin', 'loki'] + expected_hosts = ['zeus', 'hera', 'poseidon', + 'cerberus001', 'cerberus002', 'cerberus003', + 'cottus99', 'cottus100', + 'thor', 'odin', 'loki'] assert sorted(hosts) == sorted(expected_hosts) def test_simple_restrict(self): inventory = self.simple_inventory() restricted_hosts = ['hera', 
'poseidon', 'thor'] - expected_hosts=['zeus', 'hera', 'poseidon', - 'cerberus001','cerberus002','cerberus003', - 'cottus99', 'cottus100', - 'thor', 'odin', 'loki'] + expected_hosts = ['zeus', 'hera', 'poseidon', + 'cerberus001', 'cerberus002', 'cerberus003', + 'cottus99', 'cottus100', + 'thor', 'odin', 'loki'] inventory.restrict_to(restricted_hosts) hosts = inventory.list_hosts("norse:greek") @@ -137,12 +138,12 @@ class TestInventory(unittest.TestCase): def test_simple_string_ipv4(self): inventory = Inventory('127.0.0.1,192.168.1.1') hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), sorted(['127.0.0.1','192.168.1.1'])) + self.assertEqual(sorted(hosts), sorted(['127.0.0.1', '192.168.1.1'])) def test_simple_string_ipv4_port(self): inventory = Inventory('127.0.0.1:2222,192.168.1.1') hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), sorted(['127.0.0.1','192.168.1.1'])) + self.assertEqual(sorted(hosts), sorted(['127.0.0.1', '192.168.1.1'])) def test_simple_string_ipv4_vars(self): inventory = Inventory('127.0.0.1:2222,192.168.1.1') @@ -152,12 +153,12 @@ class TestInventory(unittest.TestCase): def test_simple_string_ipv6(self): inventory = Inventory('FE80:EF45::12:1,192.168.1.1') hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), sorted(['FE80:EF45::12:1','192.168.1.1'])) + self.assertEqual(sorted(hosts), sorted(['FE80:EF45::12:1', '192.168.1.1'])) def test_simple_string_ipv6_port(self): inventory = Inventory('[FE80:EF45::12:1]:2222,192.168.1.1') hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), sorted(['FE80:EF45::12:1','192.168.1.1'])) + self.assertEqual(sorted(hosts), sorted(['FE80:EF45::12:1', '192.168.1.1'])) def test_simple_string_ipv6_vars(self): inventory = Inventory('[FE80:EF45::12:1]:2222,192.168.1.1') @@ -167,12 +168,12 @@ class TestInventory(unittest.TestCase): def test_simple_string_fqdn(self): inventory = Inventory('foo.example.com,bar.example.com') hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), sorted(['foo.example.com','bar.example.com'])) + self.assertEqual(sorted(hosts), sorted(['foo.example.com', 'bar.example.com'])) def test_simple_string_fqdn_port(self): inventory = Inventory('foo.example.com:2222,bar.example.com') hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), sorted(['foo.example.com','bar.example.com'])) + self.assertEqual(sorted(hosts), sorted(['foo.example.com', 'bar.example.com'])) def test_simple_string_fqdn_vars(self): inventory = Inventory('foo.example.com:2222,bar.example.com') @@ -191,26 +192,26 @@ class TestInventory(unittest.TestCase): inventory = self.simple_inventory() vars = inventory.get_variables('hera') - expected = { 'ansible_ssh_port': 3000, - 'group_names': ['greek'], - 'inventory_hostname': 'hera', - 'inventory_hostname_short': 'hera' } + expected = {'ansible_ssh_port': 3000, + 'group_names': ['greek'], + 'inventory_hostname': 'hera', + 'inventory_hostname_short': 'hera'} assert vars == expected def test_large_range(self): inventory = self.large_range_inventory() hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), sorted('bob%03i' %i for i in range(0, 143))) + self.assertEqual(sorted(hosts), sorted('bob%03i' % i for i in range(0, 143))) def test_subset(self): inventory = self.simple_inventory() inventory.subset('odin;thor,loki') - self.assertEqual(sorted(inventory.list_hosts()), sorted(['thor','odin','loki'])) + self.assertEqual(sorted(inventory.list_hosts()), sorted(['thor', 'odin', 'loki'])) def test_subset_range(self): inventory = 
self.simple_inventory() inventory.subset('greek[0-2];norse[0]') - self.assertEqual(sorted(inventory.list_hosts()), sorted(['zeus','hera','thor'])) + self.assertEqual(sorted(inventory.list_hosts()), sorted(['zeus', 'hera', 'thor'])) def test_subet_range_empty_group(self): inventory = self.simple_inventory() @@ -220,11 +221,11 @@ class TestInventory(unittest.TestCase): def test_subset_filename(self): inventory = self.simple_inventory() inventory.subset('@' + os.path.join(self.test_dir, 'restrict_pattern')) - self.assertEqual(sorted(inventory.list_hosts()), sorted(['thor','odin'])) + self.assertEqual(sorted(inventory.list_hosts()), sorted(['thor', 'odin'])) @raises(errors.AnsibleError) def testinvalid_entry(self): - Inventory('1234') + Inventory('1234') ################################################### ### INI file advanced tests @@ -240,7 +241,7 @@ class TestInventory(unittest.TestCase): g=' g ', h=' h ', i="' i \"", j='" j', rga='1', rgb='2', rgc='3', inventory_hostname='rtp_a', inventory_hostname_short='rtp_a', - group_names=[ 'eastcoast', 'nc', 'redundantgroup', 'redundantgroup2', 'redundantgroup3', 'rtp', 'us' ] + group_names=['eastcoast', 'nc', 'redundantgroup', 'redundantgroup2', 'redundantgroup3', 'rtp', 'us'] ) print vars print expected @@ -249,9 +250,9 @@ class TestInventory(unittest.TestCase): def test_complex_group_names(self): inventory = self.complex_inventory() tests = { - 'host1': [ 'role1', 'role3' ], - 'host2': [ 'role1', 'role2' ], - 'host3': [ 'role2', 'role3' ] + 'host1': ['role1', 'role3'], + 'host2': ['role1', 'role2'], + 'host3': ['role2', 'role3'] } for host, roles in tests.iteritems(): group_names = inventory.get_variables(host)['group_names'] @@ -275,11 +276,10 @@ class TestInventory(unittest.TestCase): def test_complex_enumeration(self): - expected1 = ['rtp_b'] expected2 = ['rtp_a', 'rtp_b'] expected3 = ['rtp_a', 'rtp_b', 'rtp_c', 'tri_a', 'tri_b', 'tri_c'] - expected4 = ['rtp_b', 'orlando' ] + expected4 = ['rtp_b', 'orlando'] expected5 = ['blade-a-1'] inventory = self.complex_inventory() @@ -303,34 +303,34 @@ class TestInventory(unittest.TestCase): @raises(errors.AnsibleError) def test_invalid_range(self): - Inventory(os.path.join(self.test_dir, 'inventory','test_incorrect_range')) + Inventory(os.path.join(self.test_dir, 'inventory', 'test_incorrect_range')) @raises(errors.AnsibleError) def test_missing_end(self): - Inventory(os.path.join(self.test_dir, 'inventory','test_missing_end')) + Inventory(os.path.join(self.test_dir, 'inventory', 'test_missing_end')) @raises(errors.AnsibleError) def test_incorrect_format(self): - Inventory(os.path.join(self.test_dir, 'inventory','test_incorrect_format')) + Inventory(os.path.join(self.test_dir, 'inventory', 'test_incorrect_format')) @raises(errors.AnsibleError) def test_alpha_end_before_beg(self): - Inventory(os.path.join(self.test_dir, 'inventory','test_alpha_end_before_beg')) + Inventory(os.path.join(self.test_dir, 'inventory', 'test_alpha_end_before_beg')) def test_combined_range(self): - i = Inventory(os.path.join(self.test_dir, 'inventory','test_combined_range')) + i = Inventory(os.path.join(self.test_dir, 'inventory', 'test_combined_range')) hosts = i.list_hosts('test') - expected_hosts=['host1A','host2A','host1B','host2B'] + expected_hosts = ['host1A', 'host2A', 'host1B', 'host2B'] assert sorted(hosts) == sorted(expected_hosts) def test_leading_range(self): - i = Inventory(os.path.join(self.test_dir, 'inventory','test_leading_range')) + i = Inventory(os.path.join(self.test_dir, 'inventory', 'test_leading_range')) 
hosts = i.list_hosts('test') - expected_hosts=['1.host','2.host','A.host','B.host'] + expected_hosts = ['1.host', '2.host', 'A.host', 'B.host'] assert sorted(hosts) == sorted(expected_hosts) hosts2 = i.list_hosts('test2') - expected_hosts2=['1.host','2.host','3.host'] + expected_hosts2 = ['1.host', '2.host', '3.host'] assert sorted(hosts2) == sorted(expected_hosts2) ################################################### @@ -340,38 +340,38 @@ class TestInventory(unittest.TestCase): inventory = self.script_inventory() hosts = inventory.list_hosts() - expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] + expected_hosts = ['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] - print "Expected: %s"%(expected_hosts) - print "Got : %s"%(hosts) + print "Expected: %s" % expected_hosts + print "Got : %s" % hosts assert sorted(hosts) == sorted(expected_hosts) def test_script_all(self): inventory = self.script_inventory() hosts = inventory.list_hosts('all') - expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] + expected_hosts = ['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] assert sorted(hosts) == sorted(expected_hosts) def test_script_norse(self): inventory = self.script_inventory() hosts = inventory.list_hosts("norse") - expected_hosts=['thor', 'odin', 'loki'] + expected_hosts = ['thor', 'odin', 'loki'] assert sorted(hosts) == sorted(expected_hosts) def test_script_combined(self): inventory = self.script_inventory() hosts = inventory.list_hosts("norse:greek") - expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] + expected_hosts = ['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] assert sorted(hosts) == sorted(expected_hosts) def test_script_restrict(self): inventory = self.script_inventory() restricted_hosts = ['hera', 'poseidon', 'thor'] - expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] + expected_hosts = ['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] inventory.restrict_to(restricted_hosts) hosts = inventory.list_hosts("norse:greek") @@ -389,7 +389,7 @@ class TestInventory(unittest.TestCase): print "VARS=%s" % vars - assert vars == {'hammer':True, + assert vars == {'hammer': True, 'group_names': ['norse'], 'inventory_hostname': 'thor', 'inventory_hostname_short': 'thor'} @@ -417,15 +417,17 @@ class TestInventory(unittest.TestCase): auth = inventory.get_variables('neptun')['auth'] assert auth == 'YWRtaW46YWRtaW4=' - # test disabled as needs to be updated to model desired behavior - # - #def test_dir_inventory(self): - # inventory = self.dir_inventory() - # vars = inventory.get_variables('zeus') - # - # print "VARS=%s" % vars - # - # assert vars == {'inventory_hostname': 'zeus', - # 'inventory_hostname_short': 'zeus', - # 'group_names': ['greek', 'major-god', 'ungrouped'], - # 'var_a': '1#2'} + def test_dir_inventory(self): + inventory = self.dir_inventory() + + host_vars = inventory.get_variables('zeus') + + expected_vars = {'inventory_hostname': 'zeus', + 'inventory_hostname_short': 'zeus', + 'group_names': ['greek', 'major-god', 'ungrouped'], + 'var_a': '2#3'} + + print "HOST VARS=%s" % host_vars + print "EXPECTED VARS=%s" % expected_vars + + assert host_vars == expected_vars \ No newline at end of file diff --git a/test/units/inventory_test_data/inventory_dir/0hosts b/test/units/inventory_test_data/inventory_dir/0hosts index 27fc46e8530..6f78a33a228 100644 --- a/test/units/inventory_test_data/inventory_dir/0hosts +++ 
b/test/units/inventory_test_data/inventory_dir/0hosts @@ -1,3 +1,3 @@ -zeus var_a=2 +zeus var_a=0 morpheus thor diff --git a/test/units/inventory_test_data/inventory_dir/3comments b/test/units/inventory_test_data/inventory_dir/3comments index 74642f13cc7..f18577fc7bd 100644 --- a/test/units/inventory_test_data/inventory_dir/3comments +++ b/test/units/inventory_test_data/inventory_dir/3comments @@ -1,5 +1,8 @@ +[titan] +cronus var_a="a#b" var_b="b#c" var_c="c#d" # Is this overkill? + [major-god] # group with inline comments -zeus var_a="1#2" # host with inline comments and "#" in the var string +zeus var_a="2#3" # host with inline comments and "#" in the var string # A comment thor From 347b425c671378be08dfec554bad69f8d7861c4c Mon Sep 17 00:00:00 2001 From: "Leo R. Lundgren" Date: Wed, 5 Mar 2014 04:39:08 +0100 Subject: [PATCH 113/772] Make irc module accept the nick being shortened by the server. This can happen if the server has a NICKLEN setting which is less than the length of the specified nick. With this patch we now grab that nick and use it moving forward, instead of failing because we didn't get back the one we specified, in the connection response. --- library/notification/irc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/library/notification/irc b/library/notification/irc index 11bdc4a95ec..5fa05c41488 100644 --- a/library/notification/irc +++ b/library/notification/irc @@ -39,7 +39,7 @@ options: default: 6667 nick: description: - - Nickname + - Nickname. May be shortened, depending on server's NICKLEN setting. required: false default: ansible msg: @@ -122,7 +122,11 @@ def send_msg(channel, msg, server='localhost', port='6667', start = time.time() while 1: motd += irc.recv(1024) - if re.search('^:\S+ 00[1-4] %s :' % nick, motd, flags=re.M): + # The server might send back a shorter nick than we specified (due to NICKLEN), + # so grab that and use it from now on (assuming we find the 00[1-4] response). + match = re.search('^:\S+ 00[1-4] (?P\S+) :', motd, flags=re.M) + if match: + nick = match.group('nick') break elif time.time() - start > timeout: raise Exception('Timeout waiting for IRC server welcome response') From dd62de03843a038f75a983967757687ac1794a4f Mon Sep 17 00:00:00 2001 From: "Leo R. Lundgren" Date: Wed, 5 Mar 2014 05:12:21 +0100 Subject: [PATCH 114/772] Fix whitespace (tabs -> spaces). --- library/notification/irc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/notification/irc b/library/notification/irc index 5fa05c41488..e3d5e65d66a 100644 --- a/library/notification/irc +++ b/library/notification/irc @@ -122,8 +122,8 @@ def send_msg(channel, msg, server='localhost', port='6667', start = time.time() while 1: motd += irc.recv(1024) - # The server might send back a shorter nick than we specified (due to NICKLEN), - # so grab that and use it from now on (assuming we find the 00[1-4] response). + # The server might send back a shorter nick than we specified (due to NICKLEN), + # so grab that and use it from now on (assuming we find the 00[1-4] response). match = re.search('^:\S+ 00[1-4] (?P\S+) :', motd, flags=re.M) if match: nick = match.group('nick') From 2a108b2aeb49a0bac297ba0357ec90f979f24631 Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Fri, 14 Feb 2014 21:36:13 -0500 Subject: [PATCH 115/772] Added subnet tagging. 
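
The boto call pattern this change builds on is roughly the following
sketch (a sketch only: the VPC id, CIDR, availability zone, and tag
values are made-up examples, and connection setup is simplified to the
default credential lookup):

    import boto.vpc

    # Create the subnet first, then tag it with a second API call.
    vpc_conn = boto.vpc.VPCConnection()
    new_subnet = vpc_conn.create_subnet('vpc-0123abcd', '172.22.1.0/24', 'us-west-2c')
    # boto's create_tags takes a list of resource ids plus a dict
    # mapping tag names to values.
    vpc_conn.create_tags([new_subnet.id], {'Environment': 'Dev', 'Tier': 'Web'})
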
--- library/cloud/ec2_vpc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc index 9b9fb95a0b2..10a52533df1 100644 --- a/library/cloud/ec2_vpc +++ b/library/cloud/ec2_vpc @@ -277,7 +277,10 @@ def create_vpc(module, vpc_conn): add_subnet = False if add_subnet: try: - vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None)) + created_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None)) + subnet_tags = subnet.get('tags', None) + if subnet_tags: + vpc_conn.create_tags(created_subnet.id, subnet_tags) changed = True except EC2ResponseError, e: module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e)) From 4dbac647bc18e09663cafb4d6770f0bb21f3bc8d Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Fri, 14 Feb 2014 21:46:05 -0500 Subject: [PATCH 116/772] Added documentation for subnets: tags: option --- library/cloud/ec2_vpc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc index 10a52533df1..e92b9f77ed3 100644 --- a/library/cloud/ec2_vpc +++ b/library/cloud/ec2_vpc @@ -46,7 +46,7 @@ options: choices: [ "yes", "no" ] subnets: description: - - "A dictionary array of subnets to add of the form: { cidr: ..., az: ... }. Where az is the desired availability zone of the subnet, but it is not required. All VPC subnets not in this list will be removed." + - "A dictionary array of subnets to add of the form: { cidr: ..., az: ... , tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed." required: false default: null aliases: [] @@ -137,10 +137,13 @@ EXAMPLES = ''' subnets: - cidr: 172.22.1.0/24 az: us-west-2c + tags: { "Environment":"Dev", "Tier" : "Web" } - cidr: 172.22.2.0/24 az: us-west-2b + tags: { "Environment":"Dev", "Tier" : "App" } - cidr: 172.22.3.0/24 az: us-west-2a + tags: { "Environment":"Dev", "Tier" : "DB" } internet_gateway: True route_tables: - subnets: From 428c69c08a243326a92096199c39b96667894aad Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Wed, 19 Feb 2014 16:26:40 -0500 Subject: [PATCH 117/772] Refactored subnet tagging to account for AWS delays; added 'tags' attribute to 'subnet' node in the returned json. --- library/cloud/ec2_vpc | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc index e92b9f77ed3..911cb4125df 100644 --- a/library/cloud/ec2_vpc +++ b/library/cloud/ec2_vpc @@ -280,10 +280,18 @@ def create_vpc(module, vpc_conn): add_subnet = False if add_subnet: try: - created_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None)) - subnet_tags = subnet.get('tags', None) - if subnet_tags: - vpc_conn.create_tags(created_subnet.id, subnet_tags) + new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None)) + new_subnet_tags = subnet.get('tags', None) + if new_subnet_tags: + # Sometimes AWS takes its time to create a subnet and so using new subnets's id + # to create tags results in exception. 
+ # boto doesn't seem to refresh 'state' of the newly created subnet, i.e.: it's always 'pending' + # so i resorted to polling vpc_conn.get_all_subnets with the id of the newly added subnet + while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0: + time.sleep(0.1) + + vpc_conn.create_tags(new_subnet.id, new_subnet_tags) + changed = True except EC2ResponseError, e: module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e)) @@ -411,14 +419,15 @@ def create_vpc(module, vpc_conn): created_vpc_id = vpc.id returned_subnets = [] current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id }) + for sn in current_subnets: returned_subnets.append({ + 'tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})), 'cidr': sn.cidr_block, 'az': sn.availability_zone, 'id': sn.id, }) - return (vpc_dict, created_vpc_id, returned_subnets, changed) def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None): From 3c4b14523b45449acaf4b23d77b2ed356cc053e2 Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Mon, 24 Feb 2014 23:50:12 -0500 Subject: [PATCH 118/772] Renamed subnet's 'tags' attribute into 'instance_tags' to distinguish it from Ansible's own 'tags' and to conform to ec2 module naming for AWS tags. --- library/cloud/ec2_vpc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc index 911cb4125df..439a20ddc68 100644 --- a/library/cloud/ec2_vpc +++ b/library/cloud/ec2_vpc @@ -46,7 +46,7 @@ options: choices: [ "yes", "no" ] subnets: description: - - "A dictionary array of subnets to add of the form: { cidr: ..., az: ... , tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed." + - "A dictionary array of subnets to add of the form: { cidr: ..., az: ... , instance_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: instance_tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed." required: false default: null aliases: [] @@ -137,13 +137,13 @@ EXAMPLES = ''' subnets: - cidr: 172.22.1.0/24 az: us-west-2c - tags: { "Environment":"Dev", "Tier" : "Web" } + instance_tags: { "Environment":"Dev", "Tier" : "Web" } - cidr: 172.22.2.0/24 az: us-west-2b - tags: { "Environment":"Dev", "Tier" : "App" } + instance_tags: { "Environment":"Dev", "Tier" : "App" } - cidr: 172.22.3.0/24 az: us-west-2a - tags: { "Environment":"Dev", "Tier" : "DB" } + instance_tags: { "Environment":"Dev", "Tier" : "DB" } internet_gateway: True route_tables: - subnets: @@ -281,7 +281,7 @@ def create_vpc(module, vpc_conn): if add_subnet: try: new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None)) - new_subnet_tags = subnet.get('tags', None) + new_subnet_tags = subnet.get('instance_tags', None) if new_subnet_tags: # Sometimes AWS takes its time to create a subnet and so using new subnets's id # to create tags results in exception. 
@@ -422,7 +422,7 @@ def create_vpc(module, vpc_conn): for sn in current_subnets: returned_subnets.append({ - 'tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})), + 'instance_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})), 'cidr': sn.cidr_block, 'az': sn.availability_zone, 'id': sn.id, From e56cffe3a5d6f782ae3a714be5d12efb2c26cf05 Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Sat, 1 Mar 2014 20:41:24 -0500 Subject: [PATCH 119/772] Renamed instance_tags to resource_tags based on community feedback. --- library/cloud/ec2_vpc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc index 439a20ddc68..7427fc2c905 100644 --- a/library/cloud/ec2_vpc +++ b/library/cloud/ec2_vpc @@ -46,7 +46,7 @@ options: choices: [ "yes", "no" ] subnets: description: - - "A dictionary array of subnets to add of the form: { cidr: ..., az: ... , instance_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: instance_tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed." + - "A dictionary array of subnets to add of the form: { cidr: ..., az: ... , resource_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: resource_tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed." required: false default: null aliases: [] @@ -137,13 +137,13 @@ EXAMPLES = ''' subnets: - cidr: 172.22.1.0/24 az: us-west-2c - instance_tags: { "Environment":"Dev", "Tier" : "Web" } + resource_tags: { "Environment":"Dev", "Tier" : "Web" } - cidr: 172.22.2.0/24 az: us-west-2b - instance_tags: { "Environment":"Dev", "Tier" : "App" } + resource_tags: { "Environment":"Dev", "Tier" : "App" } - cidr: 172.22.3.0/24 az: us-west-2a - instance_tags: { "Environment":"Dev", "Tier" : "DB" } + resource_tags: { "Environment":"Dev", "Tier" : "DB" } internet_gateway: True route_tables: - subnets: @@ -281,7 +281,7 @@ def create_vpc(module, vpc_conn): if add_subnet: try: new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None)) - new_subnet_tags = subnet.get('instance_tags', None) + new_subnet_tags = subnet.get('resource_tags', None) if new_subnet_tags: # Sometimes AWS takes its time to create a subnet and so using new subnets's id # to create tags results in exception. 
@@ -422,7 +422,7 @@ def create_vpc(module, vpc_conn): for sn in current_subnets: returned_subnets.append({ - 'instance_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})), + 'resource_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})), 'cidr': sn.cidr_block, 'az': sn.availability_zone, 'id': sn.id, From daa7416323b047abda7d0b78c269ae066797e861 Mon Sep 17 00:00:00 2001 From: Julien DAUPHANT Date: Wed, 5 Mar 2014 16:42:52 +0100 Subject: [PATCH 120/772] Add linux module parameters for the modprobe module --- library/system/modprobe | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/library/system/modprobe b/library/system/modprobe index 82ca86b9bd5..1c06142b8eb 100644 --- a/library/system/modprobe +++ b/library/system/modprobe @@ -34,11 +34,18 @@ options: choices: [ present, absent ] description: - Whether the module should be present or absent. + params: + required: false + default: "" + description: + - Modules parameters. ''' EXAMPLES = ''' # Add the 802.1q module - modprobe: name=8021q state=present +# Add the dummy module +- modprobe: name=dummy state=present params="numdummies=2" ''' def main(): @@ -46,6 +53,7 @@ def main(): argument_spec={ 'name': {'required': True}, 'state': {'default': 'present', 'choices': ['present', 'absent']}, + 'params': {'default': ''}, }, supports_check_mode=True, ) @@ -54,6 +62,7 @@ def main(): 'failed': False, 'name': module.params['name'], 'state': module.params['state'], + 'params': module.params['params'], } # Check if module is present @@ -81,7 +90,7 @@ def main(): # Add/remove module as needed if args['state'] == 'present': if not present: - rc, _, err = module.run_command(['modprobe', args['name']]) + rc, _, err = module.run_command(['modprobe', args['name'], args['params']]) if rc != 0: module.fail_json(msg=err, **args) args['changed'] = True From 16bb6c88f59fb70f5ca0e9f8730921d25b0cbbc0 Mon Sep 17 00:00:00 2001 From: Julien DAUPHANT Date: Wed, 5 Mar 2014 16:45:20 +0100 Subject: [PATCH 121/772] Add version_added 1.6 for the params modprobe option --- library/system/modprobe | 1 + 1 file changed, 1 insertion(+) diff --git a/library/system/modprobe b/library/system/modprobe index 1c06142b8eb..04f800f6793 100644 --- a/library/system/modprobe +++ b/library/system/modprobe @@ -37,6 +37,7 @@ options: params: required: false default: "" + version_added: "1.6" description: - Modules parameters. ''' From c4fea2d5b60b82902f0722715cf95ba34334c983 Mon Sep 17 00:00:00 2001 From: Jeremy Schneider Date: Wed, 5 Mar 2014 10:01:41 -0600 Subject: [PATCH 122/772] Update playbooks_vault.rst very minor grammatical fix for readability --- docsite/rst/playbooks_vault.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_vault.rst b/docsite/rst/playbooks_vault.rst index 20981215657..991c58f16ce 100644 --- a/docsite/rst/playbooks_vault.rst +++ b/docsite/rst/playbooks_vault.rst @@ -14,7 +14,7 @@ What Can Be Encrypted With Vault The vault feature can encrypt any structured data file used by Ansible. This can include "group_vars/" or "host_vars/" inventory variables, variables loaded by "include_vars" or "vars_files", or variable files passed on the ansible-playbook command line with "-e @file.yml" or "-e @file.json". Role variables and defaults are also included! -Because Ansible tasks, handlers, and so on are also data, these two can also be encrypted with vault. 
If you'd like to not betray what variables you are even using, you can go as far to keep an individual task file entirely encrypted. However, that might be a little much and could annoy your coworkers :) +Because Ansible tasks, handlers, and so on are also data, these can also be encrypted with vault. If you'd like to not betray what variables you are even using, you can go as far to keep an individual task file entirely encrypted. However, that might be a little much and could annoy your coworkers :) .. _creating_files: From 38abd5e20ebf9d399c29d9434bb6ddfc3566ad5d Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Wed, 5 Mar 2014 10:50:58 -0600 Subject: [PATCH 123/772] Break this out into a reusable function and document regex shortcomings. --- lib/ansible/inventory/ini.py | 23 ++--------------------- lib/ansible/utils/__init__.py | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 21 deletions(-) diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py index b26fb5a20fe..29929672bb0 100644 --- a/lib/ansible/inventory/ini.py +++ b/lib/ansible/inventory/ini.py @@ -23,6 +23,7 @@ from ansible.inventory.group import Group from ansible.inventory.expand_hosts import detect_range from ansible.inventory.expand_hosts import expand_hostname_range from ansible import errors +import ansible.utils as utils import shlex import re import ast @@ -68,27 +69,7 @@ class InventoryParser(object): # Split off any comments that are not contained in a variable. if "#" in line: - split_line = line.split("#") - instances = len(split_line) - 1 - if instances > 0: - marker = 0 - while marker < instances: - if ("=\"" in split_line[marker] and "\"" in split_line[marker + 1]) or ( - "='" in split_line[marker] and "'" in split_line[marker + 1]): - marker += 1 - else: - if marker == 0: - line = split_line[marker] - else: - # We have multiple fragments that we need to combine back together. - # rekram is us reversing that work we did with marker. - rekram = 0 - new_line = split_line[rekram] - while marker > rekram: - rekram += 1 - new_line = new_line + "#" + split_line[rekram] - line = new_line - break + line = utils.split_unquoted_hash(line) # Clean up the end of the line. line = line.strip() diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 02148faff0c..996b457cb17 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1071,3 +1071,37 @@ def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS): password.append(new_char) return ''.join(password) + + +def split_unquoted_hash(line): + ''' + Carve off comments from a line which are not contained in quotes and a part of an assignment. + ''' + + # We would really like to have this using a regex to make it less code. For instance: + # line = re.split('(? 0: + marker = 0 + while marker < instances: + if ("=\"" in split_line[marker] and "\"" in split_line[marker + 1]) or ( + "='" in split_line[marker] and "'" in split_line[marker + 1]): + marker += 1 + else: + if marker == 0: + line = split_line[marker] + else: + # We have multiple fragments that we need to combine back together. + # rekram is us reversing that work we did with marker. 
+                    rekram = 0
+                    new_line = split_line[rekram]
+                    while marker > rekram:
+                        rekram += 1
+                        new_line = new_line + "#" + split_line[rekram]
+                    line = new_line
+                break
+    return line
\ No newline at end of file

From 23fee4b836803d3256baa357b7f00080bfc7faea Mon Sep 17 00:00:00 2001
From: g-k-r
Date: Fri, 31 Jan 2014 12:46:00 +0100
Subject: [PATCH 124/772] Add test for the same host in different files in an
 inventory dir

Tests issue #5749: the same host defined in different groups, which are
in turn defined in different ini files in an inventory directory.

Conflicts:
	test/units/TestInventory.py
---
 test/units/TestInventory.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/test/units/TestInventory.py b/test/units/TestInventory.py
index 6146ec8b9ff..d7211775033 100644
--- a/test/units/TestInventory.py
+++ b/test/units/TestInventory.py
@@ -430,4 +430,12 @@ class TestInventory(unittest.TestCase):
         print "HOST VARS=%s" % host_vars
         print "EXPECTED VARS=%s" % expected_vars
 
-        assert host_vars == expected_vars
\ No newline at end of file
+        assert host_vars == expected_vars
+
+    def test_dir_inventory_multiple_groups(self):
+        inventory = self.dir_inventory()
+        group_greek = inventory.get_group('greek')
+        group_major_god = inventory.get_group('major-god')
+        actual_host_names = [host.name for host in group_greek.get_hosts()];
+        print "%s : %s " % (group_greek.name, actual_host_names)
+        assert actual_host_names == ['zeus','morpheus']

From a3c36a6853c79b47f8db3c8cc9acac7575cc306e Mon Sep 17 00:00:00 2001
From: g-k-r
Date: Fri, 31 Jan 2014 12:34:21 +0100
Subject: [PATCH 125/772] Modified test to use get_hosts instead of get_groups

Closes #5749

Conflicts:
	test/units/TestInventory.py
---
 test/units/TestInventory.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/test/units/TestInventory.py b/test/units/TestInventory.py
index d7211775033..b061c1112b3 100644
--- a/test/units/TestInventory.py
+++ b/test/units/TestInventory.py
@@ -434,8 +434,7 @@ class TestInventory(unittest.TestCase):
 
     def test_dir_inventory_multiple_groups(self):
-        inventory = self.dir_inventory()
-        group_greek = inventory.get_group('greek')
-        group_major_god = inventory.get_group('major-god')
-        actual_host_names = [host.name for host in group_greek.get_hosts()];
-        print "%s : %s " % (group_greek.name, actual_host_names)
-        assert actual_host_names == ['zeus','morpheus']
+        inventory = self.dir_inventory()
+        group_greek = inventory.get_hosts('greek')
+        actual_host_names = [host.name for host in group_greek];
+        print "greek : %s " % (actual_host_names)
+        assert actual_host_names == ['zeus','morpheus']

From 22a1fb28d6c8faa4f9cdc93711fe118c7b273943 Mon Sep 17 00:00:00 2001
From: Richard C Isaacson
Date: Wed, 5 Mar 2014 13:44:15 -0600
Subject: [PATCH 126/772] Minor cleanup.
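
The behavior these tests pin down, as a self-contained sketch (assumes a
source checkout as the working directory; the fixture directory is the
one used by the tests above):

    from ansible.inventory import Inventory

    # 'zeus' is defined in more than one file of the inventory
    # directory, so it must still resolve through each of its groups
    # exactly once.
    inventory = Inventory('test/units/inventory_test_data/inventory_dir')
    greek_hosts = [host.name for host in inventory.get_hosts('greek')]
    assert greek_hosts == ['zeus', 'morpheus']
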
--- test/units/TestInventory.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/units/TestInventory.py b/test/units/TestInventory.py index b061c1112b3..2f7b47989ac 100644 --- a/test/units/TestInventory.py +++ b/test/units/TestInventory.py @@ -433,8 +433,8 @@ class TestInventory(unittest.TestCase): assert host_vars == expected_vars def test_dir_inventory_multiple_groups(self): - inventory = self.dir_inventory() - group_greek = inventory.get_hosts('greek') - actual_host_names = [host.name for host in group_greek]; - print "greek : %s " % (actual_host_names) - assert actual_host_names == ['zeus','morpheus'] + inventory = self.dir_inventory() + group_greek = inventory.get_hosts('greek') + actual_host_names = [host.name for host in group_greek] + print "greek : %s " % actual_host_names + assert actual_host_names == ['zeus', 'morpheus'] From 6129ea7566ffd8113b2038076d4d1550cd6383a4 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 5 Mar 2014 14:51:40 -0500 Subject: [PATCH 127/772] Fixes #6298 and adds a sudo unit test for synchronize --- .../runner/action_plugins/synchronize.py | 9 ++++-- test/units/TestSynchronize.py | 30 ++++++++++++++++++- 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/lib/ansible/runner/action_plugins/synchronize.py b/lib/ansible/runner/action_plugins/synchronize.py index d7c9113f28e..c66fcdff3ce 100644 --- a/lib/ansible/runner/action_plugins/synchronize.py +++ b/lib/ansible/runner/action_plugins/synchronize.py @@ -173,6 +173,11 @@ class ActionModule(object): if self.runner.noop_on_check(inject): module_items += " CHECKMODE=True" - return self.runner._execute_module(conn, tmp, 'synchronize', - module_items, inject=inject) + # run the module and store the result + result = self.runner._execute_module(conn, tmp, 'synchronize', module_items, inject=inject) + + # reset the sudo property + self.runner.sudo = self.original_sudo + + return result diff --git a/test/units/TestSynchronize.py b/test/units/TestSynchronize.py index 7965f2295e7..dfb1a129e5a 100644 --- a/test/units/TestSynchronize.py +++ b/test/units/TestSynchronize.py @@ -61,7 +61,35 @@ class TestSynchronize(unittest.TestCase): assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1" assert runner.executed_args == "dest=root@el6.lab.net:/tmp/bar src=/tmp/foo", "wrong args used" - assert runner.sudo == False, "sudo not set to false" + assert runner.sudo == None, "sudo was not reset to None" + + def test_synchronize_action_sudo(self): + + """ verify the synchronize action plugin unsets and then sets sudo """ + + runner = FakeRunner() + runner.sudo = True + runner.remote_user = "root" + runner.transport = "ssh" + conn = FakeConn() + inject = { + 'inventory_hostname': "el6.lab.net", + 'inventory_hostname_short': "el6", + 'ansible_connection': None, + 'ansible_ssh_user': 'root', + 'delegate_to': None, + 'playbook_dir': '.', + } + + x = Synchronize(runner) + x.setup("synchronize", inject) + x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject) + + assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1" + assert runner.executed_args == 'dest=root@el6.lab.net:/tmp/bar src=/tmp/foo rsync_path="sudo rsync"', \ + "wrong args used: %s" % runner.executed_args + assert runner.sudo == True, "sudo was not reset to True" + def test_synchronize_action_local(self): From 418b9fdef668c8e37d5fa234ad258f8fa0409ed4 Mon Sep 17 00:00:00 2001 From: Paul Durivage Date: Tue, 4 Mar 2014 19:44:17 -0600 
Subject: [PATCH 128/772] Spelling, grammar, layout changes, plus more examples and an orchestration chapter

---
 docsite/rst/guide_rax.rst | 83 ++++++++++++++++++---------------------
 1 file changed, 39 insertions(+), 44 deletions(-)

diff --git a/docsite/rst/guide_rax.rst b/docsite/rst/guide_rax.rst
index 37ca6b796c6..626bfac1ee9 100644
--- a/docsite/rst/guide_rax.rst
+++ b/docsite/rst/guide_rax.rst
@@ -11,7 +11,7 @@ Introduction
 
 Ansible contains a number of core modules for interacting with Rackspace Cloud.
 The purpose of this section is to explain how to put Ansible modules together
-(and use inventory scripts) to use Ansible in Rackspace Cloud context.
+(and use inventory scripts) to use Ansible in a Rackspace Cloud context.
 
 Prerequisites for using the rax modules are minimal. In addition to ansible itself,
 all of the modules require and are tested against pyrax 1.5 or higher.
@@ -32,7 +32,7 @@ to add localhost to the inventory file. (Ansible may not require this manual st
 [localhost]
 localhost ansible_connection=local
 
-In playbook steps we'll typically be using the following pattern:
+In playbook steps, we'll typically be using the following pattern:
 
 .. code-block:: yaml
 
@@ -66,21 +66,17 @@ https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authentic
 Running from a Python Virtual Environment (Optional)
 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 
-Special considerations need to
-be taken if pyrax is not installed globally but instead using a python virtualenv (it's fine if you install it globally).
-
-Ansible assumes, unless otherwise instructed, that the python binary will live at
-/usr/bin/python. This is done so via the interpret line in the modules, however
-when instructed using ansible_python_interpreter, ansible will use this specified path instead for finding
-python.
-
-If using virtualenv, you may wish to modify your localhost inventory definition to find this location as follows:
+There are special considerations when Ansible is installed to a Python virtualenv, rather than the default of installing at a global scope. Ansible assumes, unless otherwise instructed, that the python binary will live at /usr/bin/python. This is done via the interpreter line in modules; however, when instructed using ansible_python_interpreter, Ansible will use this specified path instead to find Python. **This is a common cause of module failures**, because the user assumes that modules running on 'localhost', or perhaps running via 'local_action', are using the virtualenv Python interpreter. By setting this line in the inventory, the modules will execute in the virtualenv interpreter and have available the virtualenv packages, specifically pyrax. If using virtualenv, you may wish to modify your localhost inventory definition to find this location as follows:
 
 .. code-block:: ini
 
 [localhost]
 localhost ansible_connection=local ansible_python_interpreter=/path/to/ansible_venv/bin/python
 
+.. note::
+
+    pyrax may be installed in the global Python package scope or in a virtual environment. There are no special considerations to keep in mind when installing pyrax.
+
 .. _provisioning:
 
 Provisioning
 ````````````
 
 Now for the fun parts.
 
-The 'rax' module provides the ability to provision instances within Rackspace Cloud. Typically the
-provisioning task will be performed from your Ansible control server against the Rackspace cloud API.
+The 'rax' module provides the ability to provision instances within Rackspace Cloud. Typically the provisioning task will be performed from your Ansible control server (in our example, localhost) against the Rackspace cloud API. This is done for several reasons:
+
+    - Avoiding installing the pyrax library on remote nodes
+    - No need to encrypt and distribute credentials to remote nodes
+    - Speed and simplicity
 
 .. note::
 
    Authentication with the Rackspace-related modules is handled by either
   specifying your username and API key as environment variables or passing
-   them as module arguments.
+   them as module arguments, or by specifying the location of a credentials
+   file.
 
-Here is a basic example of provisioning a instance in ad-hoc mode:
+Here is a basic example of provisioning an instance in ad-hoc mode:
 
 .. code-block:: bash
 
@@ -119,8 +119,9 @@ Here's what it would look like in a playbook, assuming the parameters were defin
        wait: yes
      register: rax
 
-By registering the return value of the step, it is then possible to dynamically add the resulting hosts to inventory (temporarily, in memory).
-This facilitates performing configuration actions on the hosts immediately in a subsequent task::
+The rax module returns data about the nodes it creates, like IP addresses, hostnames, and login passwords. By registering the return value of the step, it is possible to use this data to dynamically add the resulting hosts to inventory (temporarily, in memory). This facilitates performing configuration actions on the hosts in a follow-on task. In the following example, the servers that were successfully created using the above task are dynamically added to a group called "raxhosts", with each node's hostname, IP address, and root password being added to the inventory.
+
+.. code-block:: yaml
 
     - name: Add the instances we created (by public IP) to the group 'raxhosts'
       local_action:
@@ -132,7 +133,9 @@ This facilitates performing configuration actions on the hosts immediately in a
       with_items: rax.success
       when: rax.action == 'create'
 
-With the host group now created, a second play in your provision playbook could now configure them, for example::
+With the host group now created, the next play in this playbook could now configure servers belonging to the raxhosts group.
+
+.. code-block:: yaml
 
     - name: Configuration play
       hosts: raxhosts
      user: root
      roles:
        - ntp
        - webserver
-
 The method above ties the configuration of a host with the provisioning step. This isn't always what you want, and leads us
 to the next section.
 
@@ -150,41 +152,28 @@ to the next section.
 Host Inventory
 ``````````````
 
-Once your nodes are spun up, you'll probably want to talk to them again.
-
-The best way to handle his is to use the rax inventory plugin, which dynamically queries Rackspace Cloud and tells Ansible what
-nodes you have to manage.
-
-You might want to use this even if you are spinning up Ansible via other tools, including the Rackspace Cloud user interface.
-
-The inventory plugin can be used to group resources by their meta data. Utilizing meta data is highly
-recommended in rax and can provide an easy way to sort between host groups and roles.
-
-If you don't want to use the ``rax.py`` dynamic inventory script, you could also still choose to manually manage your INI inventory file,
-though this is less recommended.
+Once your nodes are spun up, you'll probably want to talk to them again. The best way to handle this is to use the "rax" inventory plugin, which dynamically queries Rackspace Cloud and tells Ansible what nodes you have to manage. You might want to use this even if you are spinning up Ansible via other tools, including the Rackspace Cloud user interface. The inventory plugin can be used to group resources by metadata, region, OS, etc. Utilizing metadata is highly recommended in "rax" and can provide an easy way to sort between host groups and roles. If you don't want to use the ``rax.py`` dynamic inventory script, you could also still choose to manually manage your INI inventory file, though this is less recommended.
 
-In Ansible it is quite possible to use multiple dynamic inventory plugins along with INI file data. Just put them in a common
-directory and be sure the scripts are chmod +x, and the INI-based ones are not.
+In Ansible it is quite possible to use multiple dynamic inventory plugins along with INI file data. Just put them in a common directory and be sure the scripts are chmod +x, and the INI-based ones are not.
 
 .. _raxpy:
 
 rax.py
 ++++++
 
-To use the rackspace dynamic inventory script, copy ``rax.py`` from ``plugins/inventory`` into your inventory directory and make it executable. You can specify credentials for ``rax.py`` utilizing the ``RAX_CREDS_FILE`` environment variable.
+To use the rackspace dynamic inventory script, copy ``rax.py`` into your inventory directory and make it executable. You can specify a credentials file for ``rax.py`` utilizing the ``RAX_CREDS_FILE`` environment variable.
+
+.. note:: Dynamic inventory scripts (like ``rax.py``) are saved in ``/usr/share/ansible/inventory`` if Ansible has been installed globally. If installed to a virtualenv, the inventory scripts are installed to ``$VIRTUALENV/share/inventory``.
 
 .. note:: Users of :doc:`tower` will note that dynamic inventory is natively supported by Tower, and all you have to do is associate a group with your Rackspace Cloud credentials, and it will easily synchronize without going through these steps::
 
     $ RAX_CREDS_FILE=~/.raxpub ansible all -i rax.py -m setup
 
-``rax.py`` also accepts a ``RAX_REGION`` environment variable, which can contain an individual region, or a
-comma separated list of regions.
+``rax.py`` also accepts a ``RAX_REGION`` environment variable, which can contain an individual region, or a comma separated list of regions.
 
 When using ``rax.py``, you will not have a 'localhost' defined in the inventory.
 
-As mentioned previously, you will often be running most of these modules outside of the host loop,
-and will need 'localhost' defined. The recommended way to do this, would be to create an ``inventory`` directory,
-and place both the ``rax.py`` script and a file containing ``localhost`` in it.
+As mentioned previously, you will often be running most of these modules outside of the host loop, and will need 'localhost' defined. The recommended way to do this would be to create an ``inventory`` directory, and place both the ``rax.py`` script and a file containing ``localhost`` in it.
 
 Executing ``ansible`` or ``ansible-playbook`` and specifying the ``inventory`` directory instead
 of an individual file, will cause ansible to evaluate each file in that directory for inventory.
 
@@ -295,8 +284,7 @@ following information, which will be utilized for inventory and variables.
 
 Standard Inventory
 ++++++++++++++++++
 
-When utilizing a standard ini formatted inventory file (as opposed to the inventory plugin),
-it may still be adventageous to retrieve discoverable hostvar information from the Rackspace API.
+When utilizing a standard ini formatted inventory file (as opposed to the inventory plugin), it may still be advantageous to retrieve discoverable hostvar information from the Rackspace API.
 
 This can be achieved with the ``rax_facts`` module and an inventory file similar to the following:
 
@@ -587,9 +575,16 @@ and less information has to be shared with remote hosts.
 
 .. _pending_information:
 
-Pending Information
-```````````````````
+Orchestration in the Rackspace Cloud
+++++++++++++++++++++++++++++++++++++
+
+Ansible is a powerful orchestration tool, and rax modules allow you the opportunity to orchestrate complex tasks, deployments, and configurations. The key here is to automate provisioning of infrastructure, like any other piece of software in an environment. Complex deployments might have previously required manual manipulation of load balancers, or manual provisioning of servers. Utilizing the rax modules included with Ansible, one can make the deployment of additional nodes contingent on the current number of running nodes, or the configuration of a clustered application dependent on the number of nodes with common metadata. One could automate the following scenarios, for example:
+
+* Servers that are removed from a Cloud Load Balancer one-by-one, updated, verified, and returned to the load balancer pool
+* Expansion of an already-online environment, where nodes are provisioned, bootstrapped, configured, and software installed
+* A procedure where app log files are uploaded to a central location, like Cloud Files, before a node is decommissioned
+* Servers and load balancers that have DNS records created and destroyed on creation and decommissioning, respectively
+
-
-More to come!

From 73c883c1220d906686f26a0e00a5e00af120fce0 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Wed, 5 Mar 2014 14:25:24 -0600
Subject: [PATCH 129/772] Keep track of role dependencies across plays

Also fixes a bug in which tags specified on top-level roles were not
being passed down to dependent roles. Fixes #4656
---
 CHANGELOG.md                     |  1 +
 lib/ansible/playbook/__init__.py |  9 ++++++++-
 lib/ansible/playbook/play.py     | 18 ++++++++++++++++--
 3 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 60330740156..35934bc541a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@ Ansible Changes By Release
 Major features/changes:
 
 * The deprecated legacy variable templating system has been finally removed.  Use {{ foo }} always not $foo or ${foo}.
+* Role dependencies are now tracked across multiple plays, making common roles easier to include in dependencies without any special variable tricks.
New Modules: diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 65965526251..3fd84239fb7 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -240,13 +240,20 @@ class PlayBook(object): plays = [] matched_tags_all = set() unmatched_tags_all = set() + included_roles = [] # loop through all patterns and run them self.callbacks.on_start() for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs): - play = Play(self, play_ds, play_basedir, vault_password=self.vault_password) + play = Play(self, play_ds, play_basedir, included_roles=included_roles, vault_password=self.vault_password) assert play is not None + # add any new roles brought in by this play to the + # global list of roles we're tracking + for role in play.included_roles: + if role not in included_roles: + included_roles.append(role) + matched_tags, unmatched_tags = play.compare_tags(self.only_tags) matched_tags_all = matched_tags_all | matched_tags unmatched_tags_all = unmatched_tags_all | unmatched_tags diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index e9f00e47024..b9f740f2be0 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -49,7 +49,7 @@ class Play(object): # ************************************************* - def __init__(self, playbook, ds, basedir, vault_password=None): + def __init__(self, playbook, ds, basedir, included_roles=[], vault_password=None): ''' constructor loads from a play datastructure ''' for x in ds.keys(): @@ -81,7 +81,7 @@ class Play(object): self._update_vars_files_for_host(None) # now we load the roles into the datastructure - self.included_roles = [] + self.included_roles = included_roles ds = self._load_roles(self.roles, ds) # and finally re-process the vars files as they may have @@ -227,6 +227,20 @@ class Play(object): if meta_data: allow_dupes = utils.boolean(meta_data.get('allow_duplicates','')) + # if any tags were specified as role/dep variables, merge + # them into the passed_vars so they're passed on to any + # further dependencies too, and so we only have one place + # (passed_vars) to look for tags going forward + def __merge_tags(var_obj): + old_tags = passed_vars.get('tags', []) + new_tags = var_obj.get('tags', []) + if isinstance(new_tags, basestring): + new_tags = [new_tags, ] + return list(set(old_tags + new_tags)) + + passed_vars['tags'] = __merge_tags(role_vars) + passed_vars['tags'] = __merge_tags(dep_vars) + # if tags are set from this role, merge them # into the tags list for the dependent role if "tags" in passed_vars: From b41d8106ff2dc89463bc2ef60f921bba87c3f0b6 Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Wed, 5 Mar 2014 14:49:30 -0600 Subject: [PATCH 130/772] Revert "Merge pull request #6287 from risaacson/fix_hash_in_var" This reverts commit a80828745167b38065bbc3b14d615f1c3c225e5d, reversing changes made to 6129ea7566ffd8113b2038076d4d1550cd6383a4. 
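
For context, the comment handling this revert restores in
lib/ansible/inventory/ini.py splits on the first '#' unconditionally, so
a hash inside a quoted value is treated as a comment again (a two-line
illustration; the sample line mirrors the 3comments test fixture):

    line = 'zeus var_a="2#3"   # host with inline comment'
    print line.split("#")[0].strip()
    # prints: zeus var_a="2
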
--- lib/ansible/inventory/ini.py | 54 +++---- lib/ansible/utils/__init__.py | 34 ---- test/units/TestInventory.py | 149 ++++++++---------- .../inventory_test_data/inventory_dir/0hosts | 2 +- .../inventory_dir/3comments | 5 +- 5 files changed, 92 insertions(+), 152 deletions(-) diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py index 29929672bb0..c50fae61164 100644 --- a/lib/ansible/inventory/ini.py +++ b/lib/ansible/inventory/ini.py @@ -23,12 +23,10 @@ from ansible.inventory.group import Group from ansible.inventory.expand_hosts import detect_range from ansible.inventory.expand_hosts import expand_hostname_range from ansible import errors -import ansible.utils as utils import shlex import re import ast - class InventoryParser(object): """ Host inventory for ansible. @@ -49,6 +47,7 @@ class InventoryParser(object): self._parse_group_variables() return self.groups + # [webservers] # alpha # beta:2345 @@ -66,16 +65,9 @@ class InventoryParser(object): active_group_name = 'ungrouped' for line in self.lines: - - # Split off any comments that are not contained in a variable. - if "#" in line: - line = utils.split_unquoted_hash(line) - - # Clean up the end of the line. - line = line.strip() - + line = line.split("#")[0].strip() if line.startswith("[") and line.endswith("]"): - active_group_name = line.replace("[", "").replace("]", "") + active_group_name = line.replace("[","").replace("]","") if line.find(":vars") != -1 or line.find(":children") != -1: active_group_name = active_group_name.rsplit(":", 1)[0] if active_group_name not in self.groups: @@ -103,18 +95,20 @@ class InventoryParser(object): if hostname.count(".") == 1: (hostname, port) = hostname.rsplit(".", 1) elif (hostname.find("[") != -1 and - hostname.find("]") != -1 and - hostname.find(":") != -1 and - (hostname.rindex("]") < hostname.rindex(":")) or - (hostname.find("]") == -1 and hostname.find(":") != -1)): - (hostname, port) = hostname.rsplit(":", 1) + hostname.find("]") != -1 and + hostname.find(":") != -1 and + (hostname.rindex("]") < hostname.rindex(":")) or + (hostname.find("]") == -1 and hostname.find(":") != -1)): + (hostname, port) = hostname.rsplit(":", 1) + hostnames = [] if detect_range(hostname): hostnames = expand_hostname_range(hostname) else: hostnames = [hostname] for hn in hostnames: + host = None if hn in self.hosts: host = self.hosts[hn] else: @@ -125,24 +119,15 @@ class InventoryParser(object): if t.startswith('#'): break try: - (k, v) = t.split("=", 1) + (k,v) = t.split("=", 1) except ValueError, e: raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e))) - # I am not sure where a variable with a hash needs to be evaluated via ast. - # If an instance comes up this is the condition we need to modify. - if "#" in v: - host.set_variable(k, v) - else: - try: - host.set_variable(k, ast.literal_eval(v)) - # Using explicit exceptions. - # Likely a string that literal_eval does not like. We wil then just set it. - except ValueError: - # For some reason this was thought to be malformed. - host.set_variable(k, v) - except SyntaxError: - # Is this a hash with an equals at the end? 
- host.set_variable(k, v) + try: + host.set_variable(k,ast.literal_eval(v)) + except: + # most likely a string that literal_eval + # doesn't like, so just set it + host.set_variable(k,v) self.groups[active_group_name].add_host(host) # [southeast:children] @@ -157,7 +142,7 @@ class InventoryParser(object): if line is None or line == '': continue if line.startswith("[") and line.find(":children]") != -1: - line = line.replace("[", "").replace(":children]", "") + line = line.replace("[","").replace(":children]","") group = self.groups.get(line, None) if group is None: group = self.groups[line] = Group(name=line) @@ -172,6 +157,7 @@ class InventoryParser(object): else: group.add_child_group(kid_group) + # [webservers:vars] # http_port=1234 # maxRequestsPerChild=200 @@ -181,7 +167,7 @@ class InventoryParser(object): for line in self.lines: line = line.strip() if line.startswith("[") and line.find(":vars]") != -1: - line = line.replace("[", "").replace(":vars]", "") + line = line.replace("[","").replace(":vars]","") group = self.groups.get(line, None) if group is None: raise errors.AnsibleError("can't add vars to undefined group: %s" % line) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 996b457cb17..02148faff0c 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1071,37 +1071,3 @@ def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS): password.append(new_char) return ''.join(password) - - -def split_unquoted_hash(line): - ''' - Carve off comments from a line which are not contained in quotes and a part of an assignment. - ''' - - # We would really like to have this using a regex to make it less code. For instance: - # line = re.split('(? 0: - marker = 0 - while marker < instances: - if ("=\"" in split_line[marker] and "\"" in split_line[marker + 1]) or ( - "='" in split_line[marker] and "'" in split_line[marker + 1]): - marker += 1 - else: - if marker == 0: - line = split_line[marker] - else: - # We have multiple fragments that we need to combine back together. - # rekram is us reversing that work we did with marker. 
- rekram = 0 - new_line = split_line[rekram] - while marker > rekram: - rekram += 1 - new_line = new_line + "#" + split_line[rekram] - line = new_line - break - return line \ No newline at end of file diff --git a/test/units/TestInventory.py b/test/units/TestInventory.py index 2f7b47989ac..2ae6256e62b 100644 --- a/test/units/TestInventory.py +++ b/test/units/TestInventory.py @@ -5,7 +5,6 @@ from nose.tools import raises from ansible import errors from ansible.inventory import Inventory - class TestInventory(unittest.TestCase): def setUp(self): @@ -50,14 +49,14 @@ class TestInventory(unittest.TestCase): def dir_inventory(self): return Inventory(self.inventory_dir) - all_simple_hosts = ['jupiter', 'saturn', 'zeus', 'hera', - 'cerberus001', 'cerberus002', 'cerberus003', - 'cottus99', 'cottus100', - 'poseidon', 'thor', 'odin', 'loki', - 'thrudgelmir0', 'thrudgelmir1', 'thrudgelmir2', - 'thrudgelmir3', 'thrudgelmir4', 'thrudgelmir5', - 'Hotep-a', 'Hotep-b', 'Hotep-c', - 'BastC', 'BastD', 'neptun', ] + all_simple_hosts=['jupiter', 'saturn', 'zeus', 'hera', + 'cerberus001','cerberus002','cerberus003', + 'cottus99', 'cottus100', + 'poseidon', 'thor', 'odin', 'loki', + 'thrudgelmir0', 'thrudgelmir1', 'thrudgelmir2', + 'thrudgelmir3', 'thrudgelmir4', 'thrudgelmir5', + 'Hotep-a', 'Hotep-b', 'Hotep-c', + 'BastC', 'BastD', 'neptun', ] ##################################### ### Empty inventory format tests @@ -94,36 +93,36 @@ class TestInventory(unittest.TestCase): inventory = self.simple_inventory() hosts = inventory.list_hosts("norse") - expected_hosts = ['thor', 'odin', 'loki'] + expected_hosts=['thor', 'odin', 'loki'] assert sorted(hosts) == sorted(expected_hosts) def test_simple_ungrouped(self): inventory = self.simple_inventory() hosts = inventory.list_hosts("ungrouped") - expected_hosts = ['jupiter', 'saturn', - 'thrudgelmir0', 'thrudgelmir1', 'thrudgelmir2', - 'thrudgelmir3', 'thrudgelmir4', 'thrudgelmir5'] + expected_hosts=['jupiter', 'saturn', + 'thrudgelmir0', 'thrudgelmir1', 'thrudgelmir2', + 'thrudgelmir3', 'thrudgelmir4', 'thrudgelmir5'] assert sorted(hosts) == sorted(expected_hosts) def test_simple_combined(self): inventory = self.simple_inventory() hosts = inventory.list_hosts("norse:greek") - expected_hosts = ['zeus', 'hera', 'poseidon', - 'cerberus001', 'cerberus002', 'cerberus003', - 'cottus99', 'cottus100', - 'thor', 'odin', 'loki'] + expected_hosts=['zeus', 'hera', 'poseidon', + 'cerberus001','cerberus002','cerberus003', + 'cottus99','cottus100', + 'thor', 'odin', 'loki'] assert sorted(hosts) == sorted(expected_hosts) def test_simple_restrict(self): inventory = self.simple_inventory() restricted_hosts = ['hera', 'poseidon', 'thor'] - expected_hosts = ['zeus', 'hera', 'poseidon', - 'cerberus001', 'cerberus002', 'cerberus003', - 'cottus99', 'cottus100', - 'thor', 'odin', 'loki'] + expected_hosts=['zeus', 'hera', 'poseidon', + 'cerberus001','cerberus002','cerberus003', + 'cottus99', 'cottus100', + 'thor', 'odin', 'loki'] inventory.restrict_to(restricted_hosts) hosts = inventory.list_hosts("norse:greek") @@ -138,12 +137,12 @@ class TestInventory(unittest.TestCase): def test_simple_string_ipv4(self): inventory = Inventory('127.0.0.1,192.168.1.1') hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), sorted(['127.0.0.1', '192.168.1.1'])) + self.assertEqual(sorted(hosts), sorted(['127.0.0.1','192.168.1.1'])) def test_simple_string_ipv4_port(self): inventory = Inventory('127.0.0.1:2222,192.168.1.1') hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), 
sorted(['127.0.0.1', '192.168.1.1'])) + self.assertEqual(sorted(hosts), sorted(['127.0.0.1','192.168.1.1'])) def test_simple_string_ipv4_vars(self): inventory = Inventory('127.0.0.1:2222,192.168.1.1') @@ -153,12 +152,12 @@ class TestInventory(unittest.TestCase): def test_simple_string_ipv6(self): inventory = Inventory('FE80:EF45::12:1,192.168.1.1') hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), sorted(['FE80:EF45::12:1', '192.168.1.1'])) + self.assertEqual(sorted(hosts), sorted(['FE80:EF45::12:1','192.168.1.1'])) def test_simple_string_ipv6_port(self): inventory = Inventory('[FE80:EF45::12:1]:2222,192.168.1.1') hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), sorted(['FE80:EF45::12:1', '192.168.1.1'])) + self.assertEqual(sorted(hosts), sorted(['FE80:EF45::12:1','192.168.1.1'])) def test_simple_string_ipv6_vars(self): inventory = Inventory('[FE80:EF45::12:1]:2222,192.168.1.1') @@ -168,12 +167,12 @@ class TestInventory(unittest.TestCase): def test_simple_string_fqdn(self): inventory = Inventory('foo.example.com,bar.example.com') hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), sorted(['foo.example.com', 'bar.example.com'])) + self.assertEqual(sorted(hosts), sorted(['foo.example.com','bar.example.com'])) def test_simple_string_fqdn_port(self): inventory = Inventory('foo.example.com:2222,bar.example.com') hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), sorted(['foo.example.com', 'bar.example.com'])) + self.assertEqual(sorted(hosts), sorted(['foo.example.com','bar.example.com'])) def test_simple_string_fqdn_vars(self): inventory = Inventory('foo.example.com:2222,bar.example.com') @@ -192,26 +191,26 @@ class TestInventory(unittest.TestCase): inventory = self.simple_inventory() vars = inventory.get_variables('hera') - expected = {'ansible_ssh_port': 3000, - 'group_names': ['greek'], - 'inventory_hostname': 'hera', - 'inventory_hostname_short': 'hera'} + expected = { 'ansible_ssh_port': 3000, + 'group_names': ['greek'], + 'inventory_hostname': 'hera', + 'inventory_hostname_short': 'hera' } assert vars == expected def test_large_range(self): inventory = self.large_range_inventory() hosts = inventory.list_hosts() - self.assertEqual(sorted(hosts), sorted('bob%03i' % i for i in range(0, 143))) + self.assertEqual(sorted(hosts), sorted('bob%03i' %i for i in range(0, 143))) def test_subset(self): inventory = self.simple_inventory() inventory.subset('odin;thor,loki') - self.assertEqual(sorted(inventory.list_hosts()), sorted(['thor', 'odin', 'loki'])) + self.assertEqual(sorted(inventory.list_hosts()), sorted(['thor','odin','loki'])) def test_subset_range(self): inventory = self.simple_inventory() inventory.subset('greek[0-2];norse[0]') - self.assertEqual(sorted(inventory.list_hosts()), sorted(['zeus', 'hera', 'thor'])) + self.assertEqual(sorted(inventory.list_hosts()), sorted(['zeus','hera','thor'])) def test_subet_range_empty_group(self): inventory = self.simple_inventory() @@ -221,11 +220,11 @@ class TestInventory(unittest.TestCase): def test_subset_filename(self): inventory = self.simple_inventory() inventory.subset('@' + os.path.join(self.test_dir, 'restrict_pattern')) - self.assertEqual(sorted(inventory.list_hosts()), sorted(['thor', 'odin'])) + self.assertEqual(sorted(inventory.list_hosts()), sorted(['thor','odin'])) @raises(errors.AnsibleError) def testinvalid_entry(self): - Inventory('1234') + Inventory('1234') ################################################### ### INI file advanced tests @@ -241,7 +240,7 @@ class 
TestInventory(unittest.TestCase): g=' g ', h=' h ', i="' i \"", j='" j', rga='1', rgb='2', rgc='3', inventory_hostname='rtp_a', inventory_hostname_short='rtp_a', - group_names=['eastcoast', 'nc', 'redundantgroup', 'redundantgroup2', 'redundantgroup3', 'rtp', 'us'] + group_names=[ 'eastcoast', 'nc', 'redundantgroup', 'redundantgroup2', 'redundantgroup3', 'rtp', 'us' ] ) print vars print expected @@ -250,9 +249,9 @@ class TestInventory(unittest.TestCase): def test_complex_group_names(self): inventory = self.complex_inventory() tests = { - 'host1': ['role1', 'role3'], - 'host2': ['role1', 'role2'], - 'host3': ['role2', 'role3'] + 'host1': [ 'role1', 'role3' ], + 'host2': [ 'role1', 'role2' ], + 'host3': [ 'role2', 'role3' ] } for host, roles in tests.iteritems(): group_names = inventory.get_variables(host)['group_names'] @@ -276,10 +275,11 @@ class TestInventory(unittest.TestCase): def test_complex_enumeration(self): + expected1 = ['rtp_b'] expected2 = ['rtp_a', 'rtp_b'] expected3 = ['rtp_a', 'rtp_b', 'rtp_c', 'tri_a', 'tri_b', 'tri_c'] - expected4 = ['rtp_b', 'orlando'] + expected4 = ['rtp_b', 'orlando' ] expected5 = ['blade-a-1'] inventory = self.complex_inventory() @@ -303,34 +303,34 @@ class TestInventory(unittest.TestCase): @raises(errors.AnsibleError) def test_invalid_range(self): - Inventory(os.path.join(self.test_dir, 'inventory', 'test_incorrect_range')) + Inventory(os.path.join(self.test_dir, 'inventory','test_incorrect_range')) @raises(errors.AnsibleError) def test_missing_end(self): - Inventory(os.path.join(self.test_dir, 'inventory', 'test_missing_end')) + Inventory(os.path.join(self.test_dir, 'inventory','test_missing_end')) @raises(errors.AnsibleError) def test_incorrect_format(self): - Inventory(os.path.join(self.test_dir, 'inventory', 'test_incorrect_format')) + Inventory(os.path.join(self.test_dir, 'inventory','test_incorrect_format')) @raises(errors.AnsibleError) def test_alpha_end_before_beg(self): - Inventory(os.path.join(self.test_dir, 'inventory', 'test_alpha_end_before_beg')) + Inventory(os.path.join(self.test_dir, 'inventory','test_alpha_end_before_beg')) def test_combined_range(self): - i = Inventory(os.path.join(self.test_dir, 'inventory', 'test_combined_range')) + i = Inventory(os.path.join(self.test_dir, 'inventory','test_combined_range')) hosts = i.list_hosts('test') - expected_hosts = ['host1A', 'host2A', 'host1B', 'host2B'] + expected_hosts=['host1A','host2A','host1B','host2B'] assert sorted(hosts) == sorted(expected_hosts) def test_leading_range(self): - i = Inventory(os.path.join(self.test_dir, 'inventory', 'test_leading_range')) + i = Inventory(os.path.join(self.test_dir, 'inventory','test_leading_range')) hosts = i.list_hosts('test') - expected_hosts = ['1.host', '2.host', 'A.host', 'B.host'] + expected_hosts=['1.host','2.host','A.host','B.host'] assert sorted(hosts) == sorted(expected_hosts) hosts2 = i.list_hosts('test2') - expected_hosts2 = ['1.host', '2.host', '3.host'] + expected_hosts2=['1.host','2.host','3.host'] assert sorted(hosts2) == sorted(expected_hosts2) ################################################### @@ -340,38 +340,38 @@ class TestInventory(unittest.TestCase): inventory = self.script_inventory() hosts = inventory.list_hosts() - expected_hosts = ['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] + expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] - print "Expected: %s" % expected_hosts - print "Got : %s" % hosts + print "Expected: %s"%(expected_hosts) + print "Got : %s"%(hosts) 
assert sorted(hosts) == sorted(expected_hosts) def test_script_all(self): inventory = self.script_inventory() hosts = inventory.list_hosts('all') - expected_hosts = ['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] + expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] assert sorted(hosts) == sorted(expected_hosts) def test_script_norse(self): inventory = self.script_inventory() hosts = inventory.list_hosts("norse") - expected_hosts = ['thor', 'odin', 'loki'] + expected_hosts=['thor', 'odin', 'loki'] assert sorted(hosts) == sorted(expected_hosts) def test_script_combined(self): inventory = self.script_inventory() hosts = inventory.list_hosts("norse:greek") - expected_hosts = ['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] + expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] assert sorted(hosts) == sorted(expected_hosts) def test_script_restrict(self): inventory = self.script_inventory() restricted_hosts = ['hera', 'poseidon', 'thor'] - expected_hosts = ['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] + expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki'] inventory.restrict_to(restricted_hosts) hosts = inventory.list_hosts("norse:greek") @@ -389,7 +389,7 @@ class TestInventory(unittest.TestCase): print "VARS=%s" % vars - assert vars == {'hammer': True, + assert vars == {'hammer':True, 'group_names': ['norse'], 'inventory_hostname': 'thor', 'inventory_hostname_short': 'thor'} @@ -417,24 +417,15 @@ class TestInventory(unittest.TestCase): auth = inventory.get_variables('neptun')['auth'] assert auth == 'YWRtaW46YWRtaW4=' - def test_dir_inventory(self): - inventory = self.dir_inventory() - - host_vars = inventory.get_variables('zeus') - - expected_vars = {'inventory_hostname': 'zeus', - 'inventory_hostname_short': 'zeus', - 'group_names': ['greek', 'major-god', 'ungrouped'], - 'var_a': '2#3'} - - print "HOST VARS=%s" % host_vars - print "EXPECTED VARS=%s" % expected_vars - - assert host_vars == expected_vars - - def test_dir_inventory_multiple_groups(self): - inventory = self.dir_inventory() - group_greek = inventory.get_hosts('greek') - actual_host_names = [host.name for host in group_greek] - print "greek : %s " % actual_host_names - assert actual_host_names == ['zeus', 'morpheus'] + # test disabled as needs to be updated to model desired behavior + # + #def test_dir_inventory(self): + # inventory = self.dir_inventory() + # vars = inventory.get_variables('zeus') + # + # print "VARS=%s" % vars + # + # assert vars == {'inventory_hostname': 'zeus', + # 'inventory_hostname_short': 'zeus', + # 'group_names': ['greek', 'major-god', 'ungrouped'], + # 'var_a': '1#2'} diff --git a/test/units/inventory_test_data/inventory_dir/0hosts b/test/units/inventory_test_data/inventory_dir/0hosts index 6f78a33a228..27fc46e8530 100644 --- a/test/units/inventory_test_data/inventory_dir/0hosts +++ b/test/units/inventory_test_data/inventory_dir/0hosts @@ -1,3 +1,3 @@ -zeus var_a=0 +zeus var_a=2 morpheus thor diff --git a/test/units/inventory_test_data/inventory_dir/3comments b/test/units/inventory_test_data/inventory_dir/3comments index f18577fc7bd..74642f13cc7 100644 --- a/test/units/inventory_test_data/inventory_dir/3comments +++ b/test/units/inventory_test_data/inventory_dir/3comments @@ -1,8 +1,5 @@ -[titan] -cronus var_a="a#b" var_b="b#c" var_c="c#d" # Is this overkill? 
-
 [major-god]  # group with inline comments
-zeus var_a="2#3"  # host with inline comments and "#" in the var string
+zeus var_a="1#2"  # host with inline comments and "#" in the var string
 
 # A comment
 thor

From c09e0211f71b968634c8676509fa39d79670afc3 Mon Sep 17 00:00:00 2001 From: Harsha Yalamanchili Date: Wed, 5 Mar 2014 14:02:16 -0800 Subject: [PATCH 131/772] Update debian install documentation to include dependency resolution

--- packaging/debian/README.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/packaging/debian/README.md b/packaging/debian/README.md
index efd8677f400..bbca98cdb8b 100644
--- a/packaging/debian/README.md
+++ b/packaging/debian/README.md
@@ -11,4 +11,9 @@ To create an Ansible DEB package:
 
 The debian package file will be placed in the `../` directory. This can then be added to an APT repository or installed with `dpkg -i <package-file>`.
 
-Note that `dpkg -i` does not resolve dependencies
+Note that `dpkg -i` does not resolve dependencies.
+
+To install the Ansbiel DEB package and resolve redepdencies:
+
+    sudo dpkg -i <package-file>
+    sudo apt-get -fy install
\ No newline at end of file

From 063dc5e971ad71f7f22dd60dd81aab8070b86a1c Mon Sep 17 00:00:00 2001 From: Harsha Yalamanchili Date: Wed, 5 Mar 2014 14:04:36 -0800 Subject: [PATCH 132/772] Update debian install documentation to include dependency resolution

--- packaging/debian/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packaging/debian/README.md b/packaging/debian/README.md
index bbca98cdb8b..b328a07b640 100644
--- a/packaging/debian/README.md
+++ b/packaging/debian/README.md
@@ -13,7 +13,7 @@ The debian package file will be placed in the `../` directory. This can then be
 
 Note that `dpkg -i` does not resolve dependencies.
 
-To install the Ansbiel DEB package and resolve redepdencies:
+To install the Ansible DEB package and resolve redepdencies:
 
     sudo dpkg -i <package-file>
     sudo apt-get -fy install
\ No newline at end of file

From 4e8ed92130528dcecbd590f29597ff449f605656 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 5 Mar 2014 18:49:54 -0500 Subject: [PATCH 133/772] Fixes #6077 decode escaped newline characters in content for the copy module

--- lib/ansible/runner/action_plugins/copy.py | 6 ++++++ 1 file changed, 6 insertions(+)

diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py
index 0ee9b6f3ced..79acdaba587 100644
--- a/lib/ansible/runner/action_plugins/copy.py
+++ b/lib/ansible/runner/action_plugins/copy.py
@@ -54,6 +54,12 @@ class ActionModule(object):
         raw = utils.boolean(options.get('raw', 'no'))
         force = utils.boolean(options.get('force', 'yes'))
 
+        # content with newlines is going to be escaped to safely load in yaml
+        # now we need to unescape it so that the newlines are evaluated properly
+        # when writing the file to disk
+        if content:
+            content = content.decode('unicode-escape')
+
         if (source is None and content is None and not 'first_available_file' in inject) or dest is None:
             result=dict(failed=True, msg="src (or content) and dest are required")
             return ReturnData(conn=conn, result=result)

From 23a0468a4b1078bc94f1b68f6993755c7b48ed3d Mon Sep 17 00:00:00 2001 From: aresch Date: Wed, 5 Mar 2014 16:25:42 -0800 Subject: [PATCH 134/772] Fix respecting remote_tmp when sudo is used

--- lib/ansible/runner/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index 7bbc9e372e1..c09670970b1 100644
--- a/lib/ansible/runner/__init__.py
+++ 
b/lib/ansible/runner/__init__.py @@ -986,11 +986,11 @@ class Runner(object): basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) basetmp = os.path.join(C.DEFAULT_REMOTE_TMP, basefile) - if (self.sudo or self.su) and (self.sudo_user != 'root' or self.su != 'root') and basetmp.startswith('$HOME'): + if (self.sudo or self.su) and (self.sudo_user != 'root' or self.su_user != 'root') and basetmp.startswith('$HOME'): basetmp = os.path.join('/tmp', basefile) cmd = 'mkdir -p %s' % basetmp - if self.remote_user != 'root' or ((self.sudo or self.su) and (self.sudo_user != 'root' or self.su != 'root')): + if self.remote_user != 'root' or ((self.sudo or self.su) and (self.sudo_user != 'root' or self.su_user != 'root')): cmd += ' && chmod a+rx %s' % basetmp cmd += ' && echo %s' % basetmp From 94a7fb60fe580bc72078ed07edfb591df06b94c1 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 5 Mar 2014 20:10:25 -0500 Subject: [PATCH 135/772] Allow escaped comments in inventory files. --- lib/ansible/inventory/ini.py | 3 ++- lib/ansible/utils/__init__.py | 9 +++++++++ test/units/TestUtils.py | 23 +++++++++++++++++++++++ 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py index c50fae61164..024eb9a9a0e 100644 --- a/lib/ansible/inventory/ini.py +++ b/lib/ansible/inventory/ini.py @@ -23,6 +23,7 @@ from ansible.inventory.group import Group from ansible.inventory.expand_hosts import detect_range from ansible.inventory.expand_hosts import expand_hostname_range from ansible import errors +from ansible import utils import shlex import re import ast @@ -65,7 +66,7 @@ class InventoryParser(object): active_group_name = 'ungrouped' for line in self.lines: - line = line.split("#")[0].strip() + line = utils.before_comment(line).strip() if line.startswith("[") and line.endswith("]"): active_group_name = line.replace("[","").replace("]","") if line.find(":vars") != -1 or line.find(":children") != -1: diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 02148faff0c..c3e777e4d65 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1071,3 +1071,12 @@ def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS): password.append(new_char) return ''.join(password) + +def before_comment(msg): + ''' what's the part of a string before a comment? ''' + msg = msg.replace("\#","**NOT_A_COMMENT**") + msg = msg.split("#")[0] + msg = msg.replace("**NOT_A_COMMENT**","#") + return msg + + diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index 4bddb4748ba..28e0dfc0cd2 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -16,6 +16,29 @@ sys.setdefaultencoding("utf8") class TestUtils(unittest.TestCase): + def test_before_comment(self): + ''' see if we can detect the part of a string before a comment. 
Used by INI parser in inventory ''' + + input = "before # comment" + expected = "before " + actual = ansible.utils.before_comment(input) + assert expected == actual + + input = "before \# not a comment" + expected = "before # not a comment" + actual = ansible.utils.before_comment(input) + assert expected == actual + + input = "" + expected = "" + actual = ansible.utils.before_comment(input) + assert expected == actual + + input = "#" + expected = "" + actual = ansible.utils.before_comment(input) + assert expected == actual + ##################################### ### check_conditional tests From 25cf215f5498d203ea71572e1642f8b3ad5621f0 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 5 Mar 2014 22:06:59 -0600 Subject: [PATCH 136/772] Create the tempdir in the accelerate module if it doesn't exist Fixes #6047 --- library/utilities/accelerate | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/library/utilities/accelerate b/library/utilities/accelerate index a6e84e32376..6508f1433ea 100644 --- a/library/utilities/accelerate +++ b/library/utilities/accelerate @@ -391,7 +391,13 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): final_path = None if 'user' in data and data.get('user') != getpass.getuser(): vv("the target user doesn't match this user, we'll move the file into place via sudo") - (fd,out_path) = tempfile.mkstemp(prefix='ansible.', dir=os.path.expanduser('~/.ansible/tmp/')) + tmp_path = os.path.expanduser('~/.ansible/tmp/') + if not os.path.exists(tmp_path): + try: + os.makedirs(tmp_path, 0700) + except: + return dict(failed=True, msg='could not create a temporary directory at %s' % tmp_path) + (fd,out_path) = tempfile.mkstemp(prefix='ansible.', dir=tmp_path) out_fd = os.fdopen(fd, 'w', 0) final_path = data['out_path'] else: From 1d731e5c3064fe29e840c6b4afa56af415419423 Mon Sep 17 00:00:00 2001 From: amree Date: Thu, 6 Mar 2014 12:19:54 +0800 Subject: [PATCH 137/772] MASTER_PORT variable for CHANGE MASTER TO command can only accept integer value --- library/database/mysql_replication | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/database/mysql_replication b/library/database/mysql_replication index f18060e9556..fdbb379371a 100644 --- a/library/database/mysql_replication +++ b/library/database/mysql_replication @@ -325,7 +325,7 @@ def main(): if master_password: chm.append("MASTER_PASSWORD='" + master_password + "'") if master_port: - chm.append("MASTER_PORT='" + master_port + "'") + chm.append("MASTER_PORT=" + master_port) if master_connect_retry: chm.append("MASTER_CONNECT_RETRY='" + master_connect_retry + "'") if master_log_file: From b9a7352e0a8e2cd8a72e29a7cc9a800bc83155c8 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Wed, 5 Feb 2014 21:11:06 +1000 Subject: [PATCH 138/772] Work to allow security tokens and profiles to work with Ansible Allow security tokens and profiles to be used as arguments to the 'common' ec2 modules Mostly refactoring to provide two new methods, `get_aws_connection_info`, which results in a dict that can be passed through to the boto `connect_to_region` calls, and `connect_to_aws` that can pass that dict through to the `connect_to_region` method of the appropriate module. Tidied up some variable names Works around boto/boto#2100 profiles don't work with boto < 2.24, but this detects for that and fails with an appropriate message. It is designed to work if profile is not passed but boto < 2.24 is installed. 
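
In practice the two new helpers are meant to be used together. A minimal sketch of the calling pattern the refactor enables (the function example_connect and the import path are illustrative; it assumes an AnsibleModule built from the new ec2_argument_spec() and the helpers defined below in lib/ansible/module_utils/ec2.py):

    import boto.ec2

    def example_connect(module):
        # credentials/region/url are resolved from module args, then
        # environment variables, then the boto config, in that order
        region, ec2_url, boto_params = get_aws_connection_info(module)
        if region:
            # connect_to_aws() also applies the boto/boto#2100 workaround
            # when boto_params carries a profile_name
            return connect_to_aws(boto.ec2, region, **boto_params)
        # otherwise fall back to an explicit endpoint URL
        return boto.connect_ec2_endpoint(ec2_url, **boto_params)
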
Modifications to allow empty aws auth variables to be passed (this is useful if wanting to have the keys as an optional parameter in ec2 calls - if set, use this value, if not set, use boto config or env variables) Reworked validate_certs improvements to work with refactoring Added documentation for profile and security_token to affected modules --- lib/ansible/module_utils/ec2.py | 113 +++++++++++++++++++++++--------- library/cloud/ec2 | 14 ++++ library/cloud/ec2_ami | 14 ++++ library/cloud/ec2_eip | 14 ++++ library/cloud/ec2_group | 14 ++++ library/cloud/ec2_key | 14 ++++ library/cloud/ec2_snapshot | 19 +++++- library/cloud/ec2_tag | 14 ++++ library/cloud/ec2_vol | 14 ++++ 9 files changed, 198 insertions(+), 32 deletions(-) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index 9156df766b2..ab6c1d27e9d 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -14,33 +14,44 @@ AWS_REGIONS = ['ap-northeast-1', 'us-west-2'] -def ec2_argument_keys_spec(): +def aws_common_argument_spec(): return dict( + ec2_url=dict(), aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True), aws_access_key=dict(aliases=['ec2_access_key', 'access_key']), + validate_certs=dict(default=True, type='bool'), + security_token=dict(no_log=True), + profile=dict(), ) + return spec def ec2_argument_spec(): - spec = ec2_argument_keys_spec() + spec = aws_common_argument_spec() spec.update( dict( region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), - validate_certs=dict(default=True, type='bool'), - ec2_url=dict(), ) ) return spec -def get_ec2_creds(module): +def boto_supports_profile_name(): + return hasattr(boto.ec2.EC2Connection, 'profile_name') + + +def get_aws_connection_info(module): # Check module args for credentials, then check environment vars + # access_key ec2_url = module.params.get('ec2_url') - ec2_secret_key = module.params.get('aws_secret_key') - ec2_access_key = module.params.get('aws_access_key') + access_key = module.params.get('aws_access_key') + secret_key = module.params.get('aws_secret_key') + security_token = module.params.get('security_token') region = module.params.get('region') + profile_name = module.params.get('profile') + validate_certs = module.params.get('validate_certs') if not ec2_url: if 'EC2_URL' in os.environ: @@ -48,21 +59,27 @@ def get_ec2_creds(module): elif 'AWS_URL' in os.environ: ec2_url = os.environ['AWS_URL'] - if not ec2_access_key: + if not access_key: if 'EC2_ACCESS_KEY' in os.environ: - ec2_access_key = os.environ['EC2_ACCESS_KEY'] + access_key = os.environ['EC2_ACCESS_KEY'] elif 'AWS_ACCESS_KEY_ID' in os.environ: - ec2_access_key = os.environ['AWS_ACCESS_KEY_ID'] + access_key = os.environ['AWS_ACCESS_KEY_ID'] elif 'AWS_ACCESS_KEY' in os.environ: - ec2_access_key = os.environ['AWS_ACCESS_KEY'] + access_key = os.environ['AWS_ACCESS_KEY'] + else: + # in case access_key came in as empty string + access_key = None - if not ec2_secret_key: + if not secret_key: if 'EC2_SECRET_KEY' in os.environ: - ec2_secret_key = os.environ['EC2_SECRET_KEY'] + secret_key = os.environ['EC2_SECRET_KEY'] elif 'AWS_SECRET_ACCESS_KEY' in os.environ: - ec2_secret_key = os.environ['AWS_SECRET_ACCESS_KEY'] + secret_key = os.environ['AWS_SECRET_ACCESS_KEY'] elif 'AWS_SECRET_KEY' in os.environ: - ec2_secret_key = os.environ['AWS_SECRET_KEY'] + secret_key = os.environ['AWS_SECRET_KEY'] + else: + # in case secret_key came in as empty string + secret_key = None if not region: if 'EC2_REGION' in os.environ: @@ 
-71,39 +88,75 @@ def get_ec2_creds(module): region = os.environ['AWS_REGION'] else: # boto.config.get returns None if config not found - region = boto.config.get('Boto', 'aws_region') + region = boto.config.get('Boto', 'aws_region') if not region: region = boto.config.get('Boto', 'ec2_region') - return ec2_url, ec2_access_key, ec2_secret_key, region + if not security_token: + if 'AWS_SECURITY_TOKEN' in os.environ: + security_token = os.environ['AWS_SECURITY_TOKEN'] + else: + # in case security_token came in as empty string + security_token = None + + boto_params = dict(aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + security_token=security_token) + + # profile_name only works as a key in boto >= 2.24 + # so only set profile_name if passed as an argument + if profile_name: + if not boto_supports_profile_name(): + module.fail_json("boto does not support profile_name before 2.24") + boto_params['profile_name'] = profile_name + + if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"): + boto_params['validate_certs'] = validate_certs + + return region, ec2_url, boto_params + + +def get_ec2_creds(module): + ''' for compatibility mode with old modules that don't/can't yet + use ec2_connect method ''' + region, ec2_url, boto_params = get_aws_connection_info(module) + return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region + + +def boto_fix_security_token_in_profile(conn, profile_name): + ''' monkey patch for boto issue boto/boto#2100 ''' + profile = 'profile ' + profile_name + if boto.config.has_option(profile, 'aws_security_token'): + conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token')) + return conn + + +def connect_to_aws(aws_module, region, **params): + conn = aws_module.connect_to_region(region, **params) + if params.get('profile_name'): + conn = boto_fix_security_token_in_profile(conn, params['profile_name']) + return conn def ec2_connect(module): """ Return an ec2 connection""" - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - validate_certs = module.params.get('validate_certs', True) + region, ec2_url, boto_params = get_aws_connection_info(module) # If we have a region specified, connect to its endpoint. 
if region: try: - if HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"): - ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, validate_certs=validate_certs) - else: - ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) + ec2 = connect_to_aws(boto.ec2, region, **boto_params) except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) + module.fail_json(msg=str(e)) # Otherwise, no region so we fallback to the old connection method elif ec2_url: try: - if HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"): - ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key, validate_certs=validate_certs) - else: - ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key) + ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params) except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) + module.fail_json(msg=str(e)) else: module.fail_json(msg="Either region or ec2_url must be specified") - return ec2 + return ec2 diff --git a/library/cloud/ec2 b/library/cloud/ec2 index e050611fcf8..54fc9eea467 100644 --- a/library/cloud/ec2 +++ b/library/cloud/ec2 @@ -220,6 +220,20 @@ options: choices: ["yes", "no"] aliases: [] version_added: "1.5" + profile: + description: + - uses a boto profile. Only works with boto >= 2.24.0 + required: false + default: null + aliases: [] + version_added: "1.5" + security_token: + description: + - security token to authenticate against AWS + required: false + default: null + aliases: [] + version_added: "1.5" requirements: [ "boto" ] author: Seth Vidal, Tim Gerla, Lester Wade diff --git a/library/cloud/ec2_ami b/library/cloud/ec2_ami index 866f2caf767..94c1e864a85 100644 --- a/library/cloud/ec2_ami +++ b/library/cloud/ec2_ami @@ -109,6 +109,20 @@ options: choices: ["yes", "no"] aliases: [] version_added: "1.5" + profile: + description: + - uses a boto profile. Only works with boto >= 2.24.0 + required: false + default: null + aliases: [] + version_added: "1.5" + security_token: + description: + - security token to authenticate against AWS + required: false + default: null + aliases: [] + version_added: "1.5" requirements: [ "boto" ] author: Evan Duffield diff --git a/library/cloud/ec2_eip b/library/cloud/ec2_eip index de041f42227..4d6d24eaa34 100644 --- a/library/cloud/ec2_eip +++ b/library/cloud/ec2_eip @@ -61,6 +61,20 @@ options: choices: ["yes", "no"] aliases: [] version_added: "1.5" + profile: + description: + - uses a boto profile. Only works with boto >= 2.24.0 + required: false + default: null + aliases: [] + version_added: "1.5" + security_token: + description: + - security token to authenticate against AWS + required: false + default: null + aliases: [] + version_added: "1.5" requirements: [ "boto" ] author: Lorin Hochstein diff --git a/library/cloud/ec2_group b/library/cloud/ec2_group index bbbb0fc24e0..5d72c009acc 100644 --- a/library/cloud/ec2_group +++ b/library/cloud/ec2_group @@ -65,6 +65,20 @@ options: choices: ["yes", "no"] aliases: [] version_added: "1.5" + profile: + description: + - uses a boto profile. 
Only works with boto >= 2.24.0 + required: false + default: null + aliases: [] + version_added: "1.5" + security_token: + description: + - security token to authenticate against AWS + required: false + default: null + aliases: [] + version_added: "1.5" requirements: [ "boto" ] ''' diff --git a/library/cloud/ec2_key b/library/cloud/ec2_key index 5e6950d2c8b..6523c70e95c 100644 --- a/library/cloud/ec2_key +++ b/library/cloud/ec2_key @@ -56,6 +56,20 @@ options: choices: ["yes", "no"] aliases: [] version_added: "1.5" + profile: + description: + - uses a boto profile. Only works with boto >= 2.24.0 + required: false + default: null + aliases: [] + version_added: "1.5" + security_token: + description: + - security token to authenticate against AWS + required: false + default: null + aliases: [] + version_added: "1.5" requirements: [ "boto" ] author: Vincent Viallet diff --git a/library/cloud/ec2_snapshot b/library/cloud/ec2_snapshot index b5d9df3b525..81cf3554b3d 100644 --- a/library/cloud/ec2_snapshot +++ b/library/cloud/ec2_snapshot @@ -59,17 +59,32 @@ options: default: null aliases: [] instance_id: - description: + description: - instance that has a the required volume to snapshot mounted required: false default: null aliases: [] device_name: - description: + description: - device name of a mounted volume to be snapshotted required: false default: null aliases: [] + profile: + description: + - uses a boto profile. Only works with boto >= 2.24.0 + required: false + default: null + aliases: [] + version_added: "1.5" + security_token: + description: + - security token to authenticate against AWS + required: false + default: null + aliases: [] + version_added: "1.5" + requirements: [ "boto" ] author: Will Thames ''' diff --git a/library/cloud/ec2_tag b/library/cloud/ec2_tag index ca5a337646f..7e3e4776d0b 100644 --- a/library/cloud/ec2_tag +++ b/library/cloud/ec2_tag @@ -67,6 +67,20 @@ options: choices: ["yes", "no"] aliases: [] version_added: "1.5" + profile: + description: + - uses a boto profile. Only works with boto >= 2.24.0 + required: false + default: null + aliases: [] + version_added: "1.5" + security_token: + description: + - security token to authenticate against AWS + required: false + default: null + aliases: [] + version_added: "1.5" requirements: [ "boto" ] author: Lester Wade diff --git a/library/cloud/ec2_vol b/library/cloud/ec2_vol index bdd2eae3822..fef476a2165 100644 --- a/library/cloud/ec2_vol +++ b/library/cloud/ec2_vol @@ -90,6 +90,20 @@ options: choices: ["yes", "no"] aliases: [] version_added: "1.5" + profile: + description: + - uses a boto profile. 
Only works with boto >= 2.24.0 + required: false + default: null + aliases: [] + version_added: "1.5" + security_token: + description: + - security token to authenticate against AWS + required: false + default: null + aliases: [] + version_added: "1.5" requirements: [ "boto" ] author: Lester Wade From 4a093a970987df0e91557ac2a3f9a6bb3468a71e Mon Sep 17 00:00:00 2001 From: willthames Date: Mon, 17 Feb 2014 13:05:28 +1000 Subject: [PATCH 139/772] Make it easier to determine whether instance creation or tagging fails --- library/cloud/ec2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/cloud/ec2 b/library/cloud/ec2 index e050611fcf8..f9d1a42813c 100644 --- a/library/cloud/ec2 +++ b/library/cloud/ec2 @@ -816,7 +816,7 @@ def create_instances(module, ec2, override_count=None): res = ec2.run_instances(**params) except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message)) instids = [ i.id for i in res.instances ] while True: @@ -834,7 +834,7 @@ def create_instances(module, ec2, override_count=None): try: ec2.create_tags(instids, instance_tags) except boto.exception.EC2ResponseError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message)) # wait here until the instances are up this_res = [] From f62cc1e3511c64aa5ecb741f7f4c95066431324a Mon Sep 17 00:00:00 2001 From: Sven Schliesing Date: Thu, 6 Mar 2014 13:16:29 +0100 Subject: [PATCH 140/772] Fix link in documentation c(dest) -> c(path) --- library/files/file | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/files/file b/library/files/file index 8e4e30a99b7..7a038c9f362 100644 --- a/library/files/file +++ b/library/files/file @@ -52,7 +52,7 @@ options: If C(link), the symbolic link will be created or changed. Use C(hard) for hardlinks. If C(absent), directories will be recursively deleted, and files or symlinks will be unlinked. If C(touch) (new in 1.4), an empty file will - be created if the c(dest) does not exist, while an existing file or + be created if the c(path) does not exist, while an existing file or directory will receive updated file access and modification times (similar to the way `touch` works from the command line). 
required: false From d3d5680d8f1a210b9c7f599c520e612074d3a048 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 6 Mar 2014 08:28:36 -0500 Subject: [PATCH 141/772] tags lists are properly uniqued and joined now, also avoids type issues when passed as list/set or strings Signed-off-by: Brian Coca --- lib/ansible/playbook/play.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index b9f740f2be0..70283d5c901 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -249,7 +249,7 @@ class Play(object): included_dep_vars = included_role_dep[2] if included_dep_name == dep: if "tags" in included_dep_vars: - included_dep_vars["tags"] = list(set(included_dep_vars["tags"] + passed_vars["tags"])) + included_dep_vars["tags"] = list(set(included_dep_vars["tags"]).union(set(passed_vars["tags"]))) else: included_dep_vars["tags"] = passed_vars["tags"][:] From cc6e1ec261b1806d8e8efc320b10a8abe4d12c20 Mon Sep 17 00:00:00 2001 From: Jimena Cabrera-Notari Date: Thu, 6 Mar 2014 13:33:53 +0000 Subject: [PATCH 142/772] Don't use keyword `error` in encode This is not supported in Python 2.6. Just use positional arguments. --- library/database/postgresql_privs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/library/database/postgresql_privs b/library/database/postgresql_privs index 2f3db9a93f1..de5fa94fa48 100644 --- a/library/database/postgresql_privs +++ b/library/database/postgresql_privs @@ -597,7 +597,8 @@ def main(): except psycopg2.Error, e: conn.rollback() # psycopg2 errors come in connection encoding, reencode - msg = e.message.decode(conn.encoding).encode(errors='replace') + msg = e.message.decode(conn.encoding).encode(sys.getdefaultencoding(), + 'replace') module.fail_json(msg=msg) if module.check_mode: From 79809b67dc26d4a0858ea7b5fcd81f9cbcd9dc11 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 6 Mar 2014 08:43:24 -0500 Subject: [PATCH 143/772] fixed another line with same issues Signed-off-by: Brian Coca --- lib/ansible/playbook/play.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 70283d5c901..aaa79358825 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -236,7 +236,7 @@ class Play(object): new_tags = var_obj.get('tags', []) if isinstance(new_tags, basestring): new_tags = [new_tags, ] - return list(set(old_tags + new_tags)) + return list(set(old_tags).union(set(new_tags))) passed_vars['tags'] = __merge_tags(role_vars) passed_vars['tags'] = __merge_tags(dep_vars) From 9f2999df7963777e39334a789611eaa0f38442c3 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 6 Mar 2014 09:44:56 -0600 Subject: [PATCH 144/772] Un-escape newlines in delimiters for assemble module --- lib/ansible/runner/action_plugins/assemble.py | 6 ++++++ library/files/assemble | 2 ++ 2 files changed, 8 insertions(+) diff --git a/lib/ansible/runner/action_plugins/assemble.py b/lib/ansible/runner/action_plugins/assemble.py index eb6faf5dfcf..c73964cda68 100644 --- a/lib/ansible/runner/action_plugins/assemble.py +++ b/lib/ansible/runner/action_plugins/assemble.py @@ -39,7 +39,13 @@ class ActionModule(object): for f in sorted(os.listdir(src_path)): fragment = "%s/%s" % (src_path, f) if delimit_me and delimiter: + # en-escape things like new-lines + delimiter = delimiter.decode('unicode-escape') tmp.write(delimiter) + # always make sure there's a newline after the + # delimiter, so lines don't 
run together
+                if delimiter[-1] != '\n':
+                    tmp.write('\n')
             if os.path.isfile(fragment):
                 tmp.write(file(fragment).read())
                 delimit_me = True
diff --git a/library/files/assemble b/library/files/assemble
index a8c78256e23..f4a60caf230 100644
--- a/library/files/assemble
+++ b/library/files/assemble
@@ -107,6 +107,8 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None):
             continue
         fragment = "%s/%s" % (src_path, f)
         if delimit_me and delimiter:
+            # un-escape anything like newlines
+            delimiter = delimiter.decode('unicode-escape')
             tmp.write(delimiter)
             # always make sure there's a newline after the
             # delimiter, so lines don't run together

From cc5cf9ec633e4f244bfadc008f09702455f1f8d5 Mon Sep 17 00:00:00 2001 From: Harsha Yalamanchili Date: Thu, 6 Mar 2014 09:36:51 -0800 Subject: [PATCH 145/772] Typo

--- packaging/debian/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packaging/debian/README.md b/packaging/debian/README.md
index b328a07b640..9aa54060bb8 100644
--- a/packaging/debian/README.md
+++ b/packaging/debian/README.md
@@ -13,7 +13,7 @@ The debian package file will be placed in the `../` directory. This can then be
 
 Note that `dpkg -i` does not resolve dependencies.
 
-To install the Ansible DEB package and resolve redepdencies:
+To install the Ansible DEB package and resolve dependencies:
 
     sudo dpkg -i <package-file>
    sudo apt-get -fy install
\ No newline at end of file

From 16fe09eef85a2c65ff46b3d9b49c1ca13507ac0c Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Thu, 6 Mar 2014 12:09:53 -0600 Subject: [PATCH 146/772] Fixes related to uncommenting test_dir_inventory in TestInventory.

0. Uncomment the test.
1. Test fails.
2. Make vars unique per file in test inventory files.
3. Modify token addition to not ast.literal_eval(v) a variable containing a hash.
4. Modify vars to have an escape in test inventory file.
5. Catch exceptions explicitly. Any unknown exceptions should be a bug.
6. Test passes.
--- lib/ansible/inventory/ini.py | 22 +++++++++++----- test/units/TestInventory.py | 26 ++++++++++--------- .../inventory_test_data/inventory_dir/0hosts | 2 +- .../inventory_test_data/inventory_dir/2levels | 2 +- .../inventory_dir/3comments | 2 +- 5 files changed, 33 insertions(+), 21 deletions(-)

diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py
index 024eb9a9a0e..718fee1338d 100644
--- a/lib/ansible/inventory/ini.py
+++ b/lib/ansible/inventory/ini.py
@@ -123,12 +123,22 @@ class InventoryParser(object):
                         (k,v) = t.split("=", 1)
                     except ValueError, e:
                         raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e)))
-                    try:
-                        host.set_variable(k,ast.literal_eval(v))
-                    except:
-                        # most likely a string that literal_eval
-                        # doesn't like, so just set it
-                        host.set_variable(k,v)
+
+                    # If there is a hash in the value don't pass it through to ast as ast will split at the hash.
+                    if "#" in v:
+                        host.set_variable(k, v)
+                    else:
+                        try:
+                            host.set_variable(k,ast.literal_eval(v))
+                        # Using explicit exceptions.
+                        # Likely a string that literal_eval does not like. We wil then just set it.
+                        except ValueError:
+                            # For some reason this was thought to be malformed.
+                            host.set_variable(k, v)
+                        except SyntaxError:
+                            # Is this a hash with an equals at the end?
+ host.set_variable(k, v) + self.groups[active_group_name].add_host(host) # [southeast:children] diff --git a/test/units/TestInventory.py b/test/units/TestInventory.py index 2ae6256e62b..bd2f24c063b 100644 --- a/test/units/TestInventory.py +++ b/test/units/TestInventory.py @@ -417,15 +417,17 @@ class TestInventory(unittest.TestCase): auth = inventory.get_variables('neptun')['auth'] assert auth == 'YWRtaW46YWRtaW4=' - # test disabled as needs to be updated to model desired behavior - # - #def test_dir_inventory(self): - # inventory = self.dir_inventory() - # vars = inventory.get_variables('zeus') - # - # print "VARS=%s" % vars - # - # assert vars == {'inventory_hostname': 'zeus', - # 'inventory_hostname_short': 'zeus', - # 'group_names': ['greek', 'major-god', 'ungrouped'], - # 'var_a': '1#2'} + def test_dir_inventory(self): + inventory = self.dir_inventory() + + host_vars = inventory.get_variables('zeus') + + expected_vars = {'inventory_hostname': 'zeus', + 'inventory_hostname_short': 'zeus', + 'group_names': ['greek', 'major-god', 'ungrouped'], + 'var_a': '3#4'} + + print "HOST VARS=%s" % host_vars + print "EXPECTED VARS=%s" % expected_vars + + assert host_vars == expected_vars \ No newline at end of file diff --git a/test/units/inventory_test_data/inventory_dir/0hosts b/test/units/inventory_test_data/inventory_dir/0hosts index 27fc46e8530..6f78a33a228 100644 --- a/test/units/inventory_test_data/inventory_dir/0hosts +++ b/test/units/inventory_test_data/inventory_dir/0hosts @@ -1,3 +1,3 @@ -zeus var_a=2 +zeus var_a=0 morpheus thor diff --git a/test/units/inventory_test_data/inventory_dir/2levels b/test/units/inventory_test_data/inventory_dir/2levels index 22f06bcd436..363294923ef 100644 --- a/test/units/inventory_test_data/inventory_dir/2levels +++ b/test/units/inventory_test_data/inventory_dir/2levels @@ -1,5 +1,5 @@ [major-god] -zeus var_a=1 +zeus var_a=2 thor [minor-god] diff --git a/test/units/inventory_test_data/inventory_dir/3comments b/test/units/inventory_test_data/inventory_dir/3comments index 74642f13cc7..e11b5e416bd 100644 --- a/test/units/inventory_test_data/inventory_dir/3comments +++ b/test/units/inventory_test_data/inventory_dir/3comments @@ -1,5 +1,5 @@ [major-god] # group with inline comments -zeus var_a="1#2" # host with inline comments and "#" in the var string +zeus var_a="3\#4" # host with inline comments and "#" in the var string # A comment thor From 4d80019ac8c390f10405609a47a752759afa5d27 Mon Sep 17 00:00:00 2001 From: Andrew Resch Date: Thu, 6 Mar 2014 10:24:16 -0800 Subject: [PATCH 147/772] Fix logic checking for both sudo and su, and their respective users --- lib/ansible/runner/__init__.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index c09670970b1..1f2ca7f1419 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -415,7 +415,7 @@ class Runner(object): environment_string = self._compute_environment_string(inject) - if tmp.find("tmp") != -1 and (self.sudo or self.su) and (self.sudo_user != 'root' or self.su_user != 'root'): + if tmp.find("tmp") != -1 and (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): # deal with possible umask issues once sudo'ed to other user cmd_chmod = "chmod a+r %s" % remote_module_path self._low_level_exec_command(conn, cmd_chmod, tmp, sudoable=False) @@ -444,7 +444,7 @@ class Runner(object): else: argsfile = self._transfer_str(conn, tmp, 'arguments', args) - if (self.sudo or 
self.su) and (self.sudo_user != 'root' or self.su_user != 'root'): + if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): # deal with possible umask issues once sudo'ed to other user cmd_args_chmod = "chmod a+r %s" % argsfile self._low_level_exec_command(conn, cmd_args_chmod, tmp, sudoable=False) @@ -486,7 +486,7 @@ class Runner(object): res = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, in_data=in_data) if tmp.find("tmp") != -1 and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: - if (self.sudo or self.su) and (self.sudo_user != 'root' or self.su_user != 'root'): + if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): # not sudoing to root, so maybe can't delete files as that other user # have to clean up temp files as original user in a second step cmd2 = "rm -rf %s >/dev/null 2>&1" % tmp @@ -986,11 +986,11 @@ class Runner(object): basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) basetmp = os.path.join(C.DEFAULT_REMOTE_TMP, basefile) - if (self.sudo or self.su) and (self.sudo_user != 'root' or self.su_user != 'root') and basetmp.startswith('$HOME'): + if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root') and basetmp.startswith('$HOME'): basetmp = os.path.join('/tmp', basefile) cmd = 'mkdir -p %s' % basetmp - if self.remote_user != 'root' or ((self.sudo or self.su) and (self.sudo_user != 'root' or self.su_user != 'root')): + if self.remote_user != 'root' or ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')): cmd += ' && chmod a+rx %s' % basetmp cmd += ' && echo %s' % basetmp From cd6d7ddc97fb5ae8d9e234395feba355bdbe112e Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 6 Mar 2014 13:32:00 -0500 Subject: [PATCH 148/772] Fix test_async by using env python --- test/integration/host_vars/testhost | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/host_vars/testhost b/test/integration/host_vars/testhost index facd519959b..6e1d11307f9 100644 --- a/test/integration/host_vars/testhost +++ b/test/integration/host_vars/testhost @@ -7,4 +7,4 @@ test_hash: host_vars_testhost: "this is in host_vars/testhost" # Support execution from within a virtualenv -ansible_python_interpreter: ${VIRTUAL_ENV-/usr}/bin/python +ansible_python_interpreter: '/usr/bin/env python' From 6626cb0b3d2bff379488d3bfb32e9b2499bbb11b Mon Sep 17 00:00:00 2001 From: g-k-r Date: Fri, 31 Jan 2014 12:46:00 +0100 Subject: [PATCH 149/772] add test same host in different files in dir tests issue #5749 same host defined in different groups which in turn are defined in different ini files in an inventory directory Conflicts: test/units/TestInventory.py --- test/units/TestInventory.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/test/units/TestInventory.py b/test/units/TestInventory.py index bd2f24c063b..4e188cd49bf 100644 --- a/test/units/TestInventory.py +++ b/test/units/TestInventory.py @@ -430,4 +430,12 @@ class TestInventory(unittest.TestCase): print "HOST VARS=%s" % host_vars print "EXPECTED VARS=%s" % expected_vars - assert host_vars == expected_vars \ No newline at end of file + assert host_vars == expected_vars + + def test_dir_inventory_multiple_groups(self): + inventory = self.dir_inventory() + group_greek = inventory.get_group('greek') + group_major_god = inventory.get_group('major-god') + actual_host_names = [host.name for host in group_greek.get_hosts()]; + print 
"%s : %s " % (group_greek.name, actual_host_names) + assert actual_host_names == ['zeus','morpheus'] \ No newline at end of file From dbad5d71c6af83c8995793f036896865e7bfec41 Mon Sep 17 00:00:00 2001 From: g-k-r Date: Fri, 31 Jan 2014 12:34:21 +0100 Subject: [PATCH 150/772] modifed test to use get_hosts instead of get_groups closes #5749 Conflicts: test/units/TestInventory.py --- test/units/TestInventory.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/test/units/TestInventory.py b/test/units/TestInventory.py index 4e188cd49bf..d7c27b38e00 100644 --- a/test/units/TestInventory.py +++ b/test/units/TestInventory.py @@ -434,8 +434,7 @@ class TestInventory(unittest.TestCase): def test_dir_inventory_multiple_groups(self): inventory = self.dir_inventory() - group_greek = inventory.get_group('greek') - group_major_god = inventory.get_group('major-god') - actual_host_names = [host.name for host in group_greek.get_hosts()]; - print "%s : %s " % (group_greek.name, actual_host_names) - assert actual_host_names == ['zeus','morpheus'] \ No newline at end of file + group_greek = inventory.get_hosts('greek') + actual_host_names = [host.name for host in group_greek]; + print "greek : %s " % (actual_host_names) + assert actual_host_names == ['zeus','morpheus'] From 913c855df492a509542980185fbbf0dffbba8b8c Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Thu, 6 Mar 2014 13:08:35 -0600 Subject: [PATCH 151/772] Formatting cleanup. Post merging of GH-5837 via GH-5749. --- test/units/TestInventory.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/units/TestInventory.py b/test/units/TestInventory.py index d7c27b38e00..f8e9232c540 100644 --- a/test/units/TestInventory.py +++ b/test/units/TestInventory.py @@ -433,8 +433,8 @@ class TestInventory(unittest.TestCase): assert host_vars == expected_vars def test_dir_inventory_multiple_groups(self): - inventory = self.dir_inventory() - group_greek = inventory.get_hosts('greek') - actual_host_names = [host.name for host in group_greek]; - print "greek : %s " % (actual_host_names) - assert actual_host_names == ['zeus','morpheus'] + inventory = self.dir_inventory() + group_greek = inventory.get_hosts('greek') + actual_host_names = [host.name for host in group_greek] + print "greek : %s " % actual_host_names + assert actual_host_names == ['zeus', 'morpheus'] From 2a0028e687b704e23256bb6fbfe1316829f43540 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 6 Mar 2014 13:33:18 -0600 Subject: [PATCH 152/772] Sanitize run_command argument output Fixes #6316 --- lib/ansible/module_utils/basic.py | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index c2be621d4bf..540efeb4bfc 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -46,6 +46,7 @@ BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE import os import re +import pipes import shlex import subprocess import sys @@ -1017,6 +1018,30 @@ class AnsibleModule(object): if path_prefix: env['PATH']="%s:%s" % (path_prefix, env['PATH']) + # create a printable version of the command for use + # in reporting later, which strips out things like + # passwords from the args list + if isinstance(args, list): + clean_args = " ".join(pipes.quote(arg) for arg in args) + else: + clean_args = args + + # all clean strings should return two match groups, + # where the first is the CLI argument and the second + # is the 
password/key/phrase that will be hidden + clean_re_strings = [ + # this removes things like --password, --pass, --pass-wd, etc. + # optionally followed by an '=' or a space. The password can + # be quoted or not too, though it does not care about quotes + # that are not balanced + # source: http://blog.stevenlevithan.com/archives/match-quoted-string + r'([-]{0,2}pass[-]?(?:word|wd)?[=\s]?)((?:["\'])?(?:[^\s])*(?:\1)?)', + # TODO: add more regex checks here + ] + for re_str in clean_re_strings: + r = re.compile(re_str) + clean_args = r.sub(r'\1********', clean_args) + if data: st_in = subprocess.PIPE try: @@ -1044,12 +1069,12 @@ class AnsibleModule(object): out, err = cmd.communicate(input=data) rc = cmd.returncode except (OSError, IOError), e: - self.fail_json(rc=e.errno, msg=str(e), cmd=args) + self.fail_json(rc=e.errno, msg=str(e), cmd=clean_args) except: - self.fail_json(rc=257, msg=traceback.format_exc(), cmd=args) + self.fail_json(rc=257, msg=traceback.format_exc(), cmd=clean_args) if rc != 0 and check_rc: msg = err.rstrip() - self.fail_json(cmd=args, rc=rc, stdout=out, stderr=err, msg=msg) + self.fail_json(cmd=clean_args, rc=rc, stdout=out, stderr=err, msg=msg) return (rc, out, err) def pretty_bytes(self,size): From 85bc14a8eb2d33b87a965ace41d068689dfaa35d Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 6 Mar 2014 14:49:40 -0500 Subject: [PATCH 153/772] Remve debug file from vault unit test --- test/units/TestVault.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/units/TestVault.py b/test/units/TestVault.py index f42188057f8..bcb494965cf 100644 --- a/test/units/TestVault.py +++ b/test/units/TestVault.py @@ -43,7 +43,6 @@ class TestVaultLib(TestCase): sensitive_data = "ansible" sensitive_hex = hexlify(sensitive_data) data = v._add_headers_and_hexify_encrypted_data(sensitive_data) - open("/tmp/awx.log", "a").write("data: %s\n" % data) lines = data.split('\n') assert len(lines) > 1, "failed to properly add header" header = lines[0] From ba9950a73a2352ab7976e1dc3f4a370f80b047c8 Mon Sep 17 00:00:00 2001 From: James Laska Date: Thu, 6 Mar 2014 15:52:15 -0500 Subject: [PATCH 154/772] Use correct value of ansible_distribution in integration tests This fixes integration tests when run on a RHEL system. Oddly, the ansible_distribution='RedHat' when run on RHEL. 
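
A quick way to confirm what the fact actually reports on a given box is an ad-hoc call to the setup module (the 'testhost' pattern here is illustrative):

    ansible testhost -m setup -a 'filter=ansible_distribution'

On RHEL this returns "ansible_distribution": "RedHat", which is why the conditionals below switch away from 'RHEL'.
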
--- test/integration/roles/test_service/tasks/main.yml | 4 ++-- test/integration/roles/test_yum/tasks/main.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/integration/roles/test_service/tasks/main.yml b/test/integration/roles/test_service/tasks/main.yml index a9da5d951a8..10a2a0d4837 100644 --- a/test/integration/roles/test_service/tasks/main.yml +++ b/test/integration/roles/test_service/tasks/main.yml @@ -11,7 +11,7 @@ - "install_result.mode == '0755'" - include: 'sysv_setup.yml' - when: ansible_distribution in ('RHEL', 'CentOS', 'ScientificLinux') + when: ansible_distribution in ['Redhat', 'CentOS', 'ScientificLinux'] - include: 'systemd_setup.yml' when: ansible_distribution == 'Fedora' - include: 'upstart_setup.yml' @@ -101,7 +101,7 @@ - "remove_result.state == 'absent'" - include: 'sysv_cleanup.yml' - when: ansible_distribution in ('RHEL', 'CentOS', 'ScientificLinux') + when: ansible_distribution in ['Redhat', 'CentOS', 'ScientificLinux'] - include: 'systemd_cleanup.yml' when: ansible_distribution == 'Fedora' - include: 'upstart_cleanup.yml' diff --git a/test/integration/roles/test_yum/tasks/main.yml b/test/integration/roles/test_yum/tasks/main.yml index 472dfff8e81..5df887ae9f9 100644 --- a/test/integration/roles/test_yum/tasks/main.yml +++ b/test/integration/roles/test_yum/tasks/main.yml @@ -17,5 +17,5 @@ # along with Ansible. If not, see . - include: 'yum.yml' - when: ansible_distribution in ('RHEL', 'CentOS', 'ScientificLinux', 'Fedora') + when: ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora'] From 868746b095b8767e77bb944661af0915735d022c Mon Sep 17 00:00:00 2001 From: James Laska Date: Thu, 6 Mar 2014 16:16:00 -0500 Subject: [PATCH 155/772] Correct ansible_distribution typo Redhat != RedHat --- test/integration/roles/test_service/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_service/tasks/main.yml b/test/integration/roles/test_service/tasks/main.yml index 10a2a0d4837..749d164724e 100644 --- a/test/integration/roles/test_service/tasks/main.yml +++ b/test/integration/roles/test_service/tasks/main.yml @@ -11,7 +11,7 @@ - "install_result.mode == '0755'" - include: 'sysv_setup.yml' - when: ansible_distribution in ['Redhat', 'CentOS', 'ScientificLinux'] + when: ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] - include: 'systemd_setup.yml' when: ansible_distribution == 'Fedora' - include: 'upstart_setup.yml' @@ -101,7 +101,7 @@ - "remove_result.state == 'absent'" - include: 'sysv_cleanup.yml' - when: ansible_distribution in ['Redhat', 'CentOS', 'ScientificLinux'] + when: ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] - include: 'systemd_cleanup.yml' when: ansible_distribution == 'Fedora' - include: 'upstart_cleanup.yml' From 3cce3650e59b457efabeca60508a965394539057 Mon Sep 17 00:00:00 2001 From: Paul Durivage Date: Wed, 5 Mar 2014 14:38:47 -0600 Subject: [PATCH 156/772] Use isinstance() rather than a check using type() --- lib/ansible/runner/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 7bbc9e372e1..1bf4673054e 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -243,7 +243,7 @@ class Runner(object): """ if complex_args is None: return module_args - if type(complex_args) != dict: + if not isinstance(complex_args, dict): raise errors.AnsibleError("complex arguments are not a dictionary: %s" % 
complex_args) for (k,v) in complex_args.iteritems(): if isinstance(v, basestring): From db67c3d77235b169c0bfa024a45743b4025c93bb Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Thu, 6 Mar 2014 19:10:56 -0500 Subject: [PATCH 157/772] Fixes a bug that prevents round-tripping of * and @ --- library/cloud/route53 | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/library/cloud/route53 b/library/cloud/route53 index 2ff22ded9dc..f98f68f4bd9 100644 --- a/library/cloud/route53 +++ b/library/cloud/route53 @@ -220,11 +220,16 @@ def main(): found_record = False sets = conn.get_all_rrsets(zones[zone_in]) for rset in sets: - if rset.type == type_in and rset.name == record_in: + # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round + # tripping of things like * and @. + decoded_name = rset.name.replace(r'\052', '*') + decoded_name = rset.name.replace(r'\100', '@') + + if rset.type == type_in and decoded_name == record_in: found_record = True record['zone'] = zone_in record['type'] = rset.type - record['record'] = rset.name + record['record'] = decoded_name record['ttl'] = rset.ttl record['value'] = ','.join(sorted(rset.resource_records)) record['values'] = sorted(rset.resource_records) From a45bfbe06eaaf8b0de02c0b62be332e6db286af8 Mon Sep 17 00:00:00 2001 From: aresch Date: Thu, 6 Mar 2014 16:12:11 -0800 Subject: [PATCH 158/772] Fix rpm_key on system with no gpg keys imported Without the -a option, rpm command will fail (exit code 1) and execute_command() will fail causing an initial key import to not work. [root@test ~]# rpm -q gpg-pubkey package gpg-pubkey is not installed [root@test ~]# echo $? 1 [root@test ~]# rpm -qa gpg-pubkey [root@test ~]# echo $? 0 --- library/packaging/rpm_key | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/rpm_key b/library/packaging/rpm_key index 82532477348..e9c3f34bd7f 100644 --- a/library/packaging/rpm_key +++ b/library/packaging/rpm_key @@ -161,7 +161,7 @@ class RpmKey: return stdout, stderr def is_key_imported(self, keyid): - stdout, stderr = self.execute_command([self.rpm, '-q', 'gpg-pubkey']) + stdout, stderr = self.execute_command([self.rpm, '-qa', 'gpg-pubkey']) for line in stdout.splitlines(): line = line.strip() if not line: From 79799f681903141e815e4c44b035590124549f12 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 6 Mar 2014 21:42:01 -0500 Subject: [PATCH 159/772] Allow any file that can be in YAML to also be in JSON. This is primarily done to support non-visual editors better. --- CHANGELOG.md | 1 + lib/ansible/utils/__init__.py | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 35934bc541a..c36e6524ac0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Major features/changes: * The deprecated legacy variable templating system has been finally removed. Use {{ foo }} always not $foo or ${foo}. * Role dependencies are now tracked across multiple plays, making common roles easier to include in dependencies without any special variable tricks. +* Any data file can also be JSON. Use sparingly -- with great power comes great responsibility. Starting file with "{" or "[" denotes JSON. 
New Modules: diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index c3e777e4d65..1d09cfde990 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -42,6 +42,7 @@ import traceback import getpass import sys import textwrap +import json #import vault from vault import VaultLib @@ -351,7 +352,16 @@ def smush_ds(data): return data def parse_yaml(data): - ''' convert a yaml string to a data structure ''' + ''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!''' + + data = data.lstrip() + if data.startswith("{") or data.startswith("["): + # since the line starts with { or [ we can infer this is a JSON document. + loaded = json.loads(data) + else: + # else this is pretty sure to be a YAML document + loaded = yaml.safe_load(data) + return smush_ds(yaml.safe_load(data)) def process_common_errors(msg, probline, column): From a4d01b0891ea87f9bb186d2a0ed7dcc760e88abc Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 6 Mar 2014 21:47:49 -0500 Subject: [PATCH 160/772] Also search .json filenames --- lib/ansible/constants.py | 4 ++-- lib/ansible/playbook/play.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 94070f641f2..6bf87e51f8a 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -93,8 +93,8 @@ else: DIST_MODULE_PATH = '/usr/share/ansible/' # check all of these extensions when looking for yaml files for things like -# group variables -YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml" ] +# group variables -- really anything we can load +YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ] # sections in config file DEFAULTS='defaults' diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index aaa79358825..41f246cbbb3 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -448,6 +448,7 @@ class Play(object): os.path.join(basepath, 'main'), os.path.join(basepath, 'main.yml'), os.path.join(basepath, 'main.yaml'), + os.path.join(basepath, 'main.json'), ) if sum([os.path.isfile(x) for x in mains]) > 1: raise errors.AnsibleError("found multiple main files at %s, only one allowed" % (basepath)) From 3f07ec3d73f59b9fdd77757612e93d8bed711f24 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 6 Mar 2014 22:07:35 -0500 Subject: [PATCH 161/772] Fixup JSON error reporting in previous commits. --- lib/ansible/utils/__init__.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 1d09cfde990..dd2989a94b5 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -351,13 +351,19 @@ def smush_ds(data): else: return data -def parse_yaml(data): +def parse_yaml(data, path_hint=None): ''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!''' data = data.lstrip() if data.startswith("{") or data.startswith("["): # since the line starts with { or [ we can infer this is a JSON document. 
- loaded = json.loads(data) + try: + loaded = json.loads(data) + except ValueError, ve: + if path_hint: + raise errors.AnsibleError(path_hint + ": " + str(ve)) + else: + raise errors.AnsibleError(str(ve)) else: # else this is pretty sure to be a YAML document loaded = yaml.safe_load(data) @@ -522,7 +528,7 @@ def parse_yaml_from_file(path, vault_password=None): data = vault.decrypt(data) try: - return parse_yaml(data) + return parse_yaml(data, path_hint=path) except yaml.YAMLError, exc: process_yaml_error(exc, data, path) From e486dbab04d44dc0aac7ca2fefd769d2a3e95775 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 6 Mar 2014 22:15:21 -0500 Subject: [PATCH 162/772] Use the loaded variable since we have it assigned, not just for error detection. --- lib/ansible/utils/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index dd2989a94b5..405641eb163 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -355,6 +355,7 @@ def parse_yaml(data, path_hint=None): ''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!''' data = data.lstrip() + loaded = None if data.startswith("{") or data.startswith("["): # since the line starts with { or [ we can infer this is a JSON document. try: @@ -368,7 +369,7 @@ def parse_yaml(data, path_hint=None): # else this is pretty sure to be a YAML document loaded = yaml.safe_load(data) - return smush_ds(yaml.safe_load(data)) + return smush_ds(loaded) def process_common_errors(msg, probline, column): replaced = probline.replace(" ","") From b6056044b741967dd2f688b5618e6276e5bbaee5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 6 Mar 2014 21:11:57 -0600 Subject: [PATCH 163/772] Partial revert of 73c883c due to issues with handlers in roles Fixes #6322 --- CHANGELOG.md | 1 - lib/ansible/playbook/__init__.py | 9 +-------- lib/ansible/playbook/play.py | 4 ++-- 3 files changed, 3 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c36e6524ac0..2703f769095 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,6 @@ Ansible Changes By Release Major features/changes: * The deprecated legacy variable templating system has been finally removed. Use {{ foo }} always not $foo or ${foo}. -* Role dependencies are now tracked across multiple plays, making common roles easier to include in dependencies without any special variable tricks. * Any data file can also be JSON. Use sparingly -- with great power comes great responsibility. Starting file with "{" or "[" denotes JSON. 
New Modules:

diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py
index 3fd84239fb7..65965526251 100644
--- a/lib/ansible/playbook/__init__.py
+++ b/lib/ansible/playbook/__init__.py
@@ -240,20 +240,13 @@ class PlayBook(object):
 plays = []
 matched_tags_all = set()
 unmatched_tags_all = set()
- included_roles = []

 # loop through all patterns and run them
 self.callbacks.on_start()
 for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
- play = Play(self, play_ds, play_basedir, included_roles=included_roles, vault_password=self.vault_password)
+ play = Play(self, play_ds, play_basedir, vault_password=self.vault_password)
 assert play is not None

- # add any new roles brought in by this play to the
- # global list of roles we're tracking
- for role in play.included_roles:
- if role not in included_roles:
- included_roles.append(role)
-
 matched_tags, unmatched_tags = play.compare_tags(self.only_tags)
 matched_tags_all = matched_tags_all | matched_tags
 unmatched_tags_all = unmatched_tags_all | unmatched_tags
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index 41f246cbbb3..08564ba57e2 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -49,7 +49,7 @@ class Play(object):

 # *************************************************

- def __init__(self, playbook, ds, basedir, included_roles=[], vault_password=None):
+ def __init__(self, playbook, ds, basedir, vault_password=None):
 ''' constructor loads from a play datastructure '''

 for x in ds.keys():
@@ -81,7 +81,7 @@ class Play(object):
 self._update_vars_files_for_host(None)

 # now we load the roles into the datastructure
- self.included_roles = included_roles
+ self.included_roles = []
 ds = self._load_roles(self.roles, ds)

 # and finally re-process the vars files as they may have

From f80d02d84114c7565d3c906421bbcfe7c0222f05 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Fri, 7 Mar 2014 00:07:10 -0600
Subject: [PATCH 164/772] Properly wrap logical elements together for su/sudo
 detection

---
 lib/ansible/runner/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index 1f2ca7f1419..ec4878e015a 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -415,7 +415,7 @@ class Runner(object):

 environment_string = self._compute_environment_string(inject)

- if tmp.find("tmp") != -1 and (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'):
+ if tmp.find("tmp") != -1 and ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')):
 # deal with possible umask issues once sudo'ed to other user
 cmd_chmod = "chmod a+r %s" % remote_module_path
 self._low_level_exec_command(conn, cmd_chmod, tmp, sudoable=False)

From 2cf3a7f7be24ea5624b4fe3f986c35afb8ab4c0c Mon Sep 17 00:00:00 2001
From: karelstriegel
Date: Fri, 7 Mar 2014 10:34:44 +0100
Subject: [PATCH 165/772] Update intro_adhoc.rst

Small grammar fix

---
 docsite/rst/intro_adhoc.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docsite/rst/intro_adhoc.rst b/docsite/rst/intro_adhoc.rst
index a49fdcfdc40..f849a1021c0 100644
--- a/docsite/rst/intro_adhoc.rst
+++ b/docsite/rst/intro_adhoc.rst
@@ -248,7 +248,7 @@ Be sure to use a high enough ``--forks`` value if you want to get all of your jobs
 very quickly. After the time limit (in seconds) runs out (``-B``), the process on the remote
 nodes will be terminated.
-Typically you'll be only be backgrounding long-running
+Typically you'll only be backgrounding long-running
 shell commands or software upgrades only. Backgrounding the copy module does not do a background
 file transfer. :doc:`Playbooks ` also support polling, and have a simplified syntax for this.

 .. _checking_facts:

From e846b4c6199912493f6a570b9cc99d257269065c Mon Sep 17 00:00:00 2001
From: George Angelopoulos
Date: Fri, 7 Mar 2014 14:36:46 +0200
Subject: [PATCH 166/772] English syntax fix

remove redundant 'are'

---
 docsite/rst/playbooks_acceleration.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docsite/rst/playbooks_acceleration.rst b/docsite/rst/playbooks_acceleration.rst
index c11961ca9d6..6b25f6cced1 100644
--- a/docsite/rst/playbooks_acceleration.rst
+++ b/docsite/rst/playbooks_acceleration.rst
@@ -8,7 +8,7 @@ You Might Not Need This!

 Are you running Ansible 1.5 or later? If so, you may not need accelerate mode due to a new feature called "SSH pipelining" and should read the :ref:`pipelining` section of the documentation.

-For users on 1.5 and later, accelerate mode only makes sense if you are (A) are managing from an Enterprise Linux 6 or earlier host
+For users on 1.5 and later, accelerate mode only makes sense if you (A) are managing from an Enterprise Linux 6 or earlier host
 and still are on paramiko, or (B) can't enable TTYs with sudo as described in the pipelining docs.

 If you can use pipelining, Ansible will reduce the amount of files transferred over the wire,

From c7d1c1a30617280fdeb99310c98dab1bdec00b53 Mon Sep 17 00:00:00 2001
From: James Laska
Date: Fri, 7 Mar 2014 10:41:01 -0500
Subject: [PATCH 167/772] [test_service] correct upstart service name and
 permissions

The 'service' utility was unable to find the 'ansible_test' service due
to an unexpected filename. This patch corrects the filename and adjusts
the permissions to match other service scripts within /etc/init/.
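Upstart only reads job definitions named /etc/init/<job>.conf, which is why the rename below matters. A small self-contained checker in the same spirit as the test assertions (the path mirrors the test; the helper itself is illustrative, not part of the patch):

    import os
    import stat

    def check_upstart_job(path='/etc/init/ansible_test.conf'):
        # without the .conf suffix, 'service ansible_test start' cannot find the job
        assert path.endswith('.conf'), 'upstart will ignore this file'
        # job files are configuration, not executables, hence 0644 like their neighbours
        mode = stat.S_IMODE(os.stat(path).st_mode)
        assert mode == 0o644, 'unexpected mode %o' % mode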
--- .../roles/test_service/tasks/upstart_cleanup.yml | 4 ++-- test/integration/roles/test_service/tasks/upstart_setup.yml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/integration/roles/test_service/tasks/upstart_cleanup.yml b/test/integration/roles/test_service/tasks/upstart_cleanup.yml index 3c4e4e50477..c99446bf652 100644 --- a/test/integration/roles/test_service/tasks/upstart_cleanup.yml +++ b/test/integration/roles/test_service/tasks/upstart_cleanup.yml @@ -1,10 +1,10 @@ - name: remove the upstart init file - file: path=/etc/init/ansible_test state=absent + file: path=/etc/init/ansible_test.conf state=absent register: remove_upstart_result - name: assert that the upstart init file was removed assert: that: - - "remove_upstart_result.path == '/etc/init/ansible_test'" + - "remove_upstart_result.path == '/etc/init/ansible_test.conf'" - "remove_upstart_result.state == 'absent'" diff --git a/test/integration/roles/test_service/tasks/upstart_setup.yml b/test/integration/roles/test_service/tasks/upstart_setup.yml index 70fbee26d05..e889ef2789d 100644 --- a/test/integration/roles/test_service/tasks/upstart_setup.yml +++ b/test/integration/roles/test_service/tasks/upstart_setup.yml @@ -1,12 +1,12 @@ - name: install the upstart init file - copy: src=ansible.upstart dest=/etc/init/ansible_test mode=0755 + copy: src=ansible.upstart dest=/etc/init/ansible_test.conf mode=0644 register: install_upstart_result - name: assert that the upstart init file was installed assert: that: - - "install_upstart_result.dest == '/etc/init/ansible_test'" + - "install_upstart_result.dest == '/etc/init/ansible_test.conf'" - "install_upstart_result.state == 'file'" - - "install_upstart_result.mode == '0755'" + - "install_upstart_result.mode == '0644'" - "install_upstart_result.md5sum == 'ab3900ea4de8423add764c12aeb90c01'" From cbd1da645756e6d6fbd255bde37d9be452c547fe Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sun, 16 Feb 2014 11:20:56 -0800 Subject: [PATCH 168/772] add yum module downgrade support --- library/packaging/yum | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/library/packaging/yum b/library/packaging/yum index 61bb836b43a..8a1a59458be 100644 --- a/library/packaging/yum +++ b/library/packaging/yum @@ -28,6 +28,7 @@ import yum try: from yum.misc import find_unfinished_transactions, find_ts_remaining from rpmUtils.miscutils import splitFilename + from rpmUtils.miscutils import compareEVR transaction_helpers = True except: transaction_helpers = False @@ -38,7 +39,7 @@ module: yum version_added: historical short_description: Manages packages with the I(yum) package manager description: - - Installs, upgrade, removes, and lists packages and groups with the I(yum) package manager. + - Installs, upgrades, downgrades, removes and lists packages and groups with the I(yum) package manager. options: name: description: @@ -461,6 +462,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): res['msg'] = '' res['rc'] = 0 res['changed'] = False + downgrade = False for spec in items: pkg = None @@ -535,12 +537,30 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): if found: continue + + # downgrade - the yum install command will only install or upgrade to a spec version, it will + # not install an older version of an RPM even if specifed by the install spec. So we need to + # determine if this is a downgrade, and then use the yum downgrade command to install the RPM. 
+ pkg_name = splitFilename(spec)[0] + pkgs = is_installed(module, repoq, pkg_name, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) + if pkgs: + (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(pkgs[0]) + (new_name, new_ver, new_rel, new_epoch, new_arch) = splitFilename(spec) + + compare = compareEVR((cur_epoch, cur_ver, cur_rel), (new_epoch, new_ver, new_rel)) + if compare > 0: + downgrade = True + # if not - then pass in the spec as what to install # we could get here if nothing provides it but that's not # the error we're catching here pkg = spec - cmd = yum_basecmd + ['install', pkg] + operation = 'install' + if downgrade: + operation = 'downgrade' + + cmd = yum_basecmd + [operation, pkg] if module.check_mode: module.exit_json(changed=True) From f47fe7af62bc4e24974c752d9fa1310eb6b86799 Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sun, 16 Feb 2014 11:23:00 -0800 Subject: [PATCH 169/772] add sponsor copyright/attribution --- library/packaging/yum | 1 + 1 file changed, 1 insertion(+) diff --git a/library/packaging/yum b/library/packaging/yum index 8a1a59458be..d40e9226f0e 100644 --- a/library/packaging/yum +++ b/library/packaging/yum @@ -1,6 +1,7 @@ #!/usr/bin/python -tt # -*- coding: utf-8 -*- +# (c) 2014, Epic Games, Inc. # (c) 2012, Red Hat, Inc # Written by Seth Vidal # From 29f1528492d82a6b6650b3142e3cc9b69a0e828a Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sun, 16 Feb 2014 15:06:46 -0800 Subject: [PATCH 170/772] change order of copyright --- library/packaging/yum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/yum b/library/packaging/yum index d40e9226f0e..09ce5b6dd60 100644 --- a/library/packaging/yum +++ b/library/packaging/yum @@ -1,9 +1,9 @@ #!/usr/bin/python -tt # -*- coding: utf-8 -*- -# (c) 2014, Epic Games, Inc. # (c) 2012, Red Hat, Inc # Written by Seth Vidal +# (c) 2014, Epic Games, Inc. # # This file is part of Ansible # From 9244283122bd3167a36755dee5c3cc399ee1ac2f Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Fri, 7 Mar 2014 13:53:31 -0600 Subject: [PATCH 171/772] Spelling and formatting. --- library/packaging/yum | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/library/packaging/yum b/library/packaging/yum index 09ce5b6dd60..15f086f6b81 100644 --- a/library/packaging/yum +++ b/library/packaging/yum @@ -110,7 +110,7 @@ EXAMPLES = ''' - name: remove the Apache package yum: name=httpd state=removed -- name: install the latest version of Apche from the testing repo +- name: install the latest version of Apache from the testing repo yum: name=httpd enablerepo=testing state=installed - name: upgrade all packages @@ -463,6 +463,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): res['msg'] = '' res['rc'] = 0 res['changed'] = False + downgrade = False for spec in items: @@ -540,7 +541,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): continue # downgrade - the yum install command will only install or upgrade to a spec version, it will - # not install an older version of an RPM even if specifed by the install spec. So we need to + # not install an older version of an RPM even if specified by the install spec. So we need to # determine if this is a downgrade, and then use the yum downgrade command to install the RPM. 
pkg_name = splitFilename(spec)[0] pkgs = is_installed(module, repoq, pkg_name, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) From 3cd7d47b7ec2a8a560c6da803768eb3d9983c252 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 7 Mar 2014 14:36:54 -0600 Subject: [PATCH 172/772] Correctly catch template errors without returning invalid data Fixes #6329 --- lib/ansible/utils/template.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index fc4ff9fd204..e58543c411f 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -316,6 +316,8 @@ def template_from_string(basedir, data, vars, fail_on_undefined=False): except Exception, e: if 'recursion' in str(e): raise errors.AnsibleError("recursive loop detected in template string: %s" % data) + elif isinstance(e, TemplateSyntaxError): + raise errors.AnsibleError("there was an error in the template: %s" % data) else: return data From 47b2dc611ce649b4087f751de253ab3fd8d9404b Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Fri, 7 Mar 2014 15:01:39 -0600 Subject: [PATCH 173/772] Look at the filename and if it has a version defined possibly process this as a downgrade. --- library/packaging/yum | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/library/packaging/yum b/library/packaging/yum index 15f086f6b81..1a77cf0f0b0 100644 --- a/library/packaging/yum +++ b/library/packaging/yum @@ -543,15 +543,18 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # downgrade - the yum install command will only install or upgrade to a spec version, it will # not install an older version of an RPM even if specified by the install spec. So we need to # determine if this is a downgrade, and then use the yum downgrade command to install the RPM. - pkg_name = splitFilename(spec)[0] - pkgs = is_installed(module, repoq, pkg_name, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) - if pkgs: - (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(pkgs[0]) - (new_name, new_ver, new_rel, new_epoch, new_arch) = splitFilename(spec) - - compare = compareEVR((cur_epoch, cur_ver, cur_rel), (new_epoch, new_ver, new_rel)) - if compare > 0: - downgrade = True + split_pkg_name = splitFilename(spec) + # if the Name and Version match a version was not provided and this is not a downgrade. + if split_pkg_name[0] == split_pkg_name[1]: + pkg_name = split_pkg_name[0] + pkgs = is_installed(module, repoq, pkg_name, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) + if pkgs: + (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(pkgs[0]) + (new_name, new_ver, new_rel, new_epoch, new_arch) = splitFilename(spec) + + compare = compareEVR((cur_epoch, cur_ver, cur_rel), (new_epoch, new_ver, new_rel)) + if compare > 0: + downgrade = True # if not - then pass in the spec as what to install # we could get here if nothing provides it but that's not From d6247a35832d14e2c6d52ba9e6e3160b5b20b47b Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Fri, 7 Mar 2014 15:22:59 -0600 Subject: [PATCH 174/772] Yum: Fix logic in yum. 
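The decision these yum patches converge on is simple to state on its own: only switch from 'install' to 'downgrade' when the requested EVR sorts below the installed one. A rough standalone sketch (compare_evr below is a crude stand-in for rpmUtils' compareEVR, and the version tuples are hypothetical):

    installed = ('0', '2.4.6', '80')   # (epoch, version, release) currently on the system
    requested = ('0', '2.4.2', '10')   # EVR parsed out of the install spec

    def compare_evr(a, b):
        # real rpm comparison is segment-aware; plain tuple ordering only
        # approximates it for simple versions like these
        return (a > b) - (a < b)

    operation = 'downgrade' if compare_evr(installed, requested) > 0 else 'install'
    print(operation)  # downgrade -- plain 'yum install' would refuse to go backwards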
--- library/packaging/yum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/yum b/library/packaging/yum index 1a77cf0f0b0..eb8472ee0a9 100644 --- a/library/packaging/yum +++ b/library/packaging/yum @@ -545,7 +545,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # determine if this is a downgrade, and then use the yum downgrade command to install the RPM. split_pkg_name = splitFilename(spec) # if the Name and Version match a version was not provided and this is not a downgrade. - if split_pkg_name[0] == split_pkg_name[1]: + if split_pkg_name[0] != split_pkg_name[1]: pkg_name = split_pkg_name[0] pkgs = is_installed(module, repoq, pkg_name, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) if pkgs: From e3b887f770db983393cd920ce8699a9aeb6630e2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 7 Mar 2014 16:34:04 -0600 Subject: [PATCH 175/772] Fix range issue in inventory and add additional error checking Fixes #6331 --- lib/ansible/inventory/__init__.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 8f74d5ea9e9..171a4f2a04c 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -208,12 +208,14 @@ class Inventory(object): """ # The regex used to match on the range, which can be [x] or [x-y]. - pattern_re = re.compile("^(.*)\[([0-9]+)(?:(?:-)([0-9]+))?\](.*)$") + pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$") m = pattern_re.match(pattern) if m: (target, first, last, rest) = m.groups() first = int(first) if last: + if first < 0: + raise errors.AnsibleError("invalid range: negative indices cannot be used as the first item in a range") last = int(last) else: last = first @@ -245,10 +247,13 @@ class Inventory(object): right = 0 left=int(left) right=int(right) - if left != right: - return hosts[left:right] - else: - return [ hosts[left] ] + try: + if left != right: + return hosts[left:right] + else: + return [ hosts[left] ] + except IndexError: + raise errors.AnsibleError("no hosts matching the pattern '%s' were found" % pat) def _create_implicit_localhost(self, pattern): new_host = Host(pattern) From a483e42583998f091a77a99908b65f3bf9dc9545 Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Fri, 7 Mar 2014 18:19:46 -0600 Subject: [PATCH 176/772] unarchive module: Add creates functionality. Closes GH-5354. Forklift copy logic into unarchive. Provides integration unit tests. --- library/files/unarchive | 24 ++++++++++++++++- .../roles/test_unarchive/tasks/main.yml | 27 +++++++++++++++++++ 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/library/files/unarchive b/library/files/unarchive index 661f3899690..ab04e57475c 100644 --- a/library/files/unarchive +++ b/library/files/unarchive @@ -43,7 +43,13 @@ options: required: false choices: [ "yes", "no" ] default: "yes" -author: Dylan Martin + creates: + description: + - a filename, when it already exists, this step will B(not) be run. + required: no + default: null + version_added: "1.6" +author: Dylan Martin todo: - detect changed/unchanged for .zip files - handle common unarchive args, like preserve owner/timestamp etc... 
@@ -168,6 +174,7 @@ def main(): original_basename = dict(required=False), # used to handle 'dest is a directory' via template, a slight hack dest = dict(required=True), copy = dict(default=True, type='bool'), + creates = dict(required=False), ), add_file_common_args=True, ) @@ -175,6 +182,7 @@ def main(): src = os.path.expanduser(module.params['src']) dest = os.path.expanduser(module.params['dest']) copy = module.params['copy'] + creates = module.params['creates'] # did tar file arrive? if not os.path.exists(src): @@ -185,6 +193,20 @@ def main(): if not os.access(src, os.R_OK): module.fail_json(msg="Source '%s' not readable" % src) + if creates: + # do not run the command if the line contains creates=filename + # and the filename already exists. This allows idempotence + # of command executions. + v = os.path.expanduser(creates) + if os.path.exists(v): + module.exit_json( + stdout="skipped, since %s exists" % v, + skipped=True, + changed=False, + stderr=False, + rc=0 + ) + # is dest OK to receive tar file? if not os.path.exists(os.path.dirname(dest)): module.fail_json(msg="Destination directory '%s' does not exist" % (os.path.dirname(dest))) diff --git a/test/integration/roles/test_unarchive/tasks/main.yml b/test/integration/roles/test_unarchive/tasks/main.yml index 817096617bf..56b31e6b2d0 100644 --- a/test/integration/roles/test_unarchive/tasks/main.yml +++ b/test/integration/roles/test_unarchive/tasks/main.yml @@ -64,6 +64,33 @@ - name: remove our tar.gz unarchive destination file: path={{output_dir}}/test-unarchive-tar-gz state=absent +- name: create our tar.gz unarchive destination for creates + file: path={{output_dir}}/test-unarchive-tar-gz state=directory + +- name: unarchive a tar.gz file with creates set + unarchive: src={{output_dir}}/test-unarchive.tar.gz dest={{output_dir | expanduser}}/test-unarchive-tar-gz copy=no creates={{output_dir}}/test-unarchive-tar-gz/foo-unarchive.txt + register: unarchive02b + +- name: verify that the file was marked as changed + assert: + that: + - "unarchive02b.changed == true" + +- name: verify that the file was unarchived + file: path={{output_dir}}/test-unarchive-tar-gz/foo-unarchive.txt state=file + +- name: unarchive a tar.gz file with creates over an existing file + unarchive: src={{output_dir}}/test-unarchive.tar.gz dest={{output_dir | expanduser}}/test-unarchive-tar-gz copy=no creates={{output_dir}}/test-unarchive-tar-gz/foo-unarchive.txt + register: unarchive02c + +- name: verify that the file was not marked as changed + assert: + that: + - "unarchive02c.changed == false" + +- name: remove our tar.gz unarchive destination + file: path={{output_dir}}/test-unarchive-tar-gz state=absent + - name: create our zip unarchive destination file: path={{output_dir}}/test-unarchive-zip state=directory From 44ebd3f4f3569a029355460407c2cb70c8d21fec Mon Sep 17 00:00:00 2001 From: Charles Duffy Date: Fri, 7 Mar 2014 18:52:47 -0600 Subject: [PATCH 177/772] systemd template services must not discard template part --- library/system/service | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/library/system/service b/library/system/service index 2e26a47b636..fd3bc8d2434 100644 --- a/library/system/service +++ b/library/system/service @@ -410,11 +410,13 @@ class LinuxService(Service): # adjust the service name to account for template service unit files index = name.find('@') if index != -1: - name = name[:index+1] + template_name = name[:index+1] + else: + template_name = name self.__systemd_unit = None for line in out.splitlines(): - if 
line.startswith(name): + if line.startswith(template_name): self.__systemd_unit = name return True return False From 7686204b4368f77c0675d6fc66345fd6b79186ad Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Wed, 15 Jan 2014 21:24:55 -0600 Subject: [PATCH 178/772] Refactored a few things to be more inline with the Ansible way. Passing testing. --- library/system/at | 166 ++++++++++++++++++++++------------------------ 1 file changed, 79 insertions(+), 87 deletions(-) diff --git a/library/system/at b/library/system/at index ffac9d1d535..274aa1dd77d 100644 --- a/library/system/at +++ b/library/system/at @@ -25,13 +25,8 @@ short_description: Schedule the execution of a command or scripts via the at com description: - Use this module to schedule a command or script to run once in the future. - All jobs are executed in the a queue. -version_added: "0.0" +version_added: "1.5" options: - user: - description: - - The user to execute the at command as. - required: false - default: null command: description: - A command to be executed in the future. @@ -42,22 +37,26 @@ options: - An existing script to be executed in the future. required: false default: null - unit_count: + count: description: - The count of units in the future to execute the command or script. required: true - unit_type: + units: description: - The type of units in the future to execute the command or script. required: true choices: ["minutes", "hours", "days", "weeks"] - action: + state: description: - - The action to take for the job defaulting to add. Unique will verify that there is only one entry in the queue. - - Delete will remove all existing queued jobs. - required: true - choices: ["add", "delete", "unique"] - default: add + - The state dictates if the command or script_file should be evaluated as present(added) or absent(deleted). + required: false + choices: ["present", "absent"] + default: "present" + unique: + description: + - If a matching job is present a new job will not be added. + required: false + default: false requirements: - at author: Richard Isaacson @@ -65,33 +64,41 @@ author: Richard Isaacson EXAMPLES = ''' # Schedule a command to execute in 20 minutes as root. -- at: command="ls -d / > /dev/null" unit_count=20 unit_type="minutes" - -# Schedule a script to execute in 1 hour as the neo user. -- at: script_file="/some/script.sh" user="neo" unit_count=1 unit_type="hours" +- at: command="ls -d / > /dev/null" count=20 units="minutes" # Match a command to an existing job and delete the job. -- at: command="ls -d / > /dev/null" action="delete" +- at: command="ls -d / > /dev/null" state="absent" # Schedule a command to execute in 20 minutes making sure it is unique in the queue. 
-- at: command="ls -d / > /dev/null" action="unique" unit_count=20 unit_type="minutes" +- at: command="ls -d / > /dev/null" unique=true count=20 units="minutes" ''' import os import tempfile +def add_job(module, result, at_cmd, count, units, command, script_file): + at_command = "%s now + %s %s -f %s" % (at_cmd, count, units, script_file) + rc, out, err = module.run_command(at_command, check_rc=True) + if command: + os.unlink(script_file) + result['changed'] = True + +def delete_job(module, result, at_cmd, command, script_file): + for matching_job in matching_jobs(module, at_cmd, script_file): + at_command = "%s -d %s" % (at_cmd, matching_job) + rc, out, err = module.run_command(at_command, check_rc=True) + result['changed'] = True + if command: + os.unlink(script_file) + module.exit_json(**result) -def matching_jobs(module, at_cmd, script_file, user=None): +def matching_jobs(module, at_cmd, script_file): matching_jobs = [] atq_cmd = module.get_bin_path('atq', True) # Get list of job numbers for the user. atq_command = "%s" % (atq_cmd) - if user: - atq_command = "su '%s' -c '%s'" % (user, atq_command) - rc, out, err = module.run_command(atq_command) - if rc != 0: - module.fail_json(msg=err) + rc, out, err = module.run_command(atq_command, check_rc=True) current_jobs = out.splitlines() if len(current_jobs) == 0: return matching_jobs @@ -104,100 +111,85 @@ def matching_jobs(module, at_cmd, script_file, user=None): for current_job in current_jobs: split_current_job = current_job.split() at_command = "%s -c %s" % (at_cmd, split_current_job[0]) - if user: - at_command = "su '%s' -c '%s'" % (user, at_command) - rc, out, err = module.run_command(at_command) - if rc != 0: - module.fail_json(msg=err) + rc, out, err = module.run_command(at_command, check_rc=True) if script_file_string in out: matching_jobs.append(split_current_job[0]) # Return the list. 
return matching_jobs +def create_tempfile(command): + filed, script_file = tempfile.mkstemp(prefix='at') + fileh = os.fdopen(filed, 'w') + fileh.write(command) + fileh.close() + return script_file + #================================================ def main(): module = AnsibleModule( argument_spec = dict( - user=dict(required=False), - command=dict(required=False), - script_file=dict(required=False), - unit_count=dict(required=False, + command=dict(required=False, + type='str'), + script_file=dict(required=False, + type='str'), + count=dict(required=False, type='int'), - unit_type=dict(required=False, + units=dict(required=False, default=None, - choices=["minutes", "hours", "days", "weeks"], - type="str"), - action=dict(required=False, - default="add", - choices=["add", "delete", "unique"], - type="str") + choices=['minutes', 'hours', 'days', 'weeks'], + type='str'), + state=dict(required=False, + default='present', + choices=['present', 'absent'], + type='str'), + unique=dict(required=False, + default=False, + type='bool') ), - supports_check_mode = False, + mutually_exclusive = [['command', 'script_file']], + required_one_of = [['command', 'script_file']], + supports_check_mode = False ) at_cmd = module.get_bin_path('at', True) - user = module.params['user'] command = module.params['command'] script_file = module.params['script_file'] - unit_count = module.params['unit_count'] - unit_type = module.params['unit_type'] - action = module.params['action'] - - if ((action == 'add') and (not unit_count or not unit_type)): - module.fail_json(msg="add action requires unit_count and unit_type") - - if (not command) and (not script_file): - module.fail_json(msg="command or script_file not specified") + count = module.params['count'] + units = module.params['units'] + state = module.params['state'] + unique = module.params['unique'] - if command and script_file: - module.fail_json(msg="command and script_file are mutually exclusive") + if ((state == 'present') and (not count or not units)): + module.fail_json(msg="present state requires count and units") result = {} - result['action'] = action + result['state'] = state result['changed'] = False # If command transform it into a script_file if command: - filed, script_file = tempfile.mkstemp(prefix='at') - fileh = os.fdopen(filed, 'w') - fileh.write(command) - fileh.close() - - # if delete then return - if action == 'delete': - for matching_job in matching_jobs(module, at_cmd, script_file, user): - at_command = "%s -d %s" % (at_cmd, matching_job) - if user: - at_command = "su '%s' -c '%s'" % (user, at_ccommand) - rc, out, err = module.run_command(at_command) - if rc != 0: - module.fail_json(msg=err) - result['changed'] = True - module.exit_json(**result) + script_file = create_tempfile(command) + + # if absent remove existing and return + if state == 'absent': + delete_job(module, result, at_cmd, command, script_file) # if unique if existing return unchanged - if action == 'unique': - if len(matching_jobs(module, at_cmd, script_file, user)) != 0: + if unique: + if len(matching_jobs(module, at_cmd, script_file)) != 0: + if command: + os.unlink(script_file) module.exit_json(**result) result['script_file'] = script_file - result['unit_count'] = unit_count - result['unit_type'] = unit_type - - at_command = "%s now + %s %s -f %s" % (at_cmd, unit_count, unit_type, script_file) - if user: - # We expect that if this is an installed the permissions are already correct for the user to execute it. 
- at_command = "su '%s' -c '%s'" % (user, at_command) - rc, out, err = module.run_command(at_command) - if rc != 0: - module.fail_json(msg=err) - if command: - os.unlink(script_file) - result['changed'] = True + result['count'] = count + result['units'] = units + + add_job(module, result, at_cmd, count, units, command, script_file) module.exit_json(**result) From f3c1177265804092ced1213f210410d00ea3f517 Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Wed, 15 Jan 2014 21:28:13 -0600 Subject: [PATCH 179/772] Documentation cleanup. --- library/system/at | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/library/system/at b/library/system/at index 274aa1dd77d..917aa062f87 100644 --- a/library/system/at +++ b/library/system/at @@ -21,10 +21,10 @@ DOCUMENTATION = ''' --- module: at -short_description: Schedule the execution of a command or scripts via the at command. +short_description: Schedule the execution of a command or script file via the at command. description: - - Use this module to schedule a command or script to run once in the future. - - All jobs are executed in the a queue. + - Use this module to schedule a command or script file to run once in the future. + - All jobs are executed in the 'a' queue. version_added: "1.5" options: command: @@ -34,21 +34,21 @@ options: default: null script_file: description: - - An existing script to be executed in the future. + - An existing script file to be executed in the future. required: false default: null count: description: - - The count of units in the future to execute the command or script. + - The count of units in the future to execute the command or script file. required: true units: description: - - The type of units in the future to execute the command or script. + - The type of units in the future to execute the command or script file. required: true choices: ["minutes", "hours", "days", "weeks"] state: description: - - The state dictates if the command or script_file should be evaluated as present(added) or absent(deleted). + - The state dictates if the command or script file should be evaluated as present(added) or absent(deleted). required: false choices: ["present", "absent"] default: "present" From aba86e36576a8f49fada328e899029a3b75de80e Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Sun, 19 Jan 2014 20:51:39 -0600 Subject: [PATCH 180/772] Cleaning up pep8 alerts. 
--- library/system/at | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/library/system/at b/library/system/at index 917aa062f87..36131286388 100644 --- a/library/system/at +++ b/library/system/at @@ -132,19 +132,19 @@ def main(): module = AnsibleModule( argument_spec = dict( command=dict(required=False, - type='str'), + type='str'), script_file=dict(required=False, - type='str'), + type='str'), count=dict(required=False, - type='int'), + type='int'), units=dict(required=False, - default=None, - choices=['minutes', 'hours', 'days', 'weeks'], - type='str'), + default=None, + choices=['minutes', 'hours', 'days', 'weeks'], + type='str'), state=dict(required=False, - default='present', - choices=['present', 'absent'], - type='str'), + default='present', + choices=['present', 'absent'], + type='str'), unique=dict(required=False, default=False, type='bool') From 8808e029f9cafc4857a6395ae0f669ab54a24645 Mon Sep 17 00:00:00 2001 From: Dolph Mathews Date: Sat, 8 Mar 2014 04:49:55 -0600 Subject: [PATCH 181/772] spelling correction for "separate" s/seperate/separate/ http://en.wiktionary.org/wiki/separate --- docsite/rst/guide_aws.rst | 2 +- examples/ansible.cfg | 4 ++-- library/files/assemble | 2 +- plugins/inventory/ec2.ini | 2 +- test/README.md | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docsite/rst/guide_aws.rst b/docsite/rst/guide_aws.rst index dbe5427bc52..39f2440f195 100644 --- a/docsite/rst/guide_aws.rst +++ b/docsite/rst/guide_aws.rst @@ -129,7 +129,7 @@ it will be automatically discoverable via a dynamic group like so:: - ping Using this philosophy can be a great way to manage groups dynamically, without -having to maintain seperate inventory. +having to maintain separate inventory. .. _aws_pull: diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 2edbe361b0b..5b23e101269 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -23,7 +23,7 @@ sudo_user = root transport = smart remote_port = 22 -# additional paths to search for roles in, colon seperated +# additional paths to search for roles in, colon separated #roles_path = /etc/ansible/roles # uncomment this to disable SSH key host checking @@ -82,7 +82,7 @@ ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} # to revert the behavior to pre-1.3. #error_on_undefined_vars = False -# set plugin path directories here, seperate with colons +# set plugin path directories here, separate with colons action_plugins = /usr/share/ansible_plugins/action_plugins callback_plugins = /usr/share/ansible_plugins/callback_plugins connection_plugins = /usr/share/ansible_plugins/connection_plugins diff --git a/library/files/assemble b/library/files/assemble index f4a60caf230..90c1a90aeb3 100644 --- a/library/files/assemble +++ b/library/files/assemble @@ -59,7 +59,7 @@ options: default: "no" delimiter: description: - - A delimiter to seperate the file contents. + - A delimiter to separate the file contents. version_added: "1.4" required: false default: null diff --git a/plugins/inventory/ec2.ini b/plugins/inventory/ec2.ini index 9d05dfad031..b931c4a7da9 100644 --- a/plugins/inventory/ec2.ini +++ b/plugins/inventory/ec2.ini @@ -39,7 +39,7 @@ vpc_destination_variable = ip_address route53 = False # Additionally, you can specify the list of zones to exclude looking up in -# 'route53_excluded_zones' as a comma-seperated list. +# 'route53_excluded_zones' as a comma-separated list. 
# route53_excluded_zones = samplezone1.com, samplezone2.com # API calls to EC2 are slow. For this reason, we cache the results of an API diff --git a/test/README.md b/test/README.md index e5339acc625..526b448e087 100644 --- a/test/README.md +++ b/test/README.md @@ -16,7 +16,7 @@ integration Integration test layer, constructed using playbooks. -Some tests may require cloud credentials, others will not, and destructive tests are seperated from non-destructive so a subset +Some tests may require cloud credentials, others will not, and destructive tests are separated from non-destructive so a subset can be run on development machines. learn more From 3f681ebc5257ac15b4134c81831febb182c4e720 Mon Sep 17 00:00:00 2001 From: Markus Amalthea Magnuson Date: Sat, 8 Mar 2014 12:40:45 +0100 Subject: [PATCH 182/772] Touch up documentation strings for DigitalOcean. --- library/cloud/digital_ocean | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/library/cloud/digital_ocean b/library/cloud/digital_ocean index a6721a55da1..6cac2c82589 100644 --- a/library/cloud/digital_ocean +++ b/library/cloud/digital_ocean @@ -20,7 +20,7 @@ DOCUMENTATION = ''' module: digital_ocean short_description: Create/delete a droplet/SSH_key in DigitalOcean description: - - Create/delete a droplet in DigitalOcean and optionally waits for it to be 'running', or deploy an SSH key. + - Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key. version_added: "1.3" options: command: @@ -35,10 +35,10 @@ options: choices: ['present', 'active', 'absent', 'deleted'] client_id: description: - - Digital Ocean manager id. + - DigitalOcean manager id. api_key: description: - - Digital Ocean api key. + - DigitalOcean api key. id: description: - Numeric, the droplet id you want to operate on. @@ -47,31 +47,31 @@ options: - String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key. unique_name: description: - - Bool, require unique hostnames. By default, digital ocean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence. + - Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence. version_added: "1.4" default: "no" choices: [ "yes", "no" ] size_id: description: - - Numeric, this is the id of the size you would like the droplet created at. + - Numeric, this is the id of the size you would like the droplet created with. image_id: description: - Numeric, this is the id of the image you would like the droplet created with. region_id: description: - - "Numeric, this is the id of the region you would like your server" + - "Numeric, this is the id of the region you would like your server to be created in." ssh_key_ids: description: - - Optional, comma separated list of ssh_key_ids that you would like to be added to the server + - Optional, comma separated list of ssh_key_ids that you would like to be added to the server. virtio: description: - - "Bool, turn on virtio driver in droplet for improved network and storage I/O" + - "Bool, turn on virtio driver in droplet for improved network and storage I/O." 
version_added: "1.4" default: "yes" choices: [ "yes", "no" ] private_networking: description: - - "Bool, add an additional, private network interface to droplet for inter-droplet communication" + - "Bool, add an additional, private network interface to droplet for inter-droplet communication." version_added: "1.4" default: "no" choices: [ "yes", "no" ] From 00ed2b811fd15461fef538fb3ad67d645e48dd66 Mon Sep 17 00:00:00 2001 From: David Wheaton Date: Sat, 8 Mar 2014 13:53:16 -0800 Subject: [PATCH 183/772] Update playbooks_variables.rst incorrect/confusing wording in Jinja2 note? --- docsite/rst/playbooks_variables.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index bdb31577ed1..0ab668135cf 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -101,7 +101,7 @@ Inside a template you automatically have access to all of the variables that are it's more than that -- you can also read variables about other hosts. We'll show how to do that in a bit. .. note:: ansible allows Jinja2 loops and conditionals in templates, but in playbooks, we do not use them. Ansible - templates are pure machine-parseable YAML. This is a rather important feature as it means it is possible to code-generate + playbooks are pure machine-parseable YAML. This is a rather important feature as it means it is possible to code-generate pieces of files, or to have other ecosystem tools read Ansible files. Not everyone will need this but it can unlock possibilities. From 04a6dc6d12802a3d5de28a00773c5e4be794a1d2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 8 Mar 2014 14:02:42 -0600 Subject: [PATCH 184/772] Check for skipped tags in passed role variables before adding to them Fixes #6330 --- lib/ansible/playbook/play.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 08564ba57e2..198f15d061b 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -227,6 +227,13 @@ class Play(object): if meta_data: allow_dupes = utils.boolean(meta_data.get('allow_duplicates','')) + if "tags" in passed_vars: + if not self._is_valid_tag(passed_vars["tags"]): + # one of the tags specified for this role was in the + # skip list, or we're limiting the tags and it didn't + # match one, so we just skip it completely + continue + # if any tags were specified as role/dep variables, merge # them into the passed_vars so they're passed on to any # further dependencies too, and so we only have one place @@ -268,13 +275,6 @@ class Play(object): if 'role' in dep_vars: del dep_vars['role'] - if "tags" in passed_vars: - if not self._is_valid_tag(passed_vars["tags"]): - # one of the tags specified for this role was in the - # skip list, or we're limiting the tags and it didn't - # match one, so we just skip it completely - continue - if not allow_dupes: if dep in self.included_roles: # skip back to the top, since we don't want to From 54b3262e7158ee0f4857e433e1ad7d399e5d1e23 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Sat, 8 Mar 2014 17:14:10 -0800 Subject: [PATCH 185/772] Remove mention to AWS in Linode's inventory script. 
--- plugins/inventory/linode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index b4bcb1fad61..0cc825aa847 100755 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -5,7 +5,7 @@ Linode external inventory script ================================= Generates inventory that Ansible can understand by making API request to -AWS Linode using the Chube library. +Linode using the Chube library. NOTE: This script assumes Ansible is being executed where Chube is already installed and has a valid config at ~/.chube. If not, run: From 1d5db300676dff9f5457050fa9e413e87463e90c Mon Sep 17 00:00:00 2001 From: Seth Woodworth Date: Sun, 9 Mar 2014 00:19:28 -0500 Subject: [PATCH 186/772] Updates loop warning, removes unnecessary c --- lib/ansible/playbook/task.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 99e99d4ba18..11b356f99ab 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -85,7 +85,7 @@ class Task(object): elif x.startswith("with_"): if isinstance(ds[x], basestring) and ds[x].lstrip().startswith("{{"): - utils.warning("It is unneccessary to use '{{' in loops, leave variables in loop expressions bare.") + utils.warning("It is unnecessary to use '{{' in loops, leave variables in loop expressions bare.") plugin_name = x.replace("with_","") if plugin_name in utils.plugins.lookup_loader: @@ -97,7 +97,7 @@ class Task(object): elif x in [ 'changed_when', 'failed_when', 'when']: if isinstance(ds[x], basestring) and ds[x].lstrip().startswith("{{"): - utils.warning("It is unneccessary to use '{{' in conditionals, leave variables in loop expressions bare.") + utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.") elif x.startswith("when_"): utils.deprecated("The 'when_' conditional has been removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True) From 748e8a77409d57750c42a2ffa08aa9e9e92de60c Mon Sep 17 00:00:00 2001 From: Hagai Date: Sun, 9 Mar 2014 12:48:44 +0200 Subject: [PATCH 187/772] Add a package to missing list if npm list reported it as invalid --- library/packaging/npm | 2 ++ 1 file changed, 2 insertions(+) diff --git a/library/packaging/npm b/library/packaging/npm index 62179c373aa..8bfb21afde0 100644 --- a/library/packaging/npm +++ b/library/packaging/npm @@ -142,6 +142,8 @@ class Npm(object): for dep in data['dependencies']: if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']: missing.append(dep) + elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']: + missing.append(dep) else: installed.append(dep) #Named dependency not installed From 651c04a3ecffb5f0838c125a1e5d54d9389c7944 Mon Sep 17 00:00:00 2001 From: Aleksey Ovcharenko Date: Mon, 2 Dec 2013 16:41:17 +0200 Subject: [PATCH 188/772] New module [ufw]: this module handles Ubuntu UFW operations * Updated documentation related to IPv6 usage. BugFixes: * Solved the default_policy and state mutual exclusive status. * Fixed changed status for IPv6 addresses. Added @otnateos patch. 
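Before the module body, one design choice deserves a note: ufw itself has a --dry-run flag, so the module can honour check mode by reusing the real command path instead of short-circuiting. A minimal sketch of that pattern (the binary path and rule are illustrative):

    cmd = '/usr/sbin/ufw'  # roughly what get_bin_path('ufw') resolves to
    check_mode = True
    if check_mode:
        cmd += ' --dry-run'  # ufw parses and reports, but changes nothing
    cmd += ' default deny'
    print(cmd)  # /usr/sbin/ufw --dry-run default deny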
--- library/system/ufw | 268 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 268 insertions(+) create mode 100644 library/system/ufw diff --git a/library/system/ufw b/library/system/ufw new file mode 100644 index 00000000000..0857c2e7c95 --- /dev/null +++ b/library/system/ufw @@ -0,0 +1,268 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Aleksey Ovcharenko +# (c) 2013, James Martin +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +DOCUMENTATION = ''' +--- +module: ufw +short_description: This module handles Ubuntu UFW operations +description: + - This module handles Ubuntu UFW operations +options: + default_policy: + description: + - Change the default policy for incoming traffic. + required: false + choices: ['allow', 'deny', 'reject'] + default: None + delete: + description: + - Delete rule instead of creation. + required: false + choices: ['yes', 'no'] + default: 'no' + state: + description: | + I(enable) reloads firewall and enables firewall on boot. + I(disable) unloads firewall and disables firewall on boot. + I(reload) reloads firewall. + I(reset) disables and resets firewall to installation defaults. + I(allow) adds allow rule. See B(EXAMPLES). + I(deny) adds deny rule. See B(EXAMPLES). + I(reject) adds reject rule. See B(EXAMPLES). + I(limit) adds limit rule. Currently only IPv4 is supported. See B(EXAMPLES). + required: false + choices: ['enable', 'disable', 'reload', 'reset', 'allow', 'deny', 'reject', 'limit'] + aliases: ['rule'] + default: 'allow' + name: + description: + - Use profile located in /etc/ufw/applications.d + required: false + default: None + version_added: "2.1" + from_ip: + description: + - Source IP address. + required: false + aliases: ['src'] + default: 'any' + from_port: + description: + - Source port. + required: false + default: 'any' + to_ip: + description: + - Destination IP address. + required: false + aliases: ['dest'] + default: 'any' + to_port: + description: + - Destination port. + required: false + default: 'any' + aliases: ['port'] + proto: + description: + - TCP/IP protocol. + choices: ['any', 'tcp', 'udp', 'ipv6'] + required: false + log: + description: + - Toggles logging. Logged packets use the LOG_KERN syslog facility. + choices: ['yes', 'no'] + required: false + default: 'no' +version_added: 2.0 +notes: + - See C(man 8 ufw) for more example. +requirements: [ ] +author: Aleksey Ovcharenko +''' + +EXAMPLES = ''' +# Allow everything and enable UFW +ufw: state={{ item }} +with_items: +- allow +- enable + +# Sometimes it is desirable to let the sender know when traffic is +# being denied, rather than simply ignoring it. In these cases, use +# reject instead of deny. For example: +ufw: state=reject port=auth + +# ufw supports connection rate limiting, which is useful for protecting +# against brute-force login attacks. ufw will deny connections if an IP +# address has attempted to initiate 6 or more connections in the last +# 30 seconds. 
See http://www.debian-administration.org/articles/187 +# for details. Typical usage is: +ufw: state=limit port=ssh proto=tcp + +# Allow OpenSSH +ufw: state=allow name=OpenSSH + +# Deny all access to port 53: +ufw: state=deny port=53 + +# Allow all access to tcp port 80: +ufw: state=allow to_port=80 proto=tcp + +# Allow all access from RFC1918 networks to this host: +ufw: state=allow from_ip={{ item }} +with_items: +- 10.0.0.0/8 +- 172.16.0.0/12 +- 192.168.0.0/16 + +# Deny access to udp port 514 from host 1.2.3.4: +ufw: state=deny proto=udp from_ip=1.2.3.4 to_port=514 + +# Allow access to udp 1.2.3.4 port 5469 from 1.2.3.5 port 5469: +ufw: state=allow proto=udp from_ip=1.2.3.5 from_port=5469 to_ip=1.2.3.4 to_port=5469 + +# Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host. +# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work. +ufw: state=deny proto=tcp src=2001:db8::/32 port=25 +''' + +import platform + +def main(): + module = AnsibleModule( + argument_spec = dict( + default_policy = dict(default=None, choices=['allow', 'deny', 'reject'], required=False), + state = dict(default=None, aliases=['rule'], choices=['enable', 'disable', 'reload', 'reset', 'allow', 'deny', 'reject', 'limit'], required=False), + name = dict(default=None, required=False), + from_ip = dict(default='any', aliases=['src'], required=False), + from_port = dict(default='any', required=False), + to_ip = dict(default='any', aliases=['dest'], required=False), + to_port = dict(default='any', aliases=['port'], required=False), + proto = dict(default='any', choices=['any', 'tcp', 'udp', 'ipv6'], required=False), + delete = dict(default=False, choices=BOOLEANS, required=False), + log = dict(default=False, choices=BOOLEANS, required=False) + ), + supports_check_mode = True + ) + + default_policy = module.params.get('default_policy') + state = module.params.get('state') + name = module.params.get('name') + from_ip = module.params.get('from_ip') + from_port = module.params.get('from_port') + to_ip = module.params.get('to_ip') + to_port = module.params.get('to_port') + proto = module.params.get('proto') + delete = module.params['delete'] + log = module.params['log'] + + system = platform.system() + + if "Linux" not in system: + module.exit_json(msg="Not implemented for system %s. Only Linux (Ubuntu) is supported" % (system), changed=False) + else: + dist = platform.dist() + if dist and 'Ubuntu' not in dist[0]: + module.exit_json(msg="Not implemented for distrubution %s. Only Ubuntu is supported" % (dist[0]), changed=False) + + result = {} + result['state'] = state + + cmd = module.get_bin_path('ufw') + + if module.check_mode: + cmd = cmd + ' --dry-run' + + if default_policy: + if state: + module.fail_json(msg="'default_policy' and 'state' are mutually exclusive options.") + else: + if default_policy in ['allow', 'deny', 'reject']: + cmd = cmd + ' default %s' % (default_policy) + changed_marker = "Default incoming policy changed to '%s'\n(be sure to update your rules accordingly)" % (default_policy) + else: + module.fail_json(msg="Wrong default policy %s. See 'ansible-doc ufw' for usage." 
% (default_policy)) + + if not default_policy: + if not state: + module.fail_json(msg="You must specify either 'default_policy' or 'state' option.") + else: + if state in 'enable': + cmd = cmd + ' -f %s' % (state) + changed_marker = 'Firewall is active and enabled on system startup' + elif state in 'disable': + cmd = cmd + ' -f %s' % (state) + changed_marker = 'Firewall stopped and disabled on system startup' + elif state in 'reload': + cmd = cmd + ' -f %s' % (state) + changed_marker = 'Firewall reloaded' + elif state in 'reset': + cmd = cmd + ' -f %s' % (state) + changed_marker = 'Backing up' + elif state in ['allow', 'deny', 'reject', 'limit']: + changed_marker = ['Rules updated', 'Rules updated (v6)', 'Rule added', 'Rule added (v6)', 'Rule deleted', 'Rule deleted (v6)' ] + if delete: + cmd = cmd + ' delete' + + cmd = cmd + ' %s' % (state) + if log: + cmd = cmd + ' log' + if name: + cmd = cmd + ' %s' % (name) + else: + if proto and proto not in 'any': + cmd = cmd + ' proto %s' % (proto) + if from_ip and from_ip not in 'any': + cmd = cmd + ' from %s' % (from_ip) + if from_port and from_port not in 'any': + cmd = cmd + ' port %s' % (from_port) + elif from_port and from_port not in 'any': + cmd = cmd + ' from port %s' % (from_port) + + if to_ip: + cmd = cmd + ' to %s' % (to_ip) + if to_port and to_port not in 'any': + cmd = cmd + ' port %s' % (to_port) + elif to_port and to_port not in 'any': + cmd = cmd + ' to port %s' % (to_port) + else: + module.fail_json(msg="Wrong rule %s. See 'ansible-doc ufw' for usage." % (state)) + + (rc, out, err) = module.run_command(cmd) + + if rc != 0: + if err: + module.fail_json(msg=err) + else: + module.fail_json(msg=out) + + result['cmd'] = cmd + result['msg'] = out.rstrip() + + if isinstance(changed_marker, basestring): + result['changed'] = result['msg'] in changed_marker + else: + result['changed'] = any(item in result['msg'] for item in changed_marker) + + return module.exit_json(**result) + +# include magic from lib/ansible/module_common.py +#<> + +main() From f4e8a86c87a170267d1c8b48ae8c66393579d9f8 Mon Sep 17 00:00:00 2001 From: Jarno Keskikangas Date: Mon, 6 Jan 2014 22:44:25 +0200 Subject: [PATCH 189/772] Add functionality and reduce complexity. * Separate 'state', 'policy' and 'rule' commands * Support for 'logging' command * Support for 'direction' and 'interface' attributes * Reliable change notifications based on 'ufw status verbose' diff * Update documentation * Cleanup --- library/system/ufw | 312 +++++++++++++++++++++------------------------ 1 file changed, 146 insertions(+), 166 deletions(-) diff --git a/library/system/ufw b/library/system/ufw index 0857c2e7c95..cafebffba9b 100644 --- a/library/system/ufw +++ b/library/system/ufw @@ -1,9 +1,12 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# (c) 2014, Jarno Keskikangas # (c) 2013, Aleksey Ovcharenko # (c) 2013, James Martin # +# This file is part of Ansible +# # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or @@ -16,251 +19,228 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . + DOCUMENTATION = ''' --- module: ufw -short_description: This module handles Ubuntu UFW operations +short_description: Manage firewall with UFW description: - - This module handles Ubuntu UFW operations + - Manage firewall with UFW. 
+version_added: 1.5 +author: Aleksey Ovcharenko, Jarno Keskikangas +notes: + - See C(man ufw) for more examples. +requirements: + - C(ufw) package options: - default_policy: + state: + description: + - C(enabled) reloads firewall and enables firewall on boot. + - C(disabled) unloads firewall and disables firewall on boot. + - C(reloaded) reloads firewall. + - C(reseted) disables and resets firewall to installation defaults. + required: false + choices: ['enabled', 'disabled', 'reloaded', 'reseted'] + policy: description: - - Change the default policy for incoming traffic. + - Change the default policy for incoming or outgoing traffic. required: false + alias: default choices: ['allow', 'deny', 'reject'] - default: None - delete: + direction: description: - - Delete rule instead of creation. + - Select direction for a rule or default policy command. required: false - choices: ['yes', 'no'] - default: 'no' - state: - description: | - I(enable) reloads firewall and enables firewall on boot. - I(disable) unloads firewall and disables firewall on boot. - I(reload) reloads firewall. - I(reset) disables and resets firewall to installation defaults. - I(allow) adds allow rule. See B(EXAMPLES). - I(deny) adds deny rule. See B(EXAMPLES). - I(reject) adds reject rule. See B(EXAMPLES). - I(limit) adds limit rule. Currently only IPv4 is supported. See B(EXAMPLES). + choices: ['in', 'out', 'incoming', 'outgoing'] + logging: + description: + - Toggles logging. Logged packets use the LOG_KERN syslog facility. + choices: ['on', 'off', 'low', 'medium', 'high', 'full'] required: false - choices: ['enable', 'disable', 'reload', 'reset', 'allow', 'deny', 'reject', 'limit'] - aliases: ['rule'] - default: 'allow' - name: + rule: description: - - Use profile located in /etc/ufw/applications.d + - Add firewall rule required: false - default: None - version_added: "2.1" + choises: ['allow', 'deny', 'reject', 'limit'] + log: + description: + - Log new connections matched to this rule + required: false + choises: ['yes', 'no'] from_ip: description: - Source IP address. required: false - aliases: ['src'] + aliases: ['from', 'src'] default: 'any' from_port: description: - Source port. required: false - default: 'any' to_ip: description: - Destination IP address. required: false - aliases: ['dest'] + aliases: ['to', 'dest'] default: 'any' to_port: description: - Destination port. required: false - default: 'any' aliases: ['port'] proto: description: - TCP/IP protocol. choices: ['any', 'tcp', 'udp', 'ipv6'] required: false - log: + name: description: - - Toggles logging. Logged packets use the LOG_KERN syslog facility. - choices: ['yes', 'no'] + - Use profile located in C(/etc/ufw/applications.d) required: false - default: 'no' -version_added: 2.0 -notes: - - See C(man 8 ufw) for more example. -requirements: [ ] -author: Aleksey Ovcharenko + aliases: ['app'] + delete: + description: + - Delete rule. + required: false + choices: ['yes', 'no'] ''' EXAMPLES = ''' # Allow everything and enable UFW -ufw: state={{ item }} -with_items: -- allow -- enable +ufw: state=enable policy=allow logging=on # Sometimes it is desirable to let the sender know when traffic is # being denied, rather than simply ignoring it. In these cases, use -# reject instead of deny. For example: -ufw: state=reject port=auth +# reject instead of deny. In addition, log rejected connections: +ufw: rule=reject port=auth log=yes # ufw supports connection rate limiting, which is useful for protecting # against brute-force login attacks. 
ufw will deny connections if an IP # address has attempted to initiate 6 or more connections in the last # 30 seconds. See http://www.debian-administration.org/articles/187 # for details. Typical usage is: -ufw: state=limit port=ssh proto=tcp +ufw: rule=limit port=ssh proto=tcp # Allow OpenSSH -ufw: state=allow name=OpenSSH +ufw: rule=allow name=OpenSSH + +# Delete OpenSSH rule +ufw: rule=allow name=OpenSSH delete=yes # Deny all access to port 53: -ufw: state=deny port=53 +ufw: rule=deny port=53 # Allow all access to tcp port 80: -ufw: state=allow to_port=80 proto=tcp +ufw: rule=allow port=80 proto=tcp # Allow all access from RFC1918 networks to this host: -ufw: state=allow from_ip={{ item }} +ufw: rule=allow src={{ item }} with_items: - 10.0.0.0/8 - 172.16.0.0/12 - 192.168.0.0/16 # Deny access to udp port 514 from host 1.2.3.4: -ufw: state=deny proto=udp from_ip=1.2.3.4 to_port=514 +ufw: rule=deny proto=udp src=1.2.3.4 port=514 -# Allow access to udp 1.2.3.4 port 5469 from 1.2.3.5 port 5469: -ufw: state=allow proto=udp from_ip=1.2.3.5 from_port=5469 to_ip=1.2.3.4 to_port=5469 +# Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469 +ufw: rule=allow interface=eth0 direction=in proto=udp src=1.2.3.5 from_port=5469 dest=1.2.3.4 to_port=5469 # Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host. # Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work. -ufw: state=deny proto=tcp src=2001:db8::/32 port=25 +ufw: rule=deny proto=tcp src=2001:db8::/32 port=25 ''' -import platform +from operator import itemgetter + def main(): module = AnsibleModule( argument_spec = dict( - default_policy = dict(default=None, choices=['allow', 'deny', 'reject'], required=False), - state = dict(default=None, aliases=['rule'], choices=['enable', 'disable', 'reload', 'reset', 'allow', 'deny', 'reject', 'limit'], required=False), - name = dict(default=None, required=False), - from_ip = dict(default='any', aliases=['src'], required=False), - from_port = dict(default='any', required=False), - to_ip = dict(default='any', aliases=['dest'], required=False), - to_port = dict(default='any', aliases=['port'], required=False), - proto = dict(default='any', choices=['any', 'tcp', 'udp', 'ipv6'], required=False), - delete = dict(default=False, choices=BOOLEANS, required=False), - log = dict(default=False, choices=BOOLEANS, required=False) + state = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reseted']), + default = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']), + logging = dict(default=None, choises=['on', 'off', 'low', 'medium', 'high', 'full']), + direction = dict(default=None, choises=['in', 'incoming', 'out', 'outgoing']), + delete = dict(default=False, choices=BOOLEANS), + rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']), + interface = dict(default=None, aliases=['if']), + log = dict(default=False, choices=BOOLEANS), + from_ip = dict(default='any', aliases=['src', 'from']), + from_port = dict(default=None), + to_ip = dict(default='any', aliases=['dest', 'to']), + to_port = dict(default=None, aliases=['port']), + proto = dict(default=None, aliases=['protocol'], choices=['any', 'tcp', 'udp', 'ipv6']), + app = dict(default=None, aliases=['name']) ), - supports_check_mode = True + supports_check_mode = True, + mutually_exclusive = [['app', 'proto']] ) - default_policy = module.params.get('default_policy') - state = module.params.get('state') - name = module.params.get('name') - from_ip = 
module.params.get('from_ip') - from_port = module.params.get('from_port') - to_ip = module.params.get('to_ip') - to_port = module.params.get('to_port') - proto = module.params.get('proto') - delete = module.params['delete'] - log = module.params['log'] - - system = platform.system() - - if "Linux" not in system: - module.exit_json(msg="Not implemented for system %s. Only Linux (Ubuntu) is supported" % (system), changed=False) - else: - dist = platform.dist() - if dist and 'Ubuntu' not in dist[0]: - module.exit_json(msg="Not implemented for distrubution %s. Only Ubuntu is supported" % (dist[0]), changed=False) - - result = {} - result['state'] = state - - cmd = module.get_bin_path('ufw') - - if module.check_mode: - cmd = cmd + ' --dry-run' - - if default_policy: - if state: - module.fail_json(msg="'default_policy' and 'state' are mutually exclusive options.") - else: - if default_policy in ['allow', 'deny', 'reject']: - cmd = cmd + ' default %s' % (default_policy) - changed_marker = "Default incoming policy changed to '%s'\n(be sure to update your rules accordingly)" % (default_policy) - else: - module.fail_json(msg="Wrong default policy %s. See 'ansible-doc ufw' for usage." % (default_policy)) - - if not default_policy: - if not state: - module.fail_json(msg="You must specify either 'default_policy' or 'state' option.") - else: - if state in 'enable': - cmd = cmd + ' -f %s' % (state) - changed_marker = 'Firewall is active and enabled on system startup' - elif state in 'disable': - cmd = cmd + ' -f %s' % (state) - changed_marker = 'Firewall stopped and disabled on system startup' - elif state in 'reload': - cmd = cmd + ' -f %s' % (state) - changed_marker = 'Firewall reloaded' - elif state in 'reset': - cmd = cmd + ' -f %s' % (state) - changed_marker = 'Backing up' - elif state in ['allow', 'deny', 'reject', 'limit']: - changed_marker = ['Rules updated', 'Rules updated (v6)', 'Rule added', 'Rule added (v6)', 'Rule deleted', 'Rule deleted (v6)' ] - if delete: - cmd = cmd + ' delete' - - cmd = cmd + ' %s' % (state) - if log: - cmd = cmd + ' log' - if name: - cmd = cmd + ' %s' % (name) - else: - if proto and proto not in 'any': - cmd = cmd + ' proto %s' % (proto) - if from_ip and from_ip not in 'any': - cmd = cmd + ' from %s' % (from_ip) - if from_port and from_port not in 'any': - cmd = cmd + ' port %s' % (from_port) - elif from_port and from_port not in 'any': - cmd = cmd + ' from port %s' % (from_port) - - if to_ip: - cmd = cmd + ' to %s' % (to_ip) - if to_port and to_port not in 'any': - cmd = cmd + ' port %s' % (to_port) - elif to_port and to_port not in 'any': - cmd = cmd + ' to port %s' % (to_port) - else: - module.fail_json(msg="Wrong rule %s. See 'ansible-doc ufw' for usage." 
% (state)) - - (rc, out, err) = module.run_command(cmd) - - if rc != 0: - if err: - module.fail_json(msg=err) - else: - module.fail_json(msg=out) - - result['cmd'] = cmd - result['msg'] = out.rstrip() - - if isinstance(changed_marker, basestring): - result['changed'] = result['msg'] in changed_marker - else: - result['changed'] = any(item in result['msg'] for item in changed_marker) - - return module.exit_json(**result) + cmds = [] + + def execute(cmd): + cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd))) + cmds.append(cmd) + (rc, out, err) = module.run_command(cmd) + + if rc != 0: + module.fail_json(msg=err or out) + + params = module.params + + # Ensure at least one of the command arguments are given + command_keys = ['state', 'default', 'rule', 'logging'] + commands = dict((key, params[key]) for key in command_keys if params[key]) + + if len(commands) < 1: + module.fail_json(msg="Not any of the command arguments %s given" % commands) + + # Ensure ufw is available + ufw_bin = module.get_bin_path('ufw', True) + + # Save the pre state in order to recognize changes reliably + (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose') + + # Execute commands + for (command, value) in commands.iteritems(): + cmd = [[ufw_bin], [module.check_mode, '--dry-run']] + + if command == 'state': + states = { 'enabled': 'enable', 'disabled': 'disable', + 'reloaded': 'reload', 'reseted': 'reset' } + execute(cmd + [['-f'], [states[value]]]) + + elif command == 'logging': + execute(cmd + [[command, value]]) + + elif command == 'default': + execute(cmd + [[command], [value], [params['direction']]]) + + elif command == 'rule': + # Rules are constructed according to the long format + # + # ufw [--dry-run] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \ + # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \ + # [proto protocol] [app application] + cmd.append([module.boolean(params['delete']), 'delete']) + cmd.append([value]) + cmd.append([module.boolean(params['log']), 'log']) + + for (key, template) in [('direction', "%s" ), ('interface', "on %s" ), + ('from_ip', "from %s" ), ('from_port', "port %s" ), + ('to_ip', "to %s" ), ('to_port', "port %s" ), + ('proto', "proto %s"), ('app', "app '%s'")]: + + value = params[key] + cmd.append([value, template % (value)]) + + execute(cmd) + + # Get the new state + (_, post_state, _) = module.run_command(ufw_bin + ' status verbose') + changed = pre_state != post_state + + return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip()) # include magic from lib/ansible/module_common.py #<> From 2613e6fb14de76545401b8970a194e7c4a156586 Mon Sep 17 00:00:00 2001 From: Jarno Keskikangas Date: Sun, 9 Feb 2014 12:09:12 +0200 Subject: [PATCH 190/772] Add protocol options 'esp' and 'ah'. --- library/system/ufw | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/system/ufw b/library/system/ufw index cafebffba9b..55bcea0b221 100644 --- a/library/system/ufw +++ b/library/system/ufw @@ -91,7 +91,7 @@ options: proto: description: - TCP/IP protocol. 
- choices: ['any', 'tcp', 'udp', 'ipv6'] + choices: ['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah'] required: false name: description: @@ -169,7 +169,7 @@ def main(): from_port = dict(default=None), to_ip = dict(default='any', aliases=['dest', 'to']), to_port = dict(default=None, aliases=['port']), - proto = dict(default=None, aliases=['protocol'], choices=['any', 'tcp', 'udp', 'ipv6']), + proto = dict(default=None, aliases=['protocol'], choices=['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']), app = dict(default=None, aliases=['name']) ), supports_check_mode = True, From 024770819931d1b70738d6535e8220adc6cb9dcc Mon Sep 17 00:00:00 2001 From: Jarno Keskikangas Date: Sun, 9 Feb 2014 12:29:00 +0200 Subject: [PATCH 191/772] Fix typo: 'reseted' -> 'reset'. --- library/system/ufw | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/system/ufw b/library/system/ufw index 55bcea0b221..b1386ff6020 100644 --- a/library/system/ufw +++ b/library/system/ufw @@ -157,7 +157,7 @@ from operator import itemgetter def main(): module = AnsibleModule( argument_spec = dict( - state = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reseted']), + state = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reset']), default = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']), logging = dict(default=None, choises=['on', 'off', 'low', 'medium', 'high', 'full']), direction = dict(default=None, choises=['in', 'incoming', 'out', 'outgoing']), @@ -207,7 +207,7 @@ def main(): if command == 'state': states = { 'enabled': 'enable', 'disabled': 'disable', - 'reloaded': 'reload', 'reseted': 'reset' } + 'reloaded': 'reload', 'reset': 'reset' } execute(cmd + [['-f'], [states[value]]]) elif command == 'logging': From 4d5340587e0e6ad8a020bbca6515484ad993df4c Mon Sep 17 00:00:00 2001 From: Jarno Keskikangas Date: Sat, 8 Mar 2014 14:01:26 +0200 Subject: [PATCH 192/772] Insert rule to a specific num. --- library/system/ufw | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/library/system/ufw b/library/system/ufw index b1386ff6020..e343f934280 100644 --- a/library/system/ufw +++ b/library/system/ufw @@ -57,6 +57,10 @@ options: - Toggles logging. Logged packets use the LOG_KERN syslog facility. choices: ['on', 'off', 'low', 'medium', 'high', 'full'] required: false + insert: + description: + - Insert the corresponding rule as rule number NUM + required: false rule: description: - Add firewall rule @@ -162,6 +166,7 @@ def main(): logging = dict(default=None, choises=['on', 'off', 'low', 'medium', 'high', 'full']), direction = dict(default=None, choises=['in', 'incoming', 'out', 'outgoing']), delete = dict(default=False, choices=BOOLEANS), + insert = dict(default=None), rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']), interface = dict(default=None, aliases=['if']), log = dict(default=False, choices=BOOLEANS), @@ -223,6 +228,7 @@ def main(): # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \ # [proto protocol] [app application] cmd.append([module.boolean(params['delete']), 'delete']) + cmd.append([params['insert'], "insert %s" % params['insert']]) cmd.append([value]) cmd.append([module.boolean(params['log']), 'log']) From 37ef39d0fa1a86039b94b51c491653ea40a72f64 Mon Sep 17 00:00:00 2001 From: Jarno Keskikangas Date: Sun, 9 Mar 2014 12:55:58 +0200 Subject: [PATCH 193/772] Recognize rule changes even if ufw is in disabled state. See http://askubuntu.com/questions/30781/see-configured-rules-even-when-inactive for the details. 
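
In sketch form, the change detection this patch adds looks like the following
(a minimal standalone version, assuming ufw and the stock Ubuntu rules paths
are present):

    import subprocess

    def ufw_state():
        # capture both the live status and the persisted rule tuples; a
        # disabled firewall still records its rules in /lib/ufw/user*.rules
        status = subprocess.Popen(['ufw', 'status', 'verbose'],
                                  stdout=subprocess.PIPE).communicate()[0]
        rules = subprocess.Popen("grep '^### tuple' /lib/ufw/user*.rules",
                                 shell=True, stdout=subprocess.PIPE).communicate()[0]
        return (status, rules)

    pre = ufw_state()
    # ... run the requested ufw command(s) here ...
    post = ufw_state()
    changed = (pre != post)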
--- library/system/ufw | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/library/system/ufw b/library/system/ufw index e343f934280..5ac20978ec2 100644 --- a/library/system/ufw +++ b/library/system/ufw @@ -203,8 +203,9 @@ def main(): # Ensure ufw is available ufw_bin = module.get_bin_path('ufw', True) - # Save the pre state in order to recognize changes reliably + # Save the pre state and rules in order to recognize changes (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose') + (_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user*.rules") # Execute commands for (command, value) in commands.iteritems(): @@ -244,7 +245,8 @@ def main(): # Get the new state (_, post_state, _) = module.run_command(ufw_bin + ' status verbose') - changed = pre_state != post_state + (_, post_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user*.rules") + changed = (pre_state != post_state) or (pre_rules != post_rules) return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip()) From d59973295d0349abcdfcedfcd81531dcaddb1b85 Mon Sep 17 00:00:00 2001 From: Charles Duffy Date: Mon, 10 Mar 2014 00:09:08 -0500 Subject: [PATCH 194/772] #6341: check systemd service status with show subcommand --- library/system/service | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/library/system/service b/library/system/service index 2e26a47b636..49708ce28e6 100644 --- a/library/system/service +++ b/library/system/service @@ -473,7 +473,27 @@ class LinuxService(Service): if location.get('initctl', None): self.svc_initctl = location['initctl'] + def get_systemd_service_status(self): + (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit)) + d = dict(line.split('=', 1) for line in out.splitlines()) + if d['ActiveState'] == 'active': + # run-once services (for which a single successful exit indicates + # that they are running as designed) should not be restarted here. + # Thus, we are not checking d['SubState']. 
+            self.running = True
+            self.crashed = False
+        elif d['ActiveState'] == 'failed':
+            self.running = False
+            self.crashed = True
+        else:
+            self.running = False
+            self.crashed = False
+        return self.running
+
     def get_service_status(self):
+        if self.svc_cmd and self.svc_cmd.endswith('systemctl'):
+            return self.get_systemd_service_status()
+
         self.action = "status"
         rc, status_stdout, status_stderr = self.service_control()

From 18f53d92195a6707afd2f8cbcea7362dd261594e Mon Sep 17 00:00:00 2001
From: Charles Duffy
Date: Mon, 10 Mar 2014 01:05:48 -0500
Subject: [PATCH 195/772] #6341: use shared function for parsing systemd status; check rc code

---
 library/system/service | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/library/system/service b/library/system/service
index 49708ce28e6..a25cf208ed9 100644
--- a/library/system/service
+++ b/library/system/service
@@ -473,18 +473,25 @@ class LinuxService(Service):
         if location.get('initctl', None):
             self.svc_initctl = location['initctl']
 
+    def get_systemd_status_dict(self):
+        (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,))
+        if rc != 0:
+            self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err))
+        return dict(line.split('=', 1) for line in out.splitlines())
+
     def get_systemd_service_status(self):
-        (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit))
-        d = dict(line.split('=', 1) for line in out.splitlines())
-        if d['ActiveState'] == 'active':
+        d = self.get_systemd_status_dict()
+        if d.get('ActiveState') == 'active':
             # run-once services (for which a single successful exit indicates
             # that they are running as designed) should not be restarted here.
             # Thus, we are not checking d['SubState'].

self.running = True self.crashed = False - elif d['ActiveState'] == 'failed': + elif d.get('ActiveState') == 'failed': self.running = False self.crashed = True + elif d.get('ActiveState') is None: + self.module.fail_json(msg='No ActiveState value in systemctl show output for %r' % (self.__systemd_unit,)) else: self.running = False self.crashed = False @@ -605,9 +612,7 @@ class LinuxService(Service): return if self.enable_cmd.endswith("systemctl"): - (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit)) - - d = dict(line.split('=', 1) for line in out.splitlines()) + d = self.get_systemd_status_dict() if "UnitFileState" in d: if self.enable and d["UnitFileState"] == "enabled": return From b9ab31ebd89cd298092ef88df4ae4560d5983959 Mon Sep 17 00:00:00 2001 From: Federico Feroldi Date: Mon, 10 Mar 2014 13:11:09 +0100 Subject: [PATCH 196/772] Fix for #6365: cloudformation module fails to update if stack exists --- library/cloud/cloudformation | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/cloudformation b/library/cloud/cloudformation index e072f3923f8..822bd6e2503 100644 --- a/library/cloud/cloudformation +++ b/library/cloud/cloudformation @@ -250,7 +250,7 @@ def main(): operation = 'CREATE' except Exception, err: error_msg = boto_exception(err) - if 'AlreadyExistsException' in error_msg: + if 'AlreadyExistsException' in error_msg or 'already exists' in error_msg: update = True else: module.fail_json(msg=error_msg) From 2ccbdd65c168ba2ef548e9f5805b4fe83f23e400 Mon Sep 17 00:00:00 2001 From: jjshoe Date: Mon, 10 Mar 2014 09:02:51 -0500 Subject: [PATCH 197/772] Make this statement understandable grammatically --- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index db82e2c483a..70925521007 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -350,7 +350,7 @@ Assuming you load balance your checkout location, ansible-pull scales essentiall Run ``ansible-pull --help`` for details. -There's also a `clever playbook `_ available to using ansible in push mode to configure ansible-pull via a crontab! +There's also a `clever playbook `_ available to configure ansible-pull via a crontab from push mode. .. _tips_and_tricks: From c6fbb0059b1a15d739819dc4f20fa44f836f3c23 Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Mon, 10 Mar 2014 10:31:08 -0500 Subject: [PATCH 198/772] Some cleanup. 
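
Among other things this renames matching_jobs() to get_matching_jobs(); the
duplicate-job detection it tidies up works roughly like the following
standalone sketch (assumes the at/atq binaries are available; error handling
omitted):

    import subprocess

    def get_matching_jobs(script_file):
        # list the queue with atq, then scan each job's body ('at -c NUM')
        # for the path of the script we are about to queue
        matching = []
        out = subprocess.Popen(['atq'], stdout=subprocess.PIPE).communicate()[0]
        for line in out.splitlines():
            job_id = line.split()[0]
            body = subprocess.Popen(['at', '-c', job_id],
                                    stdout=subprocess.PIPE).communicate()[0]
            if script_file in body:
                matching.append(job_id)
        return matching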
--- library/system/at | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/library/system/at b/library/system/at index 36131286388..d1055abfc26 100644 --- a/library/system/at +++ b/library/system/at @@ -75,6 +75,8 @@ EXAMPLES = ''' import os import tempfile + + def add_job(module, result, at_cmd, count, units, command, script_file): at_command = "%s now + %s %s -f %s" % (at_cmd, count, units, script_file) rc, out, err = module.run_command(at_command, check_rc=True) @@ -82,8 +84,9 @@ def add_job(module, result, at_cmd, count, units, command, script_file): os.unlink(script_file) result['changed'] = True + def delete_job(module, result, at_cmd, command, script_file): - for matching_job in matching_jobs(module, at_cmd, script_file): + for matching_job in get_matching_jobs(module, at_cmd, script_file): at_command = "%s -d %s" % (at_cmd, matching_job) rc, out, err = module.run_command(at_command, check_rc=True) result['changed'] = True @@ -91,13 +94,14 @@ def delete_job(module, result, at_cmd, command, script_file): os.unlink(script_file) module.exit_json(**result) -def matching_jobs(module, at_cmd, script_file): + +def get_matching_jobs(module, at_cmd, script_file): matching_jobs = [] atq_cmd = module.get_bin_path('atq', True) # Get list of job numbers for the user. - atq_command = "%s" % (atq_cmd) + atq_command = "%s" % atq_cmd rc, out, err = module.run_command(atq_command, check_rc=True) current_jobs = out.splitlines() if len(current_jobs) == 0: @@ -118,6 +122,7 @@ def matching_jobs(module, at_cmd, script_file): # Return the list. return matching_jobs + def create_tempfile(command): filed, script_file = tempfile.mkstemp(prefix='at') fileh = os.fdopen(filed, 'w') @@ -125,7 +130,6 @@ def create_tempfile(command): fileh.close() return script_file -#================================================ def main(): @@ -149,9 +153,9 @@ def main(): default=False, type='bool') ), - mutually_exclusive = [['command', 'script_file']], - required_one_of = [['command', 'script_file']], - supports_check_mode = False + mutually_exclusive=[['command', 'script_file']], + required_one_of=[['command', 'script_file']], + supports_check_mode=False ) at_cmd = module.get_bin_path('at', True) @@ -163,12 +167,10 @@ def main(): state = module.params['state'] unique = module.params['unique'] - if ((state == 'present') and (not count or not units)): + if (state == 'present') and (not count or not units): module.fail_json(msg="present state requires count and units") - result = {} - result['state'] = state - result['changed'] = False + result = {'state': state, 'changed': False} # If command transform it into a script_file if command: @@ -180,7 +182,7 @@ def main(): # if unique if existing return unchanged if unique: - if len(matching_jobs(module, at_cmd, script_file)) != 0: + if len(get_matching_jobs(module, at_cmd, script_file)) != 0: if command: os.unlink(script_file) module.exit_json(**result) From 5875d19f4da65f6519ad818dbd4b5564a8da36b8 Mon Sep 17 00:00:00 2001 From: Matthew Riedel Date: Mon, 10 Mar 2014 11:58:04 -0400 Subject: [PATCH 199/772] Returning 'NA' when no virtualization found --- library/system/setup | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/library/system/setup b/library/system/setup index f140991dc27..46fa934a4b5 100644 --- a/library/system/setup +++ b/library/system/setup @@ -2185,6 +2185,13 @@ class LinuxVirtual(Virtual): self.facts['virtualization_role'] = 'host' return + # If none of the above matches, return 'NA' for virtualization_type + # and 
virtualization_role. This allows for proper grouping. + self.facts['virtualization_type'] = 'NA' + self.facts['virtualization_role'] = 'NA' + return + + class HPUXVirtual(Virtual): """ This is a HP-UX specific subclass of Virtual. It defines From 4b400ca5e97eeccfb1bd00457634b83ac2f5ef44 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 10 Mar 2014 13:43:34 -0500 Subject: [PATCH 200/772] Only used stripped data for testing if the file is json, but used unstripped when actually parsing. Fixes #6348 --- lib/ansible/utils/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 405641eb163..6c2f8112aba 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -354,9 +354,9 @@ def smush_ds(data): def parse_yaml(data, path_hint=None): ''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!''' - data = data.lstrip() + stripped_data = data.lstrip() loaded = None - if data.startswith("{") or data.startswith("["): + if stripped_data.startswith("{") or stripped_data.startswith("["): # since the line starts with { or [ we can infer this is a JSON document. try: loaded = json.loads(data) From 82f99bfa3d2f5e95c95deffef0c8cd5fda0d1e7c Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Mon, 10 Mar 2014 12:02:45 -0700 Subject: [PATCH 201/772] Add BSD license to module_utils/gce.py --- lib/ansible/module_utils/gce.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/lib/ansible/module_utils/gce.py b/lib/ansible/module_utils/gce.py index f6401c68d01..6d6fb158ffc 100644 --- a/lib/ansible/module_utils/gce.py +++ b/lib/ansible/module_utils/gce.py @@ -1,3 +1,32 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Franck Cuny , 2014 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# + USER_AGENT_PRODUCT="Ansible-gce" USER_AGENT_VERSION="v1" From d3b452ecb12262dbe055d0d092b4d4b5fc3d9195 Mon Sep 17 00:00:00 2001 From: jctanner Date: Wed, 5 Mar 2014 12:58:05 -0500 Subject: [PATCH 202/772] Merge pull request #5872 from tomdymond/add-ignoreerror-to-sysctl Add option to enable the sysctl -e option --- library/system/sysctl | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/library/system/sysctl b/library/system/sysctl index 2e3ce767e22..97e5bc5e6c1 100644 --- a/library/system/sysctl +++ b/library/system/sysctl @@ -45,6 +45,11 @@ options: - Whether the entry should be present or absent in the sysctl file. choices: [ "present", "absent" ] default: present + ignoreerrors: + description: + - Use this option to ignore errors about unknown keys. + choices: [ "yes", "no" ] + default: no reload: description: - If C(yes), performs a I(/sbin/sysctl -p) if the C(sysctl_file) is @@ -214,8 +219,12 @@ class SysctlModule(object): # freebsd doesn't support -p, so reload the sysctl service rc,out,err = self.module.run_command('/etc/rc.d/sysctl reload') else: - # system supports reloading via the -p flag to sysctl, so we'll use that - rc,out,err = self.module.run_command([self.sysctl_cmd, '-p', self.sysctl_file]) + # system supports reloading via the -p flag to sysctl, so we'll use that + sysctl_args = [self.sysctl_cmd, '-p', self.sysctl_file] + if self.args['ignoreerrors']: + sysctl_args.insert(1, '-e') + + rc,out,err = self.module.run_command(sysctl_args) if rc != 0: self.module.fail_json(msg="Failed to reload sysctl: %s" % str(out) + str(err)) @@ -296,6 +305,7 @@ def main(): state = dict(default='present', choices=['present', 'absent']), reload = dict(default=True, type='bool'), sysctl_set = dict(default=False, type='bool'), + ignoreerrors = dict(default=False, type='bool'), sysctl_file = dict(default='/etc/sysctl.conf') ), supports_check_mode=True From 60055348ba58f1f67e61bdf4f7baca0ff7465007 Mon Sep 17 00:00:00 2001 From: Iordan Iordanov Date: Mon, 24 Feb 2014 16:05:03 -0500 Subject: [PATCH 203/772] Add support for checking host against global known host files. 
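
Per line, the membership test that is now run against each of these files is
essentially the following (a py2-style sketch following the plugin's
approach; HASHED_KEY_MAGIC is the plugin's own constant, '|1|' in OpenSSH's
hashed-entry format):

    import hmac
    from hashlib import sha1

    HASHED_KEY_MAGIC = "|1|"

    def entry_matches(host_field, host):
        # host_field is the first token of a known_hosts line; hashed
        # entries are '|1|<base64 salt>|<base64 HMAC-SHA1 of hostname>'
        if host_field.startswith(HASHED_KEY_MAGIC):
            salt, hashed = host_field[len(HASHED_KEY_MAGIC):].split("|", 2)
            mac = hmac.new(salt.decode('base64'), digestmod=sha1)
            mac.update(host)
            return mac.digest() == hashed.decode('base64')
        return host in host_field   # plain (possibly comma-separated) entry

The patch's main change is simply to run this same check across
~/.ssh/known_hosts, /etc/ssh/ssh_known_hosts and /etc/ssh/ssh_known_hosts2
instead of the user file alone.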
--- lib/ansible/runner/connection_plugins/ssh.py | 63 ++++++++++++-------- 1 file changed, 37 insertions(+), 26 deletions(-) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index c5fab75ce16..22189caadf3 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -118,35 +118,46 @@ class Connection(object): def not_in_host_file(self, host): if 'USER' in os.environ: - host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") + user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") else: - host_file = "~/.ssh/known_hosts" - host_file = os.path.expanduser(host_file) - if not os.path.exists(host_file): - print "previous known host file not found" - return True - host_fh = open(host_file) - data = host_fh.read() - host_fh.close() - for line in data.split("\n"): - if line is None or line.find(" ") == -1: + user_host_file = "~/.ssh/known_hosts" + user_host_file = os.path.expanduser(user_host_file) + + host_file_list = [] + host_file_list.append(user_host_file) + host_file_list.append("/etc/ssh/ssh_known_hosts") + host_file_list.append("/etc/ssh/ssh_known_hosts2") + + hfiles_not_found = 0 + for hf in host_file_list: + if not os.path.exists(hf): + hfiles_not_found += 1 continue - tokens = line.split() - if tokens[0].find(self.HASHED_KEY_MAGIC) == 0: - # this is a hashed known host entry - try: - (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2) - hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1) - hash.update(host) - if hash.digest() == kn_host.decode('base64'): - return False - except: - # invalid hashed host key, skip it + host_fh = open(hf) + data = host_fh.read() + host_fh.close() + for line in data.split("\n"): + if line is None or line.find(" ") == -1: continue - else: - # standard host file entry - if host in tokens[0]: - return False + tokens = line.split() + if tokens[0].find(self.HASHED_KEY_MAGIC) == 0: + # this is a hashed known host entry + try: + (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2) + hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1) + hash.update(host) + if hash.digest() == kn_host.decode('base64'): + return False + except: + # invalid hashed host key, skip it + continue + else: + # standard host file entry + if host in tokens[0]: + return False + + if (hfiles_not_found == len(host_file_list)): + print "previous known host file not found" return True def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=False): From 53788e4c981d612c87637244690267687e7b67b9 Mon Sep 17 00:00:00 2001 From: Pavel Antonov Date: Thu, 27 Feb 2014 00:27:39 +0400 Subject: [PATCH 204/772] Support docker_py >= 0.3.0, Docker API >= 1.8, extended error reporting --- library/cloud/docker_image | 55 +++++++++++++++++++++++++++++--------- 1 file changed, 43 insertions(+), 12 deletions(-) diff --git a/library/cloud/docker_image b/library/cloud/docker_image index 6d910c8bd70..5d1bebaf7a3 100644 --- a/library/cloud/docker_image +++ b/library/cloud/docker_image @@ -104,6 +104,8 @@ Remove image from local docker storage: try: import sys + import re + import json import docker.client from requests.exceptions import * from urlparse import urlparse @@ -122,12 +124,33 @@ class DockerImageManager: docker_url = urlparse(module.params.get('docker_url')) self.client = docker.Client(base_url=docker_url.geturl(), timeout=module.params.get('timeout')) self.changed = False + 
self.log = [] + self.error_msg = None + + def get_log(self, as_string=True): + return "".join(self.log) if as_string else self.log def build(self): - res = self.client.build(self.path, tag=":".join([self.name, self.tag]), nocache=self.nocache, rm=True) + stream = self.client.build(self.path, tag=':'.join([self.name, self.tag]), nocache=self.nocache, rm=True, stream=True) + success_search = r'Successfully built ([0-9a-f]+)' + image_id = None self.changed = True - return res + for chunk in stream: + chunk_json = json.loads(chunk) + + if 'error' in chunk_json: + self.error_msg = chunk_json['error'] + return None + + if 'stream' in chunk_json: + output = chunk_json['stream'] + self.log.append(output) + match = re.search(success_search, output) + if match: + image_id = match.group(1) + + return image_id def has_changed(self): return self.changed @@ -136,7 +159,13 @@ class DockerImageManager: filtered_images = [] images = self.client.images() for i in images: - if (not self.name or self.name == i['Repository']) and (not self.tag or self.tag == i['Tag']): + # Docker-py version >= 0.3 (Docker API >= 1.8) + if 'RepoTags' in i: + repotag = '%s:%s' % (getattr(self, 'name', ''), getattr(self, 'tags', 'latest')) + if not self.name or repotag in i['RepoTags']: + filtered_images.append(i) + # Docker-py version < 0.3 (Docker API < 1.8) + elif (not self.name or self.name == i['Repository']) and (not self.tag or self.tag == i['Tag']): filtered_images.append(i) return filtered_images @@ -170,25 +199,27 @@ def main(): failed = False image_id = None msg = '' + do_build = False # build image if not exists if state == "present": images = manager.get_images() if len(images) == 0: - image_id, msg = manager.build() - if image_id is None: - failed = True - - + do_build = True + # build image + elif state == "build": + do_build = True # remove image or images elif state == "absent": manager.remove_images() - # build image - elif state == "build": - image_id, msg = manager.build() - if image_id is None: + if do_build: + image_id = manager.build() + if image_id: + msg = "Image builded: %s" % image_id + else: failed = True + msg = "Error: %s\nLog:%s" % (manager.error_msg, manager.get_log()) module.exit_json(failed=failed, changed=manager.has_changed(), msg=msg, image_id=image_id) From c85081e9a9b12e3b438ad78d41a8e0505d8d2571 Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Thu, 27 Feb 2014 18:06:34 +0100 Subject: [PATCH 205/772] Fixes templating of ansible_ssh_host for delegates --- lib/ansible/runner/__init__.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 207862857aa..800774cfd1a 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -308,7 +308,7 @@ class Runner(object): delegate = {} - # allow ansible_ssh_host to be templated + # allow delegated host to be templated delegate['host'] = template.template(self.basedir, host, remote_inject, fail_on_undefined=True) @@ -333,7 +333,10 @@ class Runner(object): this_info = {} # get the real ssh_address for the delegate - delegate['ssh_host'] = this_info.get('ansible_ssh_host', delegate['host']) + # and allow ansible_ssh_host to be templated + delegate['ssh_host'] = template.template(self.basedir, + this_info.get('ansible_ssh_host', this_host), + this_info, fail_on_undefined=True) delegate['port'] = this_info.get('ansible_ssh_port', port) From fbf500ba1f43837d88b0d52bce2bcaacd026d48a Mon Sep 17 00:00:00 2001 From: Tefnet Date: Sun, 2 Mar 
2014 00:59:17 +0100 Subject: [PATCH 206/772] missing import in assert module Fixed missing ansible.errors import in assert module --- lib/ansible/runner/action_plugins/assert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/assert.py b/lib/ansible/runner/action_plugins/assert.py index fbd0b3888ae..e217bdb4aa2 100644 --- a/lib/ansible/runner/action_plugins/assert.py +++ b/lib/ansible/runner/action_plugins/assert.py @@ -17,7 +17,7 @@ import ansible -from ansible import utils +from ansible import utils, errors from ansible.runner.return_data import ReturnData class ActionModule(object): From 14b4cb60d69b213468f5b8705a7fe4c4a8b072b4 Mon Sep 17 00:00:00 2001 From: Francesc Esplugas Date: Sun, 2 Mar 2014 12:41:07 +0100 Subject: [PATCH 207/772] get rid of newline chars when reading password file --- bin/ansible-vault | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bin/ansible-vault b/bin/ansible-vault index 75250b5e813..902653d40bf 100755 --- a/bin/ansible-vault +++ b/bin/ansible-vault @@ -105,6 +105,8 @@ def _read_password(filename): f = open(filename, "rb") data = f.read() f.close + # get rid of newline chars + data = data.strip() return data def execute_create(args, options, parser): From b06f3e5dd68a049c44ce598b89bc4e3f4fb9459a Mon Sep 17 00:00:00 2001 From: Hagai Date: Mon, 3 Mar 2014 14:47:57 +0200 Subject: [PATCH 208/772] Fix incorrect use of copy on list --- lib/ansible/playbook/play.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 5ea31b526ce..e9f00e47024 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -237,7 +237,7 @@ class Play(object): if "tags" in included_dep_vars: included_dep_vars["tags"] = list(set(included_dep_vars["tags"] + passed_vars["tags"])) else: - included_dep_vars["tags"] = passed_vars["tags"].copy() + included_dep_vars["tags"] = passed_vars["tags"][:] dep_vars = utils.combine_vars(passed_vars, dep_vars) dep_vars = utils.combine_vars(role_vars, dep_vars) From 5341040c0580007f3bf73007105a95555c8456e0 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 3 Mar 2014 10:12:03 -0600 Subject: [PATCH 209/772] Adding a wait loop to ec2_elb for the initial lb state when registering Fixes #5305 --- library/cloud/ec2_elb | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/library/cloud/ec2_elb b/library/cloud/ec2_elb index ebd90aeda82..c6c61fd199b 100644 --- a/library/cloud/ec2_elb +++ b/library/cloud/ec2_elb @@ -157,7 +157,17 @@ class ElbManager: to report the instance in-service""" for lb in self.lbs: if wait: - initial_state = self._get_instance_health(lb) + tries = 1 + while True: + initial_state = self._get_instance_health(lb) + if initial_state: + break + time.sleep(1) + tries += 1 + # FIXME: this should be configurable, but since it didn't + # wait at all before this is at least better + if tries > 10: + self.module.fail_json(msg='failed to find the initial state of the load balancer') if enable_availability_zone: self._enable_availailability_zone(lb) From b14932465d768e634e500961434e6f05a033f2c8 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Mon, 3 Mar 2014 13:23:27 -0800 Subject: [PATCH 210/772] Avoid range selection on empty groups This prevents a traceback when the group is empty. 
Fixes #6258
---
 lib/ansible/inventory/__init__.py | 6 ++++++
 test/units/TestInventory.py       | 5 +++++
 2 files changed, 11 insertions(+)

diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py
index 67117919d06..8f74d5ea9e9 100644
--- a/lib/ansible/inventory/__init__.py
+++ b/lib/ansible/inventory/__init__.py
@@ -227,6 +227,12 @@ class Inventory(object):
         given a pattern like foo[0:5], where foo matches hosts, return the first 6 hosts
         """
 
+        # If there are no hosts to select from, just return the
+        # empty set. This prevents trying to do selections on an empty set.
+        # issue#6258
+        if not hosts:
+            return hosts
+
         (loose_pattern, limits) = self._enumeration_info(pat)
         if not limits:
             return hosts
diff --git a/test/units/TestInventory.py b/test/units/TestInventory.py
index 2554d432041..2ae6256e62b 100644
--- a/test/units/TestInventory.py
+++ b/test/units/TestInventory.py
@@ -212,6 +212,11 @@ class TestInventory(unittest.TestCase):
         inventory.subset('greek[0-2];norse[0]')
         self.assertEqual(sorted(inventory.list_hosts()), sorted(['zeus','hera','thor']))
 
+    def test_subset_range_empty_group(self):
+        inventory = self.simple_inventory()
+        inventory.subset('missing[0]')
+        self.assertEqual(sorted(inventory.list_hosts()), sorted([]))
+
     def test_subset_filename(self):
         inventory = self.simple_inventory()
         inventory.subset('@' + os.path.join(self.test_dir, 'restrict_pattern'))
From
9de3b035a6f29e6be7f41273f3a9bdb5dcf4b652 Mon Sep 17 00:00:00 2001 From: anatoly techtonik Date: Tue, 4 Mar 2014 20:00:18 +0200 Subject: [PATCH 213/772] setup: Fix KeyError: 'ipv4_secondaries' (issue #6274) --- library/system/setup | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/library/system/setup b/library/system/setup index 1c51e521627..941a5dcd31a 100644 --- a/library/system/setup +++ b/library/system/setup @@ -1562,13 +1562,13 @@ class LinuxNetwork(Network): iface = words[-1] if iface != device: interfaces[iface] = {} - if not secondary and "ipv4_secondaries" not in interfaces[iface]: - interfaces[iface]["ipv4_secondaries"] = [] if not secondary or "ipv4" not in interfaces[iface]: interfaces[iface]['ipv4'] = {'address': address, 'netmask': netmask, 'network': network} else: + if "ipv4_secondaries" not in interfaces[iface]: + interfaces[iface]["ipv4_secondaries"] = [] interfaces[iface]["ipv4_secondaries"].append({ 'address': address, 'netmask': netmask, @@ -1577,6 +1577,8 @@ class LinuxNetwork(Network): # add this secondary IP to the main device if secondary: + if "ipv4_secondaries" not in interfaces[device]: + interfaces[device]["ipv4_secondaries"] = [] interfaces[device]["ipv4_secondaries"].append({ 'address': address, 'netmask': netmask, From 118d24d171f1615fa9906a26ccf05cd1996755c8 Mon Sep 17 00:00:00 2001 From: Luca Berruti Date: Tue, 4 Mar 2014 18:20:01 +0100 Subject: [PATCH 214/772] fixes #6244 --- lib/ansible/runner/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 800774cfd1a..a809a4aa7e3 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -633,13 +633,13 @@ class Runner(object): all_failed = False results = [] for x in items: - # use a fresh inject for each item + # use a fresh inject for each item this_inject = inject.copy() this_inject['item'] = x # TODO: this idiom should be replaced with an up-conversion to a Jinja2 template evaluation if isinstance(self.complex_args, basestring): - complex_args = template.template(self.basedir, self.complex_args, inject, convert_bare=True) + complex_args = template.template(self.basedir, self.complex_args, this_inject, convert_bare=True) complex_args = utils.safe_eval(complex_args) if type(complex_args) != dict: raise errors.AnsibleError("args must be a dictionary, received %s" % complex_args) From 76037168b10f7e9ac09a084cbc16a19fd1a1c8c1 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 5 Mar 2014 14:51:40 -0500 Subject: [PATCH 215/772] Fixes #6298 and adds a sudo unit test for synchronize --- .../runner/action_plugins/synchronize.py | 9 ++++-- test/units/TestSynchronize.py | 30 ++++++++++++++++++- 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/lib/ansible/runner/action_plugins/synchronize.py b/lib/ansible/runner/action_plugins/synchronize.py index d7c9113f28e..c66fcdff3ce 100644 --- a/lib/ansible/runner/action_plugins/synchronize.py +++ b/lib/ansible/runner/action_plugins/synchronize.py @@ -173,6 +173,11 @@ class ActionModule(object): if self.runner.noop_on_check(inject): module_items += " CHECKMODE=True" - return self.runner._execute_module(conn, tmp, 'synchronize', - module_items, inject=inject) + # run the module and store the result + result = self.runner._execute_module(conn, tmp, 'synchronize', module_items, inject=inject) + + # reset the sudo property + self.runner.sudo = self.original_sudo + + return result diff --git a/test/units/TestSynchronize.py 
b/test/units/TestSynchronize.py index 7965f2295e7..dfb1a129e5a 100644 --- a/test/units/TestSynchronize.py +++ b/test/units/TestSynchronize.py @@ -61,7 +61,35 @@ class TestSynchronize(unittest.TestCase): assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1" assert runner.executed_args == "dest=root@el6.lab.net:/tmp/bar src=/tmp/foo", "wrong args used" - assert runner.sudo == False, "sudo not set to false" + assert runner.sudo == None, "sudo was not reset to None" + + def test_synchronize_action_sudo(self): + + """ verify the synchronize action plugin unsets and then sets sudo """ + + runner = FakeRunner() + runner.sudo = True + runner.remote_user = "root" + runner.transport = "ssh" + conn = FakeConn() + inject = { + 'inventory_hostname': "el6.lab.net", + 'inventory_hostname_short': "el6", + 'ansible_connection': None, + 'ansible_ssh_user': 'root', + 'delegate_to': None, + 'playbook_dir': '.', + } + + x = Synchronize(runner) + x.setup("synchronize", inject) + x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject) + + assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1" + assert runner.executed_args == 'dest=root@el6.lab.net:/tmp/bar src=/tmp/foo rsync_path="sudo rsync"', \ + "wrong args used: %s" % runner.executed_args + assert runner.sudo == True, "sudo was not reset to True" + def test_synchronize_action_local(self): From 9ba1245a84370957b0cc1c350725a2e726b3d03a Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 5 Mar 2014 18:49:54 -0500 Subject: [PATCH 216/772] Fixes #6077 decode escaped newline characters in content for the copy module --- lib/ansible/runner/action_plugins/copy.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py index 0ee9b6f3ced..79acdaba587 100644 --- a/lib/ansible/runner/action_plugins/copy.py +++ b/lib/ansible/runner/action_plugins/copy.py @@ -54,6 +54,12 @@ class ActionModule(object): raw = utils.boolean(options.get('raw', 'no')) force = utils.boolean(options.get('force', 'yes')) + # content with newlines is going to be escaped to safely load in yaml + # now we need to unescape it so that the newlines are evaluated properly + # when writing the file to disk + if content: + content = content.decode('unicode-escape') + if (source is None and content is None and not 'first_available_file' in inject) or dest is None: result=dict(failed=True, msg="src (or content) and dest are required") return ReturnData(conn=conn, result=result) From 52e809fcb75f181a5e9523bd766c2cabebd69590 Mon Sep 17 00:00:00 2001 From: aresch Date: Wed, 5 Mar 2014 16:25:42 -0800 Subject: [PATCH 217/772] Fix respecting remote_tmp when sudo is used --- lib/ansible/runner/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index a809a4aa7e3..2a117ef1c77 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1001,11 +1001,11 @@ class Runner(object): basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) basetmp = os.path.join(C.DEFAULT_REMOTE_TMP, basefile) - if (self.sudo or self.su) and (self.sudo_user != 'root' or self.su != 'root') and basetmp.startswith('$HOME'): + if (self.sudo or self.su) and (self.sudo_user != 'root' or self.su_user != 'root') and basetmp.startswith('$HOME'): basetmp = os.path.join('/tmp', basefile) cmd = 'mkdir -p %s' % basetmp - if 
self.remote_user != 'root' or ((self.sudo or self.su) and (self.sudo_user != 'root' or self.su != 'root')): + if self.remote_user != 'root' or ((self.sudo or self.su) and (self.sudo_user != 'root' or self.su_user != 'root')): cmd += ' && chmod a+rx %s' % basetmp cmd += ' && echo %s' % basetmp From e215f564c5e9370431a8a665fecc56b34e6545fd Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 5 Mar 2014 22:06:59 -0600 Subject: [PATCH 218/772] Create the tempdir in the accelerate module if it doesn't exist Fixes #6047 --- library/utilities/accelerate | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/library/utilities/accelerate b/library/utilities/accelerate index a6e84e32376..6508f1433ea 100644 --- a/library/utilities/accelerate +++ b/library/utilities/accelerate @@ -391,7 +391,13 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): final_path = None if 'user' in data and data.get('user') != getpass.getuser(): vv("the target user doesn't match this user, we'll move the file into place via sudo") - (fd,out_path) = tempfile.mkstemp(prefix='ansible.', dir=os.path.expanduser('~/.ansible/tmp/')) + tmp_path = os.path.expanduser('~/.ansible/tmp/') + if not os.path.exists(tmp_path): + try: + os.makedirs(tmp_path, 0700) + except: + return dict(failed=True, msg='could not create a temporary directory at %s' % tmp_path) + (fd,out_path) = tempfile.mkstemp(prefix='ansible.', dir=tmp_path) out_fd = os.fdopen(fd, 'w', 0) final_path = data['out_path'] else: From fe07ebc8010b5e701d573857aeedd5ab5e4cc165 Mon Sep 17 00:00:00 2001 From: amree Date: Thu, 6 Mar 2014 12:19:54 +0800 Subject: [PATCH 219/772] MASTER_PORT variable for CHANGE MASTER TO command can only accept integer value --- library/database/mysql_replication | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/database/mysql_replication b/library/database/mysql_replication index f18060e9556..fdbb379371a 100644 --- a/library/database/mysql_replication +++ b/library/database/mysql_replication @@ -325,7 +325,7 @@ def main(): if master_password: chm.append("MASTER_PASSWORD='" + master_password + "'") if master_port: - chm.append("MASTER_PORT='" + master_port + "'") + chm.append("MASTER_PORT=" + master_port) if master_connect_retry: chm.append("MASTER_CONNECT_RETRY='" + master_connect_retry + "'") if master_log_file: From 8e7a384fcccc1c249075b90043939e4b89ed86a6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 6 Mar 2014 08:28:36 -0500 Subject: [PATCH 220/772] tags lists are properly uniqued and joined now, also avoids type issues when passed as list/set or strings Signed-off-by: Brian Coca --- lib/ansible/playbook/play.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index e9f00e47024..78a1d0b08b3 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -235,7 +235,7 @@ class Play(object): included_dep_vars = included_role_dep[2] if included_dep_name == dep: if "tags" in included_dep_vars: - included_dep_vars["tags"] = list(set(included_dep_vars["tags"] + passed_vars["tags"])) + included_dep_vars["tags"] = list(set(included_dep_vars["tags"]).union(set(passed_vars["tags"]))) else: included_dep_vars["tags"] = passed_vars["tags"][:] From 27d52fd9cc74ac0a8aac18e13d85563557479ea0 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 6 Mar 2014 09:44:56 -0600 Subject: [PATCH 221/772] Un-escape newlines in delimiters for assemble module --- lib/ansible/runner/action_plugins/assemble.py | 
6 ++++++
 library/files/assemble                        | 2 ++
 2 files changed, 8 insertions(+)

diff --git a/lib/ansible/runner/action_plugins/assemble.py b/lib/ansible/runner/action_plugins/assemble.py
index eb6faf5dfcf..c73964cda68 100644
--- a/lib/ansible/runner/action_plugins/assemble.py
+++ b/lib/ansible/runner/action_plugins/assemble.py
@@ -39,7 +39,13 @@ class ActionModule(object):
         for f in sorted(os.listdir(src_path)):
             fragment = "%s/%s" % (src_path, f)
             if delimit_me and delimiter:
+                # un-escape things like new-lines
+                delimiter = delimiter.decode('unicode-escape')
                 tmp.write(delimiter)
+                # always make sure there's a newline after the
+                # delimiter, so lines don't run together
+                if delimiter[-1] != '\n':
+                    tmp.write('\n')
             if os.path.isfile(fragment):
                 tmp.write(file(fragment).read())
             delimit_me = True
diff --git a/library/files/assemble b/library/files/assemble
index a8c78256e23..f4a60caf230 100644
--- a/library/files/assemble
+++ b/library/files/assemble
@@ -107,6 +107,8 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None):
             continue
         fragment = "%s/%s" % (src_path, f)
         if delimit_me and delimiter:
+            # un-escape anything like newlines
+            delimiter = delimiter.decode('unicode-escape')
             tmp.write(delimiter)
             # always make sure there's a newline after the
             # delimiter, so lines don't run together
From 3fc8a83e7986b5da205cedb87d3b1ca948e3db12 Mon Sep 17 00:00:00 2001
From: Andrew Resch
Date: Thu, 6 Mar 2014 10:24:16 -0800
Subject: [PATCH 222/772] Fix logic checking for both sudo and su, and their respective users

---
 lib/ansible/runner/__init__.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index 2a117ef1c77..f9b7d0a3044 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -420,7 +420,7 @@ class Runner(object):

         environment_string = self._compute_environment_string(inject)

-        if tmp.find("tmp") != -1 and (self.sudo or self.su) and (self.sudo_user != 'root' or self.su_user != 'root'):
+        if tmp.find("tmp") != -1 and (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'):
             # deal with possible umask issues once sudo'ed to other user
             cmd_chmod = "chmod a+r %s" % remote_module_path
             self._low_level_exec_command(conn, cmd_chmod, tmp, sudoable=False)
@@ -449,7 +449,7 @@ class Runner(object):
         else:
             argsfile = self._transfer_str(conn, tmp, 'arguments', args)

-        if (self.sudo or self.su) and (self.sudo_user != 'root' or self.su_user != 'root'):
+        if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'):
             # deal with possible umask issues once sudo'ed to other user
             cmd_args_chmod = "chmod a+r %s" % argsfile
             self._low_level_exec_command(conn, cmd_args_chmod, tmp, sudoable=False)
@@ -491,7 +491,7 @@ class Runner(object):
         res = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, in_data=in_data)

         if tmp.find("tmp") != -1 and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
-            if (self.sudo or self.su) and (self.sudo_user != 'root' or self.su_user != 'root'):
+            if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'):
                 # not sudoing to root, so maybe can't delete files as that other user
                 # have to clean up temp files as original user in a second step
                 cmd2 = "rm -rf %s >/dev/null 2>&1" % tmp
@@ -1001,11 +1001,11 @@ class Runner(object):
         basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
         basetmp = os.path.join(C.DEFAULT_REMOTE_TMP, basefile)
-        if 
(self.sudo or self.su) and (self.sudo_user != 'root' or self.su_user != 'root') and basetmp.startswith('$HOME'): + if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root') and basetmp.startswith('$HOME'): basetmp = os.path.join('/tmp', basefile) cmd = 'mkdir -p %s' % basetmp - if self.remote_user != 'root' or ((self.sudo or self.su) and (self.sudo_user != 'root' or self.su_user != 'root')): + if self.remote_user != 'root' or ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')): cmd += ' && chmod a+rx %s' % basetmp cmd += ' && echo %s' % basetmp From ae1e9a3ec1cbe5a150097b92b2d6345ce06a9809 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 7 Mar 2014 00:07:10 -0600 Subject: [PATCH 223/772] Properly wrap logical elements together for su/sudo detection --- lib/ansible/runner/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index f9b7d0a3044..1e168e6ebb7 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -420,7 +420,7 @@ class Runner(object): environment_string = self._compute_environment_string(inject) - if tmp.find("tmp") != -1 and (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): + if tmp.find("tmp") != -1 and ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')): # deal with possible umask issues once sudo'ed to other user cmd_chmod = "chmod a+r %s" % remote_module_path self._low_level_exec_command(conn, cmd_chmod, tmp, sudoable=False) From c920f78cc3439aa9db0dfb0dd6e72815b73e6afd Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 7 Mar 2014 16:34:04 -0600 Subject: [PATCH 224/772] Fix range issue in inventory and add additional error checking Fixes #6331 --- lib/ansible/inventory/__init__.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 8f74d5ea9e9..171a4f2a04c 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -208,12 +208,14 @@ class Inventory(object): """ # The regex used to match on the range, which can be [x] or [x-y]. 
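(As a quick illustration of what the revised pattern below accepts; the host names are invented, only the regex itself comes from this hunk:

    import re
    pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$")
    print pattern_re.match("webservers[0-3]").groups()   # ('webservers', '0', '3', '')
    print pattern_re.match("webservers[-1]").groups()    # ('webservers', '-1', None, '')

A single negative index such as webservers[-1] now matches, while the added check below still rejects a negative start for an explicit range.)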
- pattern_re = re.compile("^(.*)\[([0-9]+)(?:(?:-)([0-9]+))?\](.*)$") + pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$") m = pattern_re.match(pattern) if m: (target, first, last, rest) = m.groups() first = int(first) if last: + if first < 0: + raise errors.AnsibleError("invalid range: negative indices cannot be used as the first item in a range") last = int(last) else: last = first @@ -245,10 +247,13 @@ class Inventory(object): right = 0 left=int(left) right=int(right) - if left != right: - return hosts[left:right] - else: - return [ hosts[left] ] + try: + if left != right: + return hosts[left:right] + else: + return [ hosts[left] ] + except IndexError: + raise errors.AnsibleError("no hosts matching the pattern '%s' were found" % pat) def _create_implicit_localhost(self, pattern): new_host = Host(pattern) From 6577ff5f85bf27048739013d91066c7b4e1dd466 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 10 Mar 2014 10:05:31 -0500 Subject: [PATCH 225/772] Add tags from handlers to tag availability calculation --- lib/ansible/playbook/play.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 198f15d061b..af66ee25746 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -579,6 +579,7 @@ class Play(object): for x in results: if self.tags is not None: + self.tags = list(set(self.tags).union(set(x.tags))) x.tags.extend(self.tags) return results @@ -686,11 +687,15 @@ class Play(object): unmatched_tags: tags that were found within the current play but do not match any provided by the user ''' - # gather all the tags in all the tasks into one list + # gather all the tags in all the tasks and handlers into one list + # FIXME: isn't this in self.tags already? + all_tags = [] for task in self._tasks: if not task.meta: all_tags.extend(task.tags) + for handler in self._handlers: + all_tags.extend(handler.tags) # compare the lists of tags using sets and return the matched and unmatched all_tags_set = set(all_tags) From 9730157525c8cd539c2f657f40c34b73bb189e44 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 10 Mar 2014 16:06:52 -0500 Subject: [PATCH 226/772] Validate SSL certs accessed through urllib* * Adds another module utility file which generalizes the access of urls via the urllib* libraries. * Adds a new spec generator for common arguments. * Makes the user-agent string configurable. 
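To make the new interface concrete before the diffs, this is roughly how a converted module drives it. This is a minimal sketch, not part of the patch; the flow mirrors the conversions below, but the module body itself is invented:

    def main():
        # url_argument_spec() provides url, force, http_agent,
        # use_proxy and validate_certs, per the new module_utils/urls.py
        module = AnsibleModule(
            argument_spec = url_argument_spec(),
            supports_check_mode = True,
        )

        response, info = fetch_url(module, module.params['url'],
                                   validate_certs=module.params['validate_certs'])
        if info['status'] != 200:
            module.fail_json(msg="request failed: %s" % info['msg'])
        module.exit_json(changed=False, content=response.read())

    # import module snippets
    from ansible.module_utils.basic import *
    from ansible.module_utils.urls import *
    main()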
Fixes #6211 --- examples/ansible.cfg | 14 ++ lib/ansible/constants.py | 4 + lib/ansible/module_utils/basic.py | 10 +- lib/ansible/module_utils/ec2.py | 28 +++ lib/ansible/module_utils/known_hosts.py | 28 +++ lib/ansible/module_utils/rax.py | 29 ++- lib/ansible/module_utils/urls.py | 262 ++++++++++++++++++++++++ library/cloud/ec2_facts | 24 +-- library/database/riak | 21 +- library/monitoring/airbrake_deployment | 41 ++-- library/monitoring/boundary_meter | 61 ++---- library/monitoring/datadog_event | 18 +- library/monitoring/newrelic_deployment | 48 +---- library/monitoring/pagerduty | 35 ++-- library/net_infrastructure/dnsmadeeasy | 32 +-- library/net_infrastructure/netscaler | 40 ++-- library/network/get_url | 116 ++--------- library/notification/flowdock | 31 +-- library/notification/grove | 7 +- library/notification/hipchat | 35 +--- library/packaging/apt_key | 20 +- library/packaging/rpm_key | 17 +- library/source_control/github_hooks | 83 ++++---- 23 files changed, 600 insertions(+), 404 deletions(-) create mode 100644 lib/ansible/module_utils/urls.py diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 5b23e101269..fde76e5b558 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -98,6 +98,20 @@ filter_plugins = /usr/share/ansible_plugins/filter_plugins # set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1 #nocolor = 1 +# the CA certificate path used for validating SSL certs. This path +# should exist on the controlling node, not the target nodes +# common locations: +# RHEL/CentOS: /etc/pki/tls/certs/ca-bundle.crt +# Fedora : /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem +# Ubuntu : /usr/share/ca-certificates/cacert.org/cacert.org.crt +#ca_file_path = + +# the http user-agent string to use when fetching urls. Some web server +# operators block the default urllib user agent as it is frequently used +# by malicious attacks/scripts, so we set it to something unique to +# avoid issues. 
+#http_user_agent = ansible-agent + [paramiko_connection] # uncomment this line to cause the paramiko connection plugin to not record new host diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 6bf87e51f8a..9d3f37c180d 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -143,6 +143,10 @@ DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', ' DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '/usr/share/ansible_plugins/filter_plugins') DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) +# URL Arguments for generic module urllib2 use +DEFAULT_HTTP_USER_AGENT = get_config(p, DEFAULTS, 'http_user_agent', 'ANSIBLE_HTTP_USER_AGENT', 'ansible-agent') +DEFAULT_CA_FILE_PATH = shell_expand_path(get_config(p, DEFAULTS, 'ca_file_path', 'ANSIBLE_CA_FILE_PATH', '')) + ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True) ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True) DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 540efeb4bfc..8025563e58e 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -60,6 +60,7 @@ import grp import pwd import platform import errno +import tempfile try: import json @@ -115,6 +116,7 @@ FILE_COMMON_ARGUMENTS=dict( remote_src = dict(), # used by assemble ) + def get_platform(): ''' what's the platform? example: Linux is a platform. ''' return platform.system() @@ -189,7 +191,7 @@ class AnsibleModule(object): os.environ['LANG'] = MODULE_LANG (self.params, self.args) = self._load_params() - self._legal_inputs = [ 'CHECKMODE', 'NO_LOG' ] + self._legal_inputs = ['CHECKMODE', 'NO_LOG'] self.aliases = self._handle_aliases() @@ -572,8 +574,9 @@ class AnsibleModule(object): def _check_invalid_arguments(self): for (k,v) in self.params.iteritems(): - if k in ('CHECKMODE', 'NO_LOG'): - continue + # these should be in legal inputs already + #if k in ('CHECKMODE', 'NO_LOG'): + # continue if k not in self._legal_inputs: self.fail_json(msg="unsupported parameter for module: %s" % k) @@ -1093,4 +1096,3 @@ class AnsibleModule(object): break return '%.2f %s' % (float(size)/ limit, suffix) - diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index 9156df766b2..58291c2d5d5 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -1,3 +1,31 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + try: from distutils.version import LooseVersion HAS_LOOSE_VERSION = True diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 000db9d1e62..36f5b87fff5 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -1,3 +1,31 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + def add_git_host_key(module, url, accept_hostkey=True): """ idempotently add a git url hostkey """ diff --git a/lib/ansible/module_utils/rax.py b/lib/ansible/module_utils/rax.py index 84e5686d24f..98623c7d38e 100644 --- a/lib/ansible/module_utils/rax.py +++ b/lib/ansible/module_utils/rax.py @@ -1,5 +1,32 @@ -import os +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import os def rax_argument_spec(): return dict( diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py new file mode 100644 index 00000000000..f251c6b407f --- /dev/null +++ b/lib/ansible/module_utils/urls.py @@ -0,0 +1,262 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
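The new file opens, below, with a set of guarded imports: each dependency is probed and its availability recorded in a HAS_* flag, so that fetch_url() can fail the calling module cleanly instead of the whole file dying at import time. The idiom in isolation (the file itself uses bare except clauses; ImportError is the case being guarded against):

    try:
        import ssl
        HAS_SSL = True
    except ImportError:
        HAS_SSL = False

    # callers then branch on the flag, e.g.
    # if not HAS_SSL and validate_certs:
    #     module.fail_json(msg='SSL validation is not available ...')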
+ +try: + import urllib + HAS_URLLIB = True +except: + HAS_URLLIB = False + +try: + import urllib2 + HAS_URLLIB2 = True +except: + HAS_URLLIB2 = False + +try: + import urlparse + HAS_URLPARSE = True +except: + HAS_URLPARSE = False + +try: + import ssl + HAS_SSL=True +except: + HAS_SSL=False + + +class RequestWithMethod(urllib2.Request): + ''' + Workaround for using DELETE/PUT/etc with urllib2 + Originally contained in library/net_infrastructure/dnsmadeeasy + ''' + + def __init__(self, url, method, data=None, headers={}): + self._method = method + urllib2.Request.__init__(self, url, data, headers) + + def get_method(self): + if self._method: + return self._method + else: + return urllib2.Request.get_method(self) + + +class SSLValidationHandler(urllib2.BaseHandler): + ''' + A custom handler class for SSL validation. + + Based on: + http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python + http://techknack.net/python-urllib2-handlers/ + ''' + + def __init__(self, module, hostname, port, ca_cert=None): + self.module = module + self.hostname = hostname + self.port = port + self.ca_cert = ca_cert + + def get_ca_cert(self): + # tries to find a valid CA cert in one of the + # standard locations for the current distribution + + if self.ca_cert and os.path.exists(self.ca_cert): + # the user provided a custom CA cert (ie. one they + # uploaded themselves), so use it + return self.ca_cert + + ca_cert = None + platform = get_platform() + distribution = get_distribution() + if platform == 'Linux': + if distribution in ('Fedora',): + ca_cert = '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem' + elif distribution in ('RHEL','CentOS','ScientificLinux'): + ca_cert = '/etc/pki/tls/certs/ca-bundle.crt' + elif distribution in ('Ubuntu','Debian'): + ca_cert = '/usr/share/ca-certificates/cacert.org/cacert.org.crt' + elif platform == 'FreeBSD': + ca_cert = '/usr/local/share/certs/ca-root.crt' + elif platform == 'OpenBSD': + ca_cert = '/etc/ssl/cert.pem' + elif platform == 'NetBSD': + ca_cert = '/etc/openssl/certs/ca-cert.pem' + elif platform == 'SunOS': + # FIXME? + pass + elif platform == 'AIX': + # FIXME? + pass + + if ca_cert and os.path.exists(ca_cert): + return ca_cert + elif os.path.exists('/etc/ansible/ca-cert.pem'): + # fall back to a user-deployed cert in a standard + # location if the OS platform one is not available + return '/etc/ansible/ca-cert.pem' + else: + # CA cert isn't available, no validation + return None + + def http_request(self, req): + try: + server_cert = ssl.get_server_certificate((self.hostname, self.port), ca_certs=self.get_ca_cert()) + except ssl.SSLError: + self.module.fail_json(msg='failed to validate the SSL certificate for %s:%s. 
You can use validate_certs=no, however this is unsafe and not recommended' % (self.hostname, self.port)) + return req + + https_request = http_request + + +def url_argument_spec(): + ''' + Creates an argument spec that can be used with any module + that will be requesting content via urllib/urllib2 + ''' + return dict( + url = dict(), + force = dict(default='no', aliases=['thirsty'], type='bool'), + http_agent = dict(default='ansible-httpget'), + use_proxy = dict(default='yes', type='bool'), + validate_certs = dict(default='yes', type='bool'), + ) + + +def fetch_url(module, url, data=None, headers=None, method=None, + use_proxy=False, validate_certs=True, force=False, last_mod_time=None, timeout=10): + ''' + Fetches a file from an HTTP/FTP server using urllib2 + ''' + + if not HAS_URLLIB: + module.fail_json(msg='urllib is not installed') + if not HAS_URLLIB2: + module.fail_json(msg='urllib2 is not installed') + elif not HAS_URLPARSE: + module.fail_json(msg='urlparse is not installed') + + r = None + handlers = [] + info = dict(url=url) + + parsed = urlparse.urlparse(url) + if parsed[0] == 'https': + if not HAS_SSL and validate_certs: + module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended') + elif validate_certs: + # do the cert validation + netloc = parsed[1] + if '@' in netloc: + netloc = netloc.split('@', 1)[1] + if ':' in netloc: + hostname, port = netloc.split(':', 1) + else: + hostname = netloc + port = 443 + # create the SSL validation handler and + # add it to the list of handlers + ssl_handler = SSLValidationHandler(module, hostname, port) + handlers.append(ssl_handler) + + if '@' in parsed[1]: + credentials, netloc = parsed[1].split('@', 1) + if ':' in credentials: + username, password = credentials.split(':', 1) + else: + username = credentials + password = '' + parsed = list(parsed) + parsed[1] = netloc + + passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + # this creates a password manager + passman.add_password(None, netloc, username, password) + # because we have put None at the start it will always + # use this username/password combination for urls + # for which `theurl` is a super-url + + authhandler = urllib2.HTTPBasicAuthHandler(passman) + # create the AuthHandler + handlers.append(authhandler) + + #reconstruct url without credentials + url = urlparse.urlunparse(parsed) + + if not use_proxy: + proxyhandler = urllib2.ProxyHandler({}) + handlers.append(proxyhandler) + + opener = urllib2.build_opener(*handlers) + urllib2.install_opener(opener) + + if method: + if method.upper() not in ('OPTIONS','GET','HEAD','POST','PUT','DELETE','TRACE','CONNECT'): + module.fail_json(msg='invalid HTTP request method; %s' % method.upper()) + request = RequestWithMethod(url, method.upper(), data) + else: + request = urllib2.Request(url, data) + + # add the custom agent header, to help prevent issues + # with sites that block the default urllib agent string + request.add_header('User-agent', module.params.get('http_agent')) + + # if we're ok with getting a 304, set the timestamp in the + # header, otherwise make sure we don't get a cached copy + if last_mod_time and not force: + tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000') + request.add_header('If-Modified-Since', tstamp) + else: + request.add_header('cache-control', 'no-cache') + + # user defined headers now, which may override things we've set above + if headers: + if not isinstance(headers, dict): + 
module.fail_json("headers provided to fetch_url() must be a dict") + for header in headers: + request.add_header(header, headers[header]) + + try: + if sys.version_info < (2,6,0): + # urlopen in python prior to 2.6.0 did not + # have a timeout parameter + r = urllib2.urlopen(request, None) + else: + r = urllib2.urlopen(request, None, timeout) + info.update(r.info()) + info['url'] = r.geturl() # The URL goes in too, because of redirects. + info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200)) + except urllib2.HTTPError, e: + info.update(dict(msg=str(e), status=e.code)) + except urllib2.URLError, e: + code = int(getattr(e, 'code', -1)) + info.update(dict(msg="Request failed: %s" % str(e), status=code)) + + return r, info + diff --git a/library/cloud/ec2_facts b/library/cloud/ec2_facts index 1c17fa5b717..c6a6670a58b 100644 --- a/library/cloud/ec2_facts +++ b/library/cloud/ec2_facts @@ -41,7 +41,6 @@ EXAMPLES = ''' when: ansible_ec2_instance_type == "t1.micro" ''' -import urllib2 import socket import re @@ -62,7 +61,8 @@ class Ec2Metadata(object): 'us-west-1', 'us-west-2') - def __init__(self, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None): + def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None): + self.module = module self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri @@ -70,12 +70,9 @@ class Ec2Metadata(object): self._prefix = 'ansible_ec2_%s' def _fetch(self, url): - try: - return urllib2.urlopen(url).read() - except urllib2.HTTPError: - return - except urllib2.URLError: - return + self.module.fail_json(msg="url is %s" % url) + (response, info) = fetch_url(self.module, url, force=True) + return response.read() def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']): new_fields = {} @@ -150,17 +147,20 @@ class Ec2Metadata(object): return data def main(): - - ec2_facts = Ec2Metadata().run() - ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts) + argument_spec = url_argument_spec() module = AnsibleModule( - argument_spec = dict(), + argument_spec = argument_spec, supports_check_mode = True, ) + + ec2_facts = Ec2Metadata(module).run() + ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts) + module.exit_json(**ec2_facts_result) # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * main() diff --git a/library/database/riak b/library/database/riak index 53faba6e983..e0a7552f0ae 100644 --- a/library/database/riak +++ b/library/database/riak @@ -138,24 +138,13 @@ def main(): while True: if time.time() > timeout: module.fail_json(msg='Timeout, could not fetch Riak stats.') - try: - if sys.version_info<(2,6,0): - stats_raw = urllib2.urlopen( - 'http://%s/stats' % (http_conn), None).read() - else: - stats_raw = urllib2.urlopen( - 'http://%s/stats' % (http_conn), None, 5).read() + (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5) + if info['status'] == 200: + stats_raw = response.read() break - except urllib2.HTTPError, e: - time.sleep(5) - except urllib2.URLError, e: - time.sleep(5) - except socket.timeout: - time.sleep(5) - except Exception, e: - module.fail_json(msg='Could not fetch Riak stats: %s' % e) + time.sleep(5) -# here we attempt to load those stats, + # here we attempt to load those stats, try: stats = json.loads(stats_raw) except: diff 
--git a/library/monitoring/airbrake_deployment b/library/monitoring/airbrake_deployment
index 8a4a834be7c..6a83459906a 100644
--- a/library/monitoring/airbrake_deployment
+++ b/library/monitoring/airbrake_deployment
@@ -52,6 +52,13 @@ options:
       - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
     required: false
     default: https://airbrake.io/deploys
+  validate_certs:
+    description:
+      - If C(no), SSL certificates for the target url will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    required: false
+    default: 'yes'
+    choices: ['yes', 'no']

 # informational: requirements for nodes
 requirements: [ urllib, urllib2 ]
@@ -64,29 +71,12 @@ EXAMPLES = '''
                       revision=4.2
'''

-HAS_URLLIB = True
-try:
-    import urllib
-except ImportError:
-    HAS_URLLIB = False
-
-HAS_URLLIB2 = True
-try:
-    import urllib2
-except ImportError:
-    HAS_URLLIB2 = False
-
# ===========================================
# Module execution.
#

def main():

-    if not HAS_URLLIB:
-        module.fail_json(msg="urllib is not installed")
-    if not HAS_URLLIB2:
-        module.fail_json(msg="urllib2 is not installed")
-
     module = AnsibleModule(
         argument_spec=dict(
             token=dict(required=True),
@@ -95,6 +85,7 @@ def main():
             repo=dict(required=False),
             revision=dict(required=False),
             url=dict(required=False, default='https://api.airbrake.io/deploys.txt'),
+            validate_certs=dict(default='yes', type='bool'),
         ),
         supports_check_mode=True
     )
@@ -123,18 +114,16 @@ def main():
         module.exit_json(changed=True)

     # Send the data to airbrake
-    try:
-        req = urllib2.Request(url, urllib.urlencode(params))
-        result=urllib2.urlopen(req)
-    except Exception, e:
-        module.fail_json(msg="unable to update airbrake via %s?%s : %s" % (url, urllib.urlencode(params), e))
+    data = urllib.urlencode(params)
+    response, info = fetch_url(module, url, data=data, validate_certs=module.params['validate_certs'])
+    if info['status'] == 200:
+        module.exit_json(changed=True)
     else:
-        if result.code == 200:
-            module.exit_json(changed=True)
-        else:
-            module.fail_json(msg="HTTP result code: %d connecting to %s" % (result.code, url))
+        module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))

 # import module snippets
 from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
 main()
diff --git a/library/monitoring/boundary_meter b/library/monitoring/boundary_meter
index 202dfd03ae3..3c9f90a4ce9 100644
--- a/library/monitoring/boundary_meter
+++ b/library/monitoring/boundary_meter
@@ -24,7 +24,6 @@ along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
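The airbrake conversion above is the template repeated through the rest of this patch: urlencode the body, call fetch_url(), and branch on info['status'] instead of catching urllib2 exceptions. Reduced to its skeleton (parameter values invented for illustration):

    params = {'deploy[rails_env]': 'production', 'api_key': 'your-token'}
    data = urllib.urlencode(params)
    response, info = fetch_url(module, url, data=data,
                               validate_certs=module.params['validate_certs'])
    if info['status'] == 200:
        module.exit_json(changed=True)
    else:
        module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))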
import json import datetime -import urllib2 import base64 import os @@ -74,12 +73,6 @@ EXAMPLES=''' ''' -try: - import urllib2 - HAS_URLLIB2 = True -except ImportError: - HAS_URLLIB2 = False - api_host = "api.boundary.com" config_directory = "/etc/bprobe" @@ -101,7 +94,7 @@ def build_url(name, apiid, action, meter_id=None, cert_type=None): elif action == "delete": return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id) -def http_request(name, apiid, apikey, action, meter_id=None, cert_type=None): +def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None): if meter_id is None: url = build_url(name, apiid, action) @@ -111,11 +104,11 @@ def http_request(name, apiid, apikey, action, meter_id=None, cert_type=None): else: url = build_url(name, apiid, action, meter_id, cert_type) - auth = auth_encode(apikey) - request = urllib2.Request(url) - request.add_header("Authorization", "Basic %s" % (auth)) - request.add_header("Content-Type", "application/json") - return request + headers = dict() + headers["Authorization"] = "Basic %s" % auth_encode(apikey) + headers["Content-Type"] = "application/json" + + return fetch_url(module, url, data=data, headers=headers) def create_meter(module, name, apiid, apikey): @@ -126,14 +119,10 @@ def create_meter(module, name, apiid, apikey): module.exit_json(status="Meter " + name + " already exists",changed=False) else: # If it doesn't exist, create it - request = http_request(name, apiid, apikey, action="create") - # A create request seems to need a json body with the name of the meter in it body = '{"name":"' + name + '"}' - request.add_data(body) + response, info = http_request(module, name, apiid, apikey, data=body, action="create") - try: - result = urllib2.urlopen(request) - except urllib2.URLError, e: + if info['status'] != 200: module.fail_json(msg="Failed to connect to api host to create meter") # If the config directory doesn't exist, create it @@ -160,15 +149,13 @@ def create_meter(module, name, apiid, apikey): def search_meter(module, name, apiid, apikey): - request = http_request(name, apiid, apikey, action="search") + response, info = http_request(module, name, apiid, apikey, action="search") - try: - result = urllib2.urlopen(request) - except urllib2.URLError, e: + if info['status'] != 200: module.fail_json("Failed to connect to api host to search for meter") # Return meters - return json.loads(result.read()) + return json.loads(response.read()) def get_meter_id(module, name, apiid, apikey): # In order to delete the meter we need its id @@ -186,16 +173,9 @@ def delete_meter(module, name, apiid, apikey): if meter_id is None: return 1, "Meter does not exist, so can't delete it" else: - action = "delete" - request = http_request(name, apiid, apikey, action, meter_id) - # See http://stackoverflow.com/questions/4511598/how-to-make-http-delete-method-using-urllib2 - # urllib2 only does GET or POST I believe, but here we need delete - request.get_method = lambda: 'DELETE' - - try: - result = urllib2.urlopen(request) - except urllib2.URLError, e: - module.fail_json("Failed to connect to api host to delete meter") + response, info = http_request(module, name, apiid, apikey, action, meter_id) + if info['status'] != 200: + module.fail_json("Failed to delete meter") # Each new meter gets a new key.pem and ca.pem file, so they should be deleted types = ['cert', 'key'] @@ -214,17 +194,14 @@ def download_request(module, name, apiid, apikey, cert_type): if meter_id is not None: action = "certificates" - request = 
http_request(module, name, apiid, apikey, action, meter_id, cert_type)
-
-        try:
-            result = urllib2.urlopen(request)
-        except urllib2.URLError, e:
+        if info['status'] != 200:
             module.fail_json("Failed to connect to api host to download certificate")

         if response:
             try:
                 cert_file_path = '%s/%s.pem' % (config_directory,cert_type)
-                body = result.read()
+                body = response.read()
                 cert_file = open(cert_file_path, 'w')
                 cert_file.write(body)
                 cert_file.close()
@@ -238,9 +215,6 @@ def main():

-    if not HAS_URLLIB2:
-        module.fail_json(msg="urllib2 is not installed")
-
     module = AnsibleModule(
         argument_spec=dict(
             state=dict(required=True, choices=['present', 'absent']),
@@ -268,5 +242,6 @@ def main():

 # import module snippets
 from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *

 main()
diff --git a/library/monitoring/datadog_event b/library/monitoring/datadog_event
index 629e86e98ab..878aee6d343 100644
--- a/library/monitoring/datadog_event
+++ b/library/monitoring/datadog_event
@@ -67,7 +67,6 @@ datadog_event: title="Testing from ansible" text="Test!"
'''

 import socket
-from urllib2 import urlopen, Request, URLError

 def main():
     module = AnsibleModule(
@@ -97,8 +96,7 @@ def main():
     post_event(module)

 def post_event(module):
-    uri = "https://app.datadoghq.com/api/v1/events?api_key=" + \
-        module.params['api_key']
+    uri = "https://app.datadoghq.com/api/v1/events?api_key=%s" % module.params['api_key']

     body = dict(
         title=module.params['title'],
@@ -117,22 +115,20 @@ def main():
     json_body = module.jsonify(body)
     headers = {"Content-Type": "application/json"}

-    request = Request(uri, json_body, headers, unverifiable=True)
-    try:
-        response = urlopen(request)
+    (response, info) = fetch_url(module, uri, data=json_body, headers=headers)
+    if info['status'] == 200:
         response_body = response.read()
         response_json = module.from_json(response_body)
         if response_json['status'] == 'ok':
             module.exit_json(changed=True)
         else:
             module.fail_json(msg=response)
-
-    except URLError, e:
-        module.fail_json(msg="URL error: %s." % e)
-    except socket.error, e:
-        module.fail_json(msg="Socket error: %s to %s" % (e, uri))
+    else:
+        module.fail_json(**info)

 # import module snippets
 from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
 main()
diff --git a/library/monitoring/newrelic_deployment b/library/monitoring/newrelic_deployment
index de64651969c..08132722e1d 100644
--- a/library/monitoring/newrelic_deployment
+++ b/library/monitoring/newrelic_deployment
@@ -75,29 +75,12 @@ EXAMPLES = '''
                       revision=1.0
'''

-HAS_URLLIB = True
-try:
-    import urllib
-except ImportError:
-    HAS_URLLIB = False
-
-HAS_URLLIB2 = True
-try:
-    import urllib2
-except ImportError:
-    HAS_URLLIB2 = False
-
# ===========================================
# Module execution.
#

def main():

-    if not HAS_URLLIB:
-        module.fail_json(msg="urllib is not installed")
-    if not HAS_URLLIB2:
-        module.fail_json(msg="urllib2 is not installed")
-
     module = AnsibleModule(
         argument_spec=dict(
             token=dict(required=True),
@@ -134,29 +117,20 @@ def main():
         module.exit_json(changed=True)

     # Send the data to NewRelic
-    try:
-        req = urllib2.Request("https://rpm.newrelic.com/deployments.xml", urllib.urlencode(params))
-        req.add_header('x-api-key',module.params["token"])
-        result=urllib2.urlopen(req)
-        # urlopen behaves differently in python 2.4 and 2.6 so we handle
-        # both cases here. 
In python 2.4 it throws an exception if the - # return code is anything other than a 200. In python 2.6 it - # doesn't throw an exception for any 2xx return codes. In both - # cases we expect newrelic should return a 201 on success. So - # to handle both cases, both the except & else cases below are - # effectively identical. - except Exception, e: - if e.code == 201: - module.exit_json(changed=True) - else: - module.fail_json(msg="unable to update newrelic: %s" % e) + url = "https://rpm.newrelic.com/deployments.xml" + data = urllib.urlencode(params) + headers = { + 'x-api-key': module.params["token"], + } + response, info = fetch_url(module, url, data=data, headers=headers) + if info['status'] in (200, 201): + module.exit_json(changed=True) else: - if result.code == 201: - module.exit_json(changed=True) - else: - module.fail_json(msg="result code: %d" % result.code) + module.fail_json(msg="unable to update newrelic: %s" % info['msg']) # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + main() diff --git a/library/monitoring/pagerduty b/library/monitoring/pagerduty index bfd0573f4de..9a7f21d0779 100644 --- a/library/monitoring/pagerduty +++ b/library/monitoring/pagerduty @@ -87,24 +87,23 @@ EXAMPLES=''' import json import datetime -import urllib2 import base64 -def ongoing(name, user, passwd): +def ongoing(module, name, user, passwd): url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing" auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '') + headers = {"Authorization": "Basic %s" % auth} - req = urllib2.Request(url) - req.add_header("Authorization", "Basic %s" % auth) - res = urllib2.urlopen(req) - out = res.read() + response, info = fetch_url(module, url, headers=headers) + if info['status'] != 200: + module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg']) - return False, out + return False, response.read() -def create(name, user, passwd, service, hours, desc): +def create(module, name, user, passwd, service, hours, desc): now = datetime.datetime.utcnow() later = now + datetime.timedelta(hours=int(hours)) @@ -113,15 +112,17 @@ def create(name, user, passwd, service, hours, desc): url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows" auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '') + headers = { + 'Authorization': 'Basic %s' % auth, + 'Content-Type' : 'application/json', + } data = json.dumps({'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': [service]}}) - req = urllib2.Request(url, data) - req.add_header("Authorization", "Basic %s" % auth) - req.add_header('Content-Type', 'application/json') - res = urllib2.urlopen(req) - out = res.read() + response, info = fetch_url(module, url, data=data, headers=headers, method='POST') + if info['status'] != 200: + module.fail_json(msg="failed to create the window: %s" % info['msg']) - return False, out + return False, response.read() def main(): @@ -149,10 +150,10 @@ def main(): if state == "running" or state == "started": if not service: module.fail_json(msg="service not specified") - (rc, out) = create(name, user, passwd, service, hours, desc) + (rc, out) = create(module, name, user, passwd, service, hours, desc) if state == "ongoing": - (rc, out) = ongoing(name, user, passwd) + (rc, out) = ongoing(module, name, user, passwd) if rc != 0: module.fail_json(msg="failed", result=out) @@ -161,4 +162,6 @@ def main(): # import module snippets from 
ansible.module_utils.basic import * +from ansible.module_utils.urls import * + main() diff --git a/library/net_infrastructure/dnsmadeeasy b/library/net_infrastructure/dnsmadeeasy index d4af13e884a..9e2c14480eb 100644 --- a/library/net_infrastructure/dnsmadeeasy +++ b/library/net_infrastructure/dnsmadeeasy @@ -106,8 +106,6 @@ EXAMPLES = ''' IMPORT_ERROR = None try: - import urllib - import urllib2 import json from time import strftime, gmtime import hashlib @@ -115,22 +113,6 @@ try: except ImportError, e: IMPORT_ERROR = str(e) - -class RequestWithMethod(urllib2.Request): - - """Workaround for using DELETE/PUT/etc with urllib2""" - - def __init__(self, url, method, data=None, headers={}): - self._method = method - urllib2.Request.__init__(self, url, data, headers) - - def get_method(self): - if self._method: - return self._method - else: - return urllib2.Request.get_method(self) - - class DME2: def __init__(self, apikey, secret, domain, module): @@ -169,16 +151,10 @@ class DME2: url = self.baseurl + resource if data and not isinstance(data, basestring): data = urllib.urlencode(data) - request = RequestWithMethod(url, method, data, self._headers()) - try: - response = urllib2.urlopen(request) - except urllib2.HTTPError, e: - self.module.fail_json( - msg="%s returned %s, with body: %s" % (url, e.code, e.read())) - except Exception, e: - self.module.fail_json( - msg="Failed contacting: %s : Exception %s" % (url, e.message())) + response, info = fetch_url(self.module, url, data=data, method=method) + if info['status'] not in (200, 201, 204): + self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) try: return json.load(response) @@ -338,4 +314,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + main() diff --git a/library/net_infrastructure/netscaler b/library/net_infrastructure/netscaler index 1aa370895d5..4756d90abdc 100644 --- a/library/net_infrastructure/netscaler +++ b/library/net_infrastructure/netscaler @@ -73,6 +73,14 @@ options: default: server choices: ["server", "service"] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+    required: false
+    default: 'yes'
+    choices: ['yes', 'no']
+
 requirements: [ "urllib", "urllib2" ]
 author: Nandor Sivok
'''

EXAMPLES = '''
# Connect to netscaler appliance
ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=api"

 import json
-import urllib
-import urllib2
 import base64
 import socket

@@ -100,23 +106,25 @@ class netscaler(object):

     _nitro_base_url = '/nitro/v1/'

+    def __init__(self, module):
+        self.module = module
+
     def http_request(self, api_endpoint, data_json={}):
         request_url = self._nsc_protocol + '://' + self._nsc_host + self._nitro_base_url + api_endpoint

-        data_json = urllib.urlencode(data_json)
-        if len(data_json):
-            req = urllib2.Request(request_url, data_json)
-            req.add_header('Content-Type', 'application/x-www-form-urlencoded')
-        else:
-            req = urllib2.Request(request_url)
+        data_json = urllib.urlencode(data_json)
+        if not len(data_json):
+            data_json = None

-        base64string = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip()
-        req.add_header('Authorization', "Basic %s" % base64string)
+        auth = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip()
+        headers = {
+            'Authorization': 'Basic %s' % auth,
+            'Content-Type' : 'application/x-www-form-urlencoded',
+        }

-        resp = urllib2.urlopen(req)
-        resp = json.load(resp)
+        response, info = fetch_url(self.module, request_url, data=data_json, headers=headers, validate_certs=self.module.params['validate_certs'])

-        return resp
+        return json.loads(response.read())

     def prepare_request(self, action):
         resp = self.http_request(
@@ -134,7 +142,7 @@ class netscaler(object):

 def core(module):
-    n = netscaler()
+    n = netscaler(module)
     n._nsc_host = module.params.get('nsc_host')
     n._nsc_user = module.params.get('user')
     n._nsc_pass = module.params.get('password')
@@ -158,7 +166,8 @@ def main():
             password = dict(required=True),
             action = dict(default='enable', choices=['enable','disable']),
             name = dict(default=socket.gethostname()),
-            type = dict(default='server', choices=['service', 'server'])
+            type = dict(default='server', choices=['service', 'server']),
+            validate_certs=dict(default='yes', type='bool'),
         )
     )
@@ -177,4 +186,5 @@ def main():

 # import module snippets
 from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
 main()
diff --git a/library/network/get_url b/library/network/get_url
index 9704b8dbadb..c249c44049a 100644
--- a/library/network/get_url
+++ b/library/network/get_url
@@ -83,6 +83,13 @@ options:
     required: false
     default: 'yes'
     choices: ['yes', 'no']
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] others: description: - all arguments accepted by the M(file) module also work here @@ -108,19 +115,6 @@ try: except ImportError: HAS_HASHLIB=False -try: - import urllib2 - HAS_URLLIB2 = True -except ImportError: - HAS_URLLIB2 = False - -try: - import urlparse - import socket - HAS_URLPARSE = True -except ImportError: - HAS_URLPARSE=False - # ============================================================== # url handling @@ -130,80 +124,14 @@ def url_filename(url): return 'index.html' return fn -def url_do_get(module, url, dest, use_proxy, last_mod_time, force): - """ - Get url and return request and info - Credits: http://stackoverflow.com/questions/7006574/how-to-download-file-from-ftp - """ - - USERAGENT = 'ansible-httpget' - info = dict(url=url, dest=dest) - r = None - handlers = [] - - parsed = urlparse.urlparse(url) - - if '@' in parsed[1]: - credentials, netloc = parsed[1].split('@', 1) - if ':' in credentials: - username, password = credentials.split(':', 1) - else: - username = credentials - password = '' - parsed = list(parsed) - parsed[1] = netloc - - passman = urllib2.HTTPPasswordMgrWithDefaultRealm() - # this creates a password manager - passman.add_password(None, netloc, username, password) - # because we have put None at the start it will always - # use this username/password combination for urls - # for which `theurl` is a super-url - - authhandler = urllib2.HTTPBasicAuthHandler(passman) - # create the AuthHandler - handlers.append(authhandler) - - #reconstruct url without credentials - url = urlparse.urlunparse(parsed) - - if not use_proxy: - proxyhandler = urllib2.ProxyHandler({}) - handlers.append(proxyhandler) - - opener = urllib2.build_opener(*handlers) - urllib2.install_opener(opener) - request = urllib2.Request(url) - request.add_header('User-agent', USERAGENT) - - if last_mod_time and not force: - tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000') - request.add_header('If-Modified-Since', tstamp) - else: - request.add_header('cache-control', 'no-cache') - - try: - r = urllib2.urlopen(request) - info.update(r.info()) - info['url'] = r.geturl() # The URL goes in too, because of redirects. - info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200)) - except urllib2.HTTPError, e: - # Must not fail_json() here so caller can handle HTTP 304 unmodified - info.update(dict(msg=str(e), status=e.code)) - except urllib2.URLError, e: - code = getattr(e, 'code', -1) - module.fail_json(msg="Request failed: %s" % str(e), status_code=code) - - return r, info - -def url_get(module, url, dest, use_proxy, last_mod_time, force): +def url_get(module, url, dest, use_proxy, last_mod_time, force, validate_certs): """ Download data from the url and store in a temporary file. 
Return (tempfile, info about the request) """ - req, info = url_do_get(module, url, dest, use_proxy, last_mod_time, force) + rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, validate_certs=validate_certs) if info['status'] == 304: module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', '')) @@ -215,12 +143,12 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force): fd, tempname = tempfile.mkstemp() f = os.fdopen(fd, 'wb') try: - shutil.copyfileobj(req, f) + shutil.copyfileobj(rsp, f) except Exception, err: os.remove(tempname) module.fail_json(msg="failed to create temporary content file: %s" % str(err)) f.close() - req.close() + rsp.close() return tempname, info def extract_filename_from_headers(headers): @@ -247,21 +175,15 @@ def extract_filename_from_headers(headers): def main(): - # does this really happen on non-ancient python? - if not HAS_URLLIB2: - module.fail_json(msg="urllib2 is not installed") - if not HAS_URLPARSE: - module.fail_json(msg="urlparse is not installed") + argument_spec = url_argument_spec() + argument_spec.update( + dest = dict(required=True), + sha256sum = dict(default=''), + ) module = AnsibleModule( # not checking because of daisy chain to file module - argument_spec = dict( - url = dict(required=True), - dest = dict(required=True), - force = dict(default='no', aliases=['thirsty'], type='bool'), - sha256sum = dict(default=''), - use_proxy = dict(default='yes', type='bool') - ), + argument_spec = argument_spec, add_file_common_args=True ) @@ -270,6 +192,7 @@ def main(): force = module.params['force'] sha256sum = module.params['sha256sum'] use_proxy = module.params['use_proxy'] + validate_certs = module.params['validate_certs'] dest_is_dir = os.path.isdir(dest) last_mod_time = None @@ -284,7 +207,7 @@ def main(): last_mod_time = datetime.datetime.utcfromtimestamp(mtime) # download to tmpsrc - tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force) + tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, validate_certs) # Now the request has completed, we can finally generate the final # destination file name from the info dict. @@ -366,4 +289,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * main() diff --git a/library/notification/flowdock b/library/notification/flowdock index a5be40d1f10..32817d756dc 100644 --- a/library/notification/flowdock +++ b/library/notification/flowdock @@ -96,31 +96,12 @@ EXAMPLES = ''' tags=tag1,tag2,tag3 ''' -HAS_URLLIB = True -try: - import urllib -except ImportError: - HAS_URLLIB = False - -HAS_URLLIB2 = True -try: - import urllib2 -except ImportError: - HAS_URLLIB2 = False - - - # =========================================== # Module execution. 
# def main(): - if not HAS_URLLIB: - module.fail_json(msg="urllib is not installed") - if not HAS_URLLIB2: - module.fail_json(msg="urllib2 is not installed") - module = AnsibleModule( argument_spec=dict( token=dict(required=True), @@ -187,14 +168,16 @@ def main(): module.exit_json(changed=False) # Send the data to Flowdock - try: - response = urllib2.urlopen(url, urllib.urlencode(params)) - except Exception, e: - module.fail_json(msg="unable to send msg: %s" % e) + data = urllib.urlencode(params) + response, info = fetch_url(module, url, data=data) + if info['status'] != 200: + module.fail_json(msg="unable to send msg: %s" % info['msg']) - module.exit_json(changed=False, msg=module.params["msg"]) + module.exit_json(changed=True, msg=module.params["msg"]) # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + main() diff --git a/library/notification/grove b/library/notification/grove index b759f025e29..1e2132cfb73 100644 --- a/library/notification/grove +++ b/library/notification/grove @@ -41,8 +41,6 @@ EXAMPLES = ''' message=deployed {{ target }} ''' -import urllib - BASE_URL = 'https://grove.io/api/notice/%s/' # ============================================================== @@ -57,7 +55,10 @@ def do_notify_grove(module, channel_token, service, message, url=None, icon_url= if icon_url is not None: my_data['icon_url'] = icon_url - urllib.urlopen(my_url, urllib.urlencode(my_data)) + data = urllib.urlencode(my_data) + response, info = fetch_url(module, my_url, data=data) + if info['status'] != 200: + module.fail_json(msg="failed to send notification: %s" % info['msg']) # ============================================================== # main diff --git a/library/notification/hipchat b/library/notification/hipchat index eec2b8c3618..c4b36d64ce7 100644 --- a/library/notification/hipchat +++ b/library/notification/hipchat @@ -60,22 +60,10 @@ EXAMPLES = ''' # HipChat module specific support methods. # -HAS_URLLIB = True -try: - import urllib -except ImportError: - HAS_URLLIB = False - -HAS_URLLIB2 = True -try: - import urllib2 -except ImportError: - HAS_URLLIB2 = False - MSG_URI = "https://api.hipchat.com/v1/rooms/message?" 
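The flowdock, grove, and hipchat conversions all reduce to the same shape: urlencode the payload, POST it through fetch_url, and branch on info['status'] instead of catching urllib2 exceptions. As a rough sketch, with a hypothetical helper, endpoint, and payload rather than code from any one of these modules:

    # Hedged sketch of the shared notify pattern; the function name, url,
    # and params are placeholders, not taken from any module in this patch.
    def notify(module, url, params):
        data = urllib.urlencode(params)
        response, info = fetch_url(module, url, data=data)
        if info['status'] != 200:
            module.fail_json(msg="unable to send msg: %s" % info['msg'])
        return response.read()
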
-def send_msg(token, room, msg_from, msg, msg_format='text', +def send_msg(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False): '''sending message to hipchat''' @@ -92,8 +80,12 @@ def send_msg(token, room, msg_from, msg, msg_format='text', params['notify'] = 0 url = MSG_URI + "auth_token=%s" % (token) - response = urllib2.urlopen(url, urllib.urlencode(params)) - return response.read() + data = urllib.urlencode(params) + response, info = fetch_url(module, url, data=data) + if info['status'] == 200: + return response.read() + else: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) # =========================================== @@ -102,11 +94,6 @@ def send_msg(token, room, msg_from, msg, msg_format='text', def main(): - if not HAS_URLLIB: - module.fail_json(msg="urllib is not installed") - if not HAS_URLLIB2: - module.fail_json(msg="urllib2 is not installed") - module = AnsibleModule( argument_spec=dict( token=dict(required=True), @@ -130,15 +117,15 @@ def main(): notify = module.params["notify"] try: - send_msg(token, room, msg_from, msg, msg_format, - color, notify) + send_msg(module, token, room, msg_from, msg, msg_format, color, notify) except Exception, e: module.fail_json(msg="unable to sent msg: %s" % e) changed = True - module.exit_json(changed=changed, room=room, msg_from=msg_from, - msg=msg) + module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + main() diff --git a/library/packaging/apt_key b/library/packaging/apt_key index eee86337020..ff05bb93d1a 100644 --- a/library/packaging/apt_key +++ b/library/packaging/apt_key @@ -64,6 +64,14 @@ options: default: present description: - used to specify if key is being added or revoked + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + ''' EXAMPLES = ''' @@ -88,7 +96,6 @@ EXAMPLES = ''' # FIXME: standardize into module_common -from urllib2 import urlopen, URLError from traceback import format_exc from re import compile as re_compile # FIXME: standardize into module_common @@ -133,11 +140,8 @@ def download_key(module, url): if url is None: module.fail_json(msg="needed a URL but was not specified") try: - connection = urlopen(url) - if connection is None: - module.fail_json("error connecting to download key from url") - data = connection.read() - return data + rsp, info = fetch_url(module, url, validate_certs=module.params['validate_certs']) + return rsp.read() except Exception: module.fail_json(msg="error getting key id from url", traceback=format_exc()) @@ -175,7 +179,8 @@ def main(): file=dict(required=False), key=dict(required=False), keyring=dict(required=False), - state=dict(required=False, choices=['present', 'absent'], default='present') + state=dict(required=False, choices=['present', 'absent'], default='present'), + validate_certs=dict(default='yes', type='bool'), ), supports_check_mode=True ) @@ -240,4 +245,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * main() diff --git a/library/packaging/rpm_key b/library/packaging/rpm_key index 82532477348..9d85f30ac8b 100644 --- a/library/packaging/rpm_key +++ b/library/packaging/rpm_key @@ -42,6 +42,14 @@ options: choices: [present, absent] description: - Wheather the key will be imported or removed from the rpm db. + validate_certs: + description: + - If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + ''' EXAMPLES = ''' @@ -57,7 +65,6 @@ EXAMPLES = ''' import syslog import os.path import re -import urllib2 import tempfile # Attempt to download at most 8192 bytes. @@ -116,8 +123,8 @@ class RpmKey: def fetch_key(self, url, maxbytes=MAXBYTES): """Downloads a key from url, returns a valid path to a gpg key""" try: - fd = urllib2.urlopen(url) - key = fd.read(maxbytes) + rsp, info = fetch_url(self.module, url, validate_certs=self.module.params['validate_certs']) + key = rsp.read(maxbytes) if not is_pubkey(key): self.module.fail_json(msg="Not a public key: %s" % url) tmpfd, tmpname = tempfile.mkstemp() @@ -187,7 +194,8 @@ def main(): module = AnsibleModule( argument_spec = dict( state=dict(default='present', choices=['present', 'absent'], type='str'), - key=dict(required=True, type='str') + key=dict(required=True, type='str'), + validate_certs=dict(default='yes', type='bool'), ), supports_check_mode=True ) @@ -198,4 +206,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * main() diff --git a/library/source_control/github_hooks b/library/source_control/github_hooks index 55eb8d3c8d3..c5c5b648c7a 100644 --- a/library/source_control/github_hooks +++ b/library/source_control/github_hooks @@ -19,7 +19,6 @@ # along with Ansible. If not, see . import json -import urllib2 import base64 DOCUMENTATION = ''' @@ -51,6 +50,14 @@ options: - This tells the githooks module what you want it to do. required: true choices: [ "create", "cleanall" ] + validate_certs: + description: + - If C(no), SSL certificates for the target repo will not be validated. 
This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + author: Phillip Gentry, CX Inc ''' @@ -62,16 +69,19 @@ EXAMPLES = ''' - local_action: github_hooks action=cleanall user={{ gituser }} oauthkey={{ oauthkey }} repo={{ repo }} ''' -def list(hookurl, oauthkey, repo, user): +def list(module, hookurl, oauthkey, repo, user): url = "%s/hooks" % repo auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '') - req = urllib2.Request(url) - req.add_header("Authorization", "Basic %s" % auth) - res = urllib2.urlopen(req) - out = res.read() - return False, out - -def clean504(hookurl, oauthkey, repo, user): + headers = { + 'Authorization': 'Basic %s' % auth, + } + response, info = fetch_url(module, url, headers=headers, validate_certs=module.params['validate_certs']) + if info['status'] != 200: + return False, '' + else: + return False, response.read() + +def clean504(module, hookurl, oauthkey, repo, user): current_hooks = list(hookurl, oauthkey, repo, user)[1] decoded = json.loads(current_hooks) @@ -79,11 +89,11 @@ def clean504(hookurl, oauthkey, repo, user): if hook['last_response']['code'] == 504: # print "Last response was an ERROR for hook:" # print hook['id'] - delete(hookurl, oauthkey, repo, user, hook['id']) + delete(module, hookurl, oauthkey, repo, user, hook['id']) return 0, current_hooks -def cleanall(hookurl, oauthkey, repo, user): +def cleanall(module, hookurl, oauthkey, repo, user): current_hooks = list(hookurl, oauthkey, repo, user)[1] decoded = json.loads(current_hooks) @@ -91,11 +101,11 @@ def cleanall(hookurl, oauthkey, repo, user): if hook['last_response']['code'] != 200: # print "Last response was an ERROR for hook:" # print hook['id'] - delete(hookurl, oauthkey, repo, user, hook['id']) + delete(module, hookurl, oauthkey, repo, user, hook['id']) return 0, current_hooks -def create(hookurl, oauthkey, repo, user): +def create(module, hookurl, oauthkey, repo, user): url = "%s/hooks" % repo values = { "active": True, @@ -107,29 +117,23 @@ def create(hookurl, oauthkey, repo, user): } data = json.dumps(values) auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '') - out='[]' - try : - req = urllib2.Request(url) - req.add_data(data) - req.add_header("Authorization", "Basic %s" % auth) - res = urllib2.urlopen(req) - out = res.read() - return 0, out - except urllib2.HTTPError, e : - if e.code == 422 : - return 0, out - -def delete(hookurl, oauthkey, repo, user, hookid): + headers = { + 'Authorization': 'Basic %s' % auth, + } + response, info = fetch_url(module, url, data=data, headers=headers, validate_certs=module.params['validate_certs']) + if info['status'] != 200: + return 0, '[]' + else: + return 0, response.read() + +def delete(module, hookurl, oauthkey, repo, user, hookid): url = "%s/hooks/%s" % (repo, hookid) auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '') - req = urllib2.Request(url) - req.get_method = lambda: 'DELETE' - req.add_header("Authorization", "Basic %s" % auth) - # req.add_header('Content-Type', 'application/xml') - # req.add_header('Accept', 'application/xml') - res = urllib2.urlopen(req) - out = res.read() - return out + headers = { + 'Authorization': 'Basic %s' % auth, + } + response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE', validate_certs=module.params['validate_certs']) + return response.read() def main(): module = AnsibleModule( @@ -139,6 +143,7 @@ def main(): 
oauthkey=dict(required=True), repo=dict(required=True), user=dict(required=True), + validate_certs=dict(default='yes', type='bool'), ) ) @@ -149,16 +154,16 @@ def main(): user = module.params['user'] if action == "list": - (rc, out) = list(hookurl, oauthkey, repo, user) + (rc, out) = list(module, hookurl, oauthkey, repo, user) if action == "clean504": - (rc, out) = clean504(hookurl, oauthkey, repo, user) + (rc, out) = clean504(module, hookurl, oauthkey, repo, user) if action == "cleanall": - (rc, out) = cleanall(hookurl, oauthkey, repo, user) + (rc, out) = cleanall(module, hookurl, oauthkey, repo, user) if action == "create": - (rc, out) = create(hookurl, oauthkey, repo, user) + (rc, out) = create(module, hookurl, oauthkey, repo, user) if rc != 0: module.fail_json(msg="failed", result=out) @@ -168,4 +173,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + main() From 5bc6eafba506fafd47a10c869e3ebf3a757b014c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 10 Mar 2014 16:06:52 -0500 Subject: [PATCH 227/772] Validate SSL certs accessed through urllib* * Adds another module utility file which generalizes the access of urls via the urllib* libraries. * Adds a new spec generator for common arguments. * Makes the user-agent string configurable. Fixes #6211 --- examples/ansible.cfg | 14 ++ lib/ansible/constants.py | 4 + lib/ansible/module_utils/basic.py | 10 +- lib/ansible/module_utils/ec2.py | 28 +++ lib/ansible/module_utils/known_hosts.py | 28 +++ lib/ansible/module_utils/rax.py | 29 ++- lib/ansible/module_utils/urls.py | 262 ++++++++++++++++++++++++ library/cloud/ec2_facts | 24 +-- library/database/riak | 21 +- library/monitoring/airbrake_deployment | 41 ++-- library/monitoring/boundary_meter | 61 ++---- library/monitoring/datadog_event | 18 +- library/monitoring/newrelic_deployment | 48 +---- library/monitoring/pagerduty | 35 ++-- library/net_infrastructure/dnsmadeeasy | 32 +-- library/net_infrastructure/netscaler | 40 ++-- library/network/get_url | 116 ++--------- library/notification/flowdock | 31 +-- library/notification/grove | 7 +- library/notification/hipchat | 35 +--- library/packaging/apt_key | 20 +- library/packaging/rpm_key | 17 +- library/source_control/github_hooks | 83 ++++---- 23 files changed, 600 insertions(+), 404 deletions(-) create mode 100644 lib/ansible/module_utils/urls.py diff --git a/examples/ansible.cfg b/examples/ansible.cfg index f543b2e4bc6..396974bf019 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -103,6 +103,20 @@ filter_plugins = /usr/share/ansible_plugins/filter_plugins # set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1 #nocolor = 1 +# the CA certificate path used for validating SSL certs. This path +# should exist on the controlling node, not the target nodes +# common locations: +# RHEL/CentOS: /etc/pki/tls/certs/ca-bundle.crt +# Fedora : /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem +# Ubuntu : /usr/share/ca-certificates/cacert.org/cacert.org.crt +#ca_file_path = + +# the http user-agent string to use when fetching urls. Some web server +# operators block the default urllib user agent as it is frequently used +# by malicious attacks/scripts, so we set it to something unique to +# avoid issues. 
+#http_user_agent = ansible-agent + [paramiko_connection] # uncomment this line to cause the paramiko connection plugin to not record new host diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 431e6eb742d..ed996f3bef0 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -144,6 +144,10 @@ DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', ' DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '/usr/share/ansible_plugins/filter_plugins') DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) +# URL Arguments for generic module urllib2 use +DEFAULT_HTTP_USER_AGENT = get_config(p, DEFAULTS, 'http_user_agent', 'ANSIBLE_HTTP_USER_AGENT', 'ansible-agent') +DEFAULT_CA_FILE_PATH = shell_expand_path(get_config(p, DEFAULTS, 'ca_file_path', 'ANSIBLE_CA_FILE_PATH', '')) + ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True) ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True) DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index c2be621d4bf..fd0b2edfc3b 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -59,6 +59,7 @@ import grp import pwd import platform import errno +import tempfile try: import json @@ -114,6 +115,7 @@ FILE_COMMON_ARGUMENTS=dict( remote_src = dict(), # used by assemble ) + def get_platform(): ''' what's the platform? example: Linux is a platform. ''' return platform.system() @@ -188,7 +190,7 @@ class AnsibleModule(object): os.environ['LANG'] = MODULE_LANG (self.params, self.args) = self._load_params() - self._legal_inputs = [ 'CHECKMODE', 'NO_LOG' ] + self._legal_inputs = ['CHECKMODE', 'NO_LOG'] self.aliases = self._handle_aliases() @@ -571,8 +573,9 @@ class AnsibleModule(object): def _check_invalid_arguments(self): for (k,v) in self.params.iteritems(): - if k in ('CHECKMODE', 'NO_LOG'): - continue + # these should be in legal inputs already + #if k in ('CHECKMODE', 'NO_LOG'): + # continue if k not in self._legal_inputs: self.fail_json(msg="unsupported parameter for module: %s" % k) @@ -1068,4 +1071,3 @@ class AnsibleModule(object): break return '%.2f %s' % (float(size)/ limit, suffix) - diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index 9156df766b2..58291c2d5d5 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -1,3 +1,31 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + try: from distutils.version import LooseVersion HAS_LOOSE_VERSION = True diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 000db9d1e62..36f5b87fff5 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -1,3 +1,31 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + def add_git_host_key(module, url, accept_hostkey=True): """ idempotently add a git url hostkey """ diff --git a/lib/ansible/module_utils/rax.py b/lib/ansible/module_utils/rax.py index 84e5686d24f..98623c7d38e 100644 --- a/lib/ansible/module_utils/rax.py +++ b/lib/ansible/module_utils/rax.py @@ -1,5 +1,32 @@ -import os +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import os def rax_argument_spec(): return dict( diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py new file mode 100644 index 00000000000..f251c6b407f --- /dev/null +++ b/lib/ansible/module_utils/urls.py @@ -0,0 +1,262 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Michael DeHaan , 2012-2013 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
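+#
+# What this new file provides, in brief:
+#   RequestWithMethod      - urllib2.Request subclass so DELETE/PUT can be issued
+#   SSLValidationHandler   - pre-flight check of the server certificate against a CA bundle
+#   url_argument_spec()    - the common url/force/http_agent/use_proxy/validate_certs options
+#   fetch_url()            - urllib2 wrapper returning (response, info) instead of raising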
+ +try: + import urllib + HAS_URLLIB = True +except: + HAS_URLLIB = False + +try: + import urllib2 + HAS_URLLIB2 = True +except: + HAS_URLLIB2 = False + +try: + import urlparse + HAS_URLPARSE = True +except: + HAS_URLPARSE = False + +try: + import ssl + HAS_SSL=True +except: + HAS_SSL=False + + +class RequestWithMethod(urllib2.Request): + ''' + Workaround for using DELETE/PUT/etc with urllib2 + Originally contained in library/net_infrastructure/dnsmadeeasy + ''' + + def __init__(self, url, method, data=None, headers={}): + self._method = method + urllib2.Request.__init__(self, url, data, headers) + + def get_method(self): + if self._method: + return self._method + else: + return urllib2.Request.get_method(self) + + +class SSLValidationHandler(urllib2.BaseHandler): + ''' + A custom handler class for SSL validation. + + Based on: + http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python + http://techknack.net/python-urllib2-handlers/ + ''' + + def __init__(self, module, hostname, port, ca_cert=None): + self.module = module + self.hostname = hostname + self.port = port + self.ca_cert = ca_cert + + def get_ca_cert(self): + # tries to find a valid CA cert in one of the + # standard locations for the current distribution + + if self.ca_cert and os.path.exists(self.ca_cert): + # the user provided a custom CA cert (ie. one they + # uploaded themselves), so use it + return self.ca_cert + + ca_cert = None + platform = get_platform() + distribution = get_distribution() + if platform == 'Linux': + if distribution in ('Fedora',): + ca_cert = '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem' + elif distribution in ('RHEL','CentOS','ScientificLinux'): + ca_cert = '/etc/pki/tls/certs/ca-bundle.crt' + elif distribution in ('Ubuntu','Debian'): + ca_cert = '/usr/share/ca-certificates/cacert.org/cacert.org.crt' + elif platform == 'FreeBSD': + ca_cert = '/usr/local/share/certs/ca-root.crt' + elif platform == 'OpenBSD': + ca_cert = '/etc/ssl/cert.pem' + elif platform == 'NetBSD': + ca_cert = '/etc/openssl/certs/ca-cert.pem' + elif platform == 'SunOS': + # FIXME? + pass + elif platform == 'AIX': + # FIXME? + pass + + if ca_cert and os.path.exists(ca_cert): + return ca_cert + elif os.path.exists('/etc/ansible/ca-cert.pem'): + # fall back to a user-deployed cert in a standard + # location if the OS platform one is not available + return '/etc/ansible/ca-cert.pem' + else: + # CA cert isn't available, no validation + return None + + def http_request(self, req): + try: + server_cert = ssl.get_server_certificate((self.hostname, self.port), ca_certs=self.get_ca_cert()) + except ssl.SSLError: + self.module.fail_json(msg='failed to validate the SSL certificate for %s:%s. 
You can use validate_certs=no, however this is unsafe and not recommended' % (self.hostname, self.port)) + return req + + https_request = http_request + + +def url_argument_spec(): + ''' + Creates an argument spec that can be used with any module + that will be requesting content via urllib/urllib2 + ''' + return dict( + url = dict(), + force = dict(default='no', aliases=['thirsty'], type='bool'), + http_agent = dict(default='ansible-httpget'), + use_proxy = dict(default='yes', type='bool'), + validate_certs = dict(default='yes', type='bool'), + ) + + +def fetch_url(module, url, data=None, headers=None, method=None, + use_proxy=False, validate_certs=True, force=False, last_mod_time=None, timeout=10): + ''' + Fetches a file from an HTTP/FTP server using urllib2 + ''' + + if not HAS_URLLIB: + module.fail_json(msg='urllib is not installed') + if not HAS_URLLIB2: + module.fail_json(msg='urllib2 is not installed') + elif not HAS_URLPARSE: + module.fail_json(msg='urlparse is not installed') + + r = None + handlers = [] + info = dict(url=url) + + parsed = urlparse.urlparse(url) + if parsed[0] == 'https': + if not HAS_SSL and validate_certs: + module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended') + elif validate_certs: + # do the cert validation + netloc = parsed[1] + if '@' in netloc: + netloc = netloc.split('@', 1)[1] + if ':' in netloc: + hostname, port = netloc.split(':', 1) + else: + hostname = netloc + port = 443 + # create the SSL validation handler and + # add it to the list of handlers + ssl_handler = SSLValidationHandler(module, hostname, port) + handlers.append(ssl_handler) + + if '@' in parsed[1]: + credentials, netloc = parsed[1].split('@', 1) + if ':' in credentials: + username, password = credentials.split(':', 1) + else: + username = credentials + password = '' + parsed = list(parsed) + parsed[1] = netloc + + passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + # this creates a password manager + passman.add_password(None, netloc, username, password) + # because we have put None at the start it will always + # use this username/password combination for urls + # for which `theurl` is a super-url + + authhandler = urllib2.HTTPBasicAuthHandler(passman) + # create the AuthHandler + handlers.append(authhandler) + + #reconstruct url without credentials + url = urlparse.urlunparse(parsed) + + if not use_proxy: + proxyhandler = urllib2.ProxyHandler({}) + handlers.append(proxyhandler) + + opener = urllib2.build_opener(*handlers) + urllib2.install_opener(opener) + + if method: + if method.upper() not in ('OPTIONS','GET','HEAD','POST','PUT','DELETE','TRACE','CONNECT'): + module.fail_json(msg='invalid HTTP request method; %s' % method.upper()) + request = RequestWithMethod(url, method.upper(), data) + else: + request = urllib2.Request(url, data) + + # add the custom agent header, to help prevent issues + # with sites that block the default urllib agent string + request.add_header('User-agent', module.params.get('http_agent')) + + # if we're ok with getting a 304, set the timestamp in the + # header, otherwise make sure we don't get a cached copy + if last_mod_time and not force: + tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000') + request.add_header('If-Modified-Since', tstamp) + else: + request.add_header('cache-control', 'no-cache') + + # user defined headers now, which may override things we've set above + if headers: + if not isinstance(headers, dict): + 
module.fail_json("headers provided to fetch_url() must be a dict") + for header in headers: + request.add_header(header, headers[header]) + + try: + if sys.version_info < (2,6,0): + # urlopen in python prior to 2.6.0 did not + # have a timeout parameter + r = urllib2.urlopen(request, None) + else: + r = urllib2.urlopen(request, None, timeout) + info.update(r.info()) + info['url'] = r.geturl() # The URL goes in too, because of redirects. + info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200)) + except urllib2.HTTPError, e: + info.update(dict(msg=str(e), status=e.code)) + except urllib2.URLError, e: + code = int(getattr(e, 'code', -1)) + info.update(dict(msg="Request failed: %s" % str(e), status=code)) + + return r, info + diff --git a/library/cloud/ec2_facts b/library/cloud/ec2_facts index 1c17fa5b717..c6a6670a58b 100644 --- a/library/cloud/ec2_facts +++ b/library/cloud/ec2_facts @@ -41,7 +41,6 @@ EXAMPLES = ''' when: ansible_ec2_instance_type == "t1.micro" ''' -import urllib2 import socket import re @@ -62,7 +61,8 @@ class Ec2Metadata(object): 'us-west-1', 'us-west-2') - def __init__(self, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None): + def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None): + self.module = module self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri @@ -70,12 +70,9 @@ class Ec2Metadata(object): self._prefix = 'ansible_ec2_%s' def _fetch(self, url): - try: - return urllib2.urlopen(url).read() - except urllib2.HTTPError: - return - except urllib2.URLError: - return + self.module.fail_json(msg="url is %s" % url) + (response, info) = fetch_url(self.module, url, force=True) + return response.read() def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']): new_fields = {} @@ -150,17 +147,20 @@ class Ec2Metadata(object): return data def main(): - - ec2_facts = Ec2Metadata().run() - ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts) + argument_spec = url_argument_spec() module = AnsibleModule( - argument_spec = dict(), + argument_spec = argument_spec, supports_check_mode = True, ) + + ec2_facts = Ec2Metadata(module).run() + ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts) + module.exit_json(**ec2_facts_result) # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * main() diff --git a/library/database/riak b/library/database/riak index 53faba6e983..e0a7552f0ae 100644 --- a/library/database/riak +++ b/library/database/riak @@ -138,24 +138,13 @@ def main(): while True: if time.time() > timeout: module.fail_json(msg='Timeout, could not fetch Riak stats.') - try: - if sys.version_info<(2,6,0): - stats_raw = urllib2.urlopen( - 'http://%s/stats' % (http_conn), None).read() - else: - stats_raw = urllib2.urlopen( - 'http://%s/stats' % (http_conn), None, 5).read() + (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5) + if info['status'] == 200: + stats_raw = response.read() break - except urllib2.HTTPError, e: - time.sleep(5) - except urllib2.URLError, e: - time.sleep(5) - except socket.timeout: - time.sleep(5) - except Exception, e: - module.fail_json(msg='Could not fetch Riak stats: %s' % e) + time.sleep(5) -# here we attempt to load those stats, + # here we attempt to load those stats, try: stats = json.loads(stats_raw) except: diff 
diff --git a/library/monitoring/airbrake_deployment b/library/monitoring/airbrake_deployment
index 8a4a834be7c..6a83459906a 100644
--- a/library/monitoring/airbrake_deployment
+++ b/library/monitoring/airbrake_deployment
@@ -52,6 +52,13 @@ options:
       - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
     required: false
     default: https://airbrake.io/deploys
+  validate_certs:
+    description:
+      - If C(no), SSL certificates for the target url will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    required: false
+    default: 'yes'
+    choices: ['yes', 'no']

# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
@@ -64,29 +71,12 @@ EXAMPLES = '''
                       revision=4.2
'''

-HAS_URLLIB = True
-try:
-    import urllib
-except ImportError:
-    HAS_URLLIB = False
-
-HAS_URLLIB2 = True
-try:
-    import urllib2
-except ImportError:
-    HAS_URLLIB2 = False
-
# ===========================================
# Module execution.
#

def main():

-    if not HAS_URLLIB:
-        module.fail_json(msg="urllib is not installed")
-    if not HAS_URLLIB2:
-        module.fail_json(msg="urllib2 is not installed")
-
     module = AnsibleModule(
         argument_spec=dict(
             token=dict(required=True),
@@ -95,6 +85,7 @@ def main():
             repo=dict(required=False),
             revision=dict(required=False),
-            url=dict(required=False, default='https://api.airbrake.io/deploys.txt')
+            url=dict(required=False, default='https://api.airbrake.io/deploys.txt'),
+            validate_certs=dict(default='yes', type='bool'),
         ),
         supports_check_mode=True
     )
@@ -123,18 +114,16 @@ def main():
         module.exit_json(changed=True)

     # Send the data to airbrake
-    try:
-        req = urllib2.Request(url, urllib.urlencode(params))
-        result=urllib2.urlopen(req)
-    except Exception, e:
-        module.fail_json(msg="unable to update airbrake via %s?%s : %s" % (url, urllib.urlencode(params), e))
+    data = urllib.urlencode(params)
+    response, info = fetch_url(module, url, data=data, validate_certs=module.params['validate_certs'])
+    if info['status'] == 200:
+        module.exit_json(changed=True)
     else:
-        if result.code == 200:
-            module.exit_json(changed=True)
-        else:
-            module.fail_json(msg="HTTP result code: %d connecting to %s" % (result.code, url))
+        module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))

# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
main()
diff --git a/library/monitoring/boundary_meter b/library/monitoring/boundary_meter
index 202dfd03ae3..3c9f90a4ce9 100644
--- a/library/monitoring/boundary_meter
+++ b/library/monitoring/boundary_meter
@@ -24,7 +24,6 @@ along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import json import datetime -import urllib2 import base64 import os @@ -74,12 +73,6 @@ EXAMPLES=''' ''' -try: - import urllib2 - HAS_URLLIB2 = True -except ImportError: - HAS_URLLIB2 = False - api_host = "api.boundary.com" config_directory = "/etc/bprobe" @@ -101,7 +94,7 @@ def build_url(name, apiid, action, meter_id=None, cert_type=None): elif action == "delete": return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id) -def http_request(name, apiid, apikey, action, meter_id=None, cert_type=None): +def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None): if meter_id is None: url = build_url(name, apiid, action) @@ -111,11 +104,11 @@ def http_request(name, apiid, apikey, action, meter_id=None, cert_type=None): else: url = build_url(name, apiid, action, meter_id, cert_type) - auth = auth_encode(apikey) - request = urllib2.Request(url) - request.add_header("Authorization", "Basic %s" % (auth)) - request.add_header("Content-Type", "application/json") - return request + headers = dict() + headers["Authorization"] = "Basic %s" % auth_encode(apikey) + headers["Content-Type"] = "application/json" + + return fetch_url(module, url, data=data, headers=headers) def create_meter(module, name, apiid, apikey): @@ -126,14 +119,10 @@ def create_meter(module, name, apiid, apikey): module.exit_json(status="Meter " + name + " already exists",changed=False) else: # If it doesn't exist, create it - request = http_request(name, apiid, apikey, action="create") - # A create request seems to need a json body with the name of the meter in it body = '{"name":"' + name + '"}' - request.add_data(body) + response, info = http_request(module, name, apiid, apikey, data=body, action="create") - try: - result = urllib2.urlopen(request) - except urllib2.URLError, e: + if info['status'] != 200: module.fail_json(msg="Failed to connect to api host to create meter") # If the config directory doesn't exist, create it @@ -160,15 +149,13 @@ def create_meter(module, name, apiid, apikey): def search_meter(module, name, apiid, apikey): - request = http_request(name, apiid, apikey, action="search") + response, info = http_request(module, name, apiid, apikey, action="search") - try: - result = urllib2.urlopen(request) - except urllib2.URLError, e: + if info['status'] != 200: module.fail_json("Failed to connect to api host to search for meter") # Return meters - return json.loads(result.read()) + return json.loads(response.read()) def get_meter_id(module, name, apiid, apikey): # In order to delete the meter we need its id @@ -186,16 +173,9 @@ def delete_meter(module, name, apiid, apikey): if meter_id is None: return 1, "Meter does not exist, so can't delete it" else: - action = "delete" - request = http_request(name, apiid, apikey, action, meter_id) - # See http://stackoverflow.com/questions/4511598/how-to-make-http-delete-method-using-urllib2 - # urllib2 only does GET or POST I believe, but here we need delete - request.get_method = lambda: 'DELETE' - - try: - result = urllib2.urlopen(request) - except urllib2.URLError, e: - module.fail_json("Failed to connect to api host to delete meter") + response, info = http_request(module, name, apiid, apikey, action, meter_id) + if info['status'] != 200: + module.fail_json("Failed to delete meter") # Each new meter gets a new key.pem and ca.pem file, so they should be deleted types = ['cert', 'key'] @@ -214,17 +194,14 @@ def download_request(module, name, apiid, apikey, cert_type): if meter_id is not None: action = "certificates" - request = 
http_request(name, apiid, apikey, action, meter_id, cert_type) - - try: - result = urllib2.urlopen(request) - except urllib2.URLError, e: + response, info = http_request(module, name, apiid, apikey, action, meter_id, cert_type) + if info['status'] != 200: module.fail_json("Failed to connect to api host to download certificate") if result: try: cert_file_path = '%s/%s.pem' % (config_directory,cert_type) - body = result.read() + body = response.read() cert_file = open(cert_file_path, 'w') cert_file.write(body) cert_file.close @@ -238,9 +215,6 @@ def download_request(module, name, apiid, apikey, cert_type): def main(): - if not HAS_URLLIB2: - module.fail_json(msg="urllib2 is not installed") - module = AnsibleModule( argument_spec=dict( state=dict(required=True, choices=['present', 'absent']), @@ -268,5 +242,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * main() diff --git a/library/monitoring/datadog_event b/library/monitoring/datadog_event index 629e86e98ab..878aee6d343 100644 --- a/library/monitoring/datadog_event +++ b/library/monitoring/datadog_event @@ -67,7 +67,6 @@ datadog_event: title="Testing from ansible" text="Test!" ''' import socket -from urllib2 import urlopen, Request, URLError def main(): module = AnsibleModule( @@ -97,8 +96,7 @@ def main(): post_event(module) def post_event(module): - uri = "https://app.datadoghq.com/api/v1/events?api_key=" + \ - module.params['api_key'] + uri = "https://app.datadoghq.com/api/v1/events?api_key=%s" % module.params['api_key'] body = dict( title=module.params['title'], @@ -117,22 +115,20 @@ def post_event(module): json_body = module.jsonify(body) headers = {"Content-Type": "application/json"} - request = Request(uri, json_body, headers, unverifiable=True) - try: - response = urlopen(request) + (response, info) = fetch_url(module, uri, data=json_body, headers=headers) + if info['status'] == 200: response_body = response.read() response_json = module.from_json(response_body) if response_json['status'] == 'ok': module.exit_json(changed=True) else: module.fail_json(msg=response) - - except URLError, e: - module.fail_json(msg="URL error: %s." % e) - except socket.error, e: - module.fail_json(msg="Socket error: %s to %s" % (e, uri)) + else: + module.fail_json(**info) # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + main() diff --git a/library/monitoring/newrelic_deployment b/library/monitoring/newrelic_deployment index de64651969c..08132722e1d 100644 --- a/library/monitoring/newrelic_deployment +++ b/library/monitoring/newrelic_deployment @@ -75,29 +75,12 @@ EXAMPLES = ''' revision=1.0 ''' -HAS_URLLIB = True -try: - import urllib -except ImportError: - HAS_URLLIB = False - -HAS_URLLIB2 = True -try: - import urllib2 -except ImportError: - HAS_URLLIB2 = False - # =========================================== # Module execution. # def main(): - if not HAS_URLLIB: - module.fail_json(msg="urllib is not installed") - if not HAS_URLLIB2: - module.fail_json(msg="urllib2 is not installed") - module = AnsibleModule( argument_spec=dict( token=dict(required=True), @@ -134,29 +117,20 @@ def main(): module.exit_json(changed=True) # Send the data to NewRelic - try: - req = urllib2.Request("https://rpm.newrelic.com/deployments.xml", urllib.urlencode(params)) - req.add_header('x-api-key',module.params["token"]) - result=urllib2.urlopen(req) - # urlopen behaves differently in python 2.4 and 2.6 so we handle - # both cases here. 
In python 2.4 it throws an exception if the - # return code is anything other than a 200. In python 2.6 it - # doesn't throw an exception for any 2xx return codes. In both - # cases we expect newrelic should return a 201 on success. So - # to handle both cases, both the except & else cases below are - # effectively identical. - except Exception, e: - if e.code == 201: - module.exit_json(changed=True) - else: - module.fail_json(msg="unable to update newrelic: %s" % e) + url = "https://rpm.newrelic.com/deployments.xml" + data = urllib.urlencode(params) + headers = { + 'x-api-key': module.params["token"], + } + response, info = fetch_url(module, url, data=data, headers=headers) + if info['status'] in (200, 201): + module.exit_json(changed=True) else: - if result.code == 201: - module.exit_json(changed=True) - else: - module.fail_json(msg="result code: %d" % result.code) + module.fail_json(msg="unable to update newrelic: %s" % info['msg']) # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + main() diff --git a/library/monitoring/pagerduty b/library/monitoring/pagerduty index bfd0573f4de..9a7f21d0779 100644 --- a/library/monitoring/pagerduty +++ b/library/monitoring/pagerduty @@ -87,24 +87,23 @@ EXAMPLES=''' import json import datetime -import urllib2 import base64 -def ongoing(name, user, passwd): +def ongoing(module, name, user, passwd): url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing" auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '') + headers = {"Authorization": "Basic %s" % auth} - req = urllib2.Request(url) - req.add_header("Authorization", "Basic %s" % auth) - res = urllib2.urlopen(req) - out = res.read() + response, info = fetch_url(module, url, headers=headers) + if info['status'] != 200: + module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg']) - return False, out + return False, response.read() -def create(name, user, passwd, service, hours, desc): +def create(module, name, user, passwd, service, hours, desc): now = datetime.datetime.utcnow() later = now + datetime.timedelta(hours=int(hours)) @@ -113,15 +112,17 @@ def create(name, user, passwd, service, hours, desc): url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows" auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '') + headers = { + 'Authorization': 'Basic %s' % auth, + 'Content-Type' : 'application/json', + } data = json.dumps({'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': [service]}}) - req = urllib2.Request(url, data) - req.add_header("Authorization", "Basic %s" % auth) - req.add_header('Content-Type', 'application/json') - res = urllib2.urlopen(req) - out = res.read() + response, info = fetch_url(module, url, data=data, headers=headers, method='POST') + if info['status'] != 200: + module.fail_json(msg="failed to create the window: %s" % info['msg']) - return False, out + return False, response.read() def main(): @@ -149,10 +150,10 @@ def main(): if state == "running" or state == "started": if not service: module.fail_json(msg="service not specified") - (rc, out) = create(name, user, passwd, service, hours, desc) + (rc, out) = create(module, name, user, passwd, service, hours, desc) if state == "ongoing": - (rc, out) = ongoing(name, user, passwd) + (rc, out) = ongoing(module, name, user, passwd) if rc != 0: module.fail_json(msg="failed", result=out) @@ -161,4 +162,6 @@ def main(): # import module snippets from 
ansible.module_utils.basic import * +from ansible.module_utils.urls import * + main() diff --git a/library/net_infrastructure/dnsmadeeasy b/library/net_infrastructure/dnsmadeeasy index d4af13e884a..9e2c14480eb 100644 --- a/library/net_infrastructure/dnsmadeeasy +++ b/library/net_infrastructure/dnsmadeeasy @@ -106,8 +106,6 @@ EXAMPLES = ''' IMPORT_ERROR = None try: - import urllib - import urllib2 import json from time import strftime, gmtime import hashlib @@ -115,22 +113,6 @@ try: except ImportError, e: IMPORT_ERROR = str(e) - -class RequestWithMethod(urllib2.Request): - - """Workaround for using DELETE/PUT/etc with urllib2""" - - def __init__(self, url, method, data=None, headers={}): - self._method = method - urllib2.Request.__init__(self, url, data, headers) - - def get_method(self): - if self._method: - return self._method - else: - return urllib2.Request.get_method(self) - - class DME2: def __init__(self, apikey, secret, domain, module): @@ -169,16 +151,10 @@ class DME2: url = self.baseurl + resource if data and not isinstance(data, basestring): data = urllib.urlencode(data) - request = RequestWithMethod(url, method, data, self._headers()) - try: - response = urllib2.urlopen(request) - except urllib2.HTTPError, e: - self.module.fail_json( - msg="%s returned %s, with body: %s" % (url, e.code, e.read())) - except Exception, e: - self.module.fail_json( - msg="Failed contacting: %s : Exception %s" % (url, e.message())) + response, info = fetch_url(self.module, url, data=data, method=method) + if info['status'] not in (200, 201, 204): + self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) try: return json.load(response) @@ -338,4 +314,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + main() diff --git a/library/net_infrastructure/netscaler b/library/net_infrastructure/netscaler index 1aa370895d5..4756d90abdc 100644 --- a/library/net_infrastructure/netscaler +++ b/library/net_infrastructure/netscaler @@ -73,6 +73,14 @@ options: default: server choices: ["server", "service"] aliases: [] + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+    required: false
+    default: 'yes'
+    choices: ['yes', 'no']
+
 requirements: [ "urllib", "urllib2" ]
 author: Nandor Sivok
 '''

@@ -90,8 +98,6 @@ ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=api

 import json
-import urllib
-import urllib2
 import base64
 import socket

@@ -100,23 +106,25 @@ class netscaler(object):

     _nitro_base_url = '/nitro/v1/'

+    def __init__(self, module):
+        self.module = module
+
     def http_request(self, api_endpoint, data_json={}):
         request_url = self._nsc_protocol + '://' + self._nsc_host + self._nitro_base_url + api_endpoint

-        data_json = urllib.urlencode(data_json)
-        if len(data_json):
-            req = urllib2.Request(request_url, data_json)
-            req.add_header('Content-Type', 'application/x-www-form-urlencoded')
-        else:
-            req = urllib2.Request(request_url)
+        data_json = urllib.urlencode(data_json)
+        if not len(data_json):
+            data_json = None

-        base64string = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip()
-        req.add_header('Authorization', "Basic %s" % base64string)
+        auth = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip()
+        headers = {
+            'Authorization': 'Basic %s' % auth,
+            'Content-Type' : 'application/x-www-form-urlencoded',
+        }

-        resp = urllib2.urlopen(req)
-        resp = json.load(resp)
+        response, info = fetch_url(self.module, request_url, data=data_json, validate_certs=self.module.params['validate_certs'])

-        return resp
+        return json.loads(response.read())

     def prepare_request(self, action):
         resp = self.http_request(
@@ -134,7 +142,7 @@ class netscaler(object):

 def core(module):
-    n = netscaler()
+    n = netscaler(module)
     n._nsc_host = module.params.get('nsc_host')
     n._nsc_user = module.params.get('user')
     n._nsc_pass = module.params.get('password')
@@ -158,7 +166,8 @@ def main():
             password = dict(required=True),
             action = dict(default='enable', choices=['enable','disable']),
             name = dict(default=socket.gethostname()),
-            type = dict(default='server', choices=['service', 'server'])
+            type = dict(default='server', choices=['service', 'server']),
+            validate_certs=dict(default='yes', type='bool'),
         )
     )

@@ -177,4 +186,5 @@ def main():

 # import module snippets
 from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
 main()
diff --git a/library/network/get_url b/library/network/get_url
index 9704b8dbadb..c249c44049a 100644
--- a/library/network/get_url
+++ b/library/network/get_url
@@ -83,6 +83,13 @@ options:
     required: false
     default: 'yes'
     choices: ['yes', 'no']
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+ required: false + default: 'yes' + choices: ['yes', 'no'] others: description: - all arguments accepted by the M(file) module also work here @@ -108,19 +115,6 @@ try: except ImportError: HAS_HASHLIB=False -try: - import urllib2 - HAS_URLLIB2 = True -except ImportError: - HAS_URLLIB2 = False - -try: - import urlparse - import socket - HAS_URLPARSE = True -except ImportError: - HAS_URLPARSE=False - # ============================================================== # url handling @@ -130,80 +124,14 @@ def url_filename(url): return 'index.html' return fn -def url_do_get(module, url, dest, use_proxy, last_mod_time, force): - """ - Get url and return request and info - Credits: http://stackoverflow.com/questions/7006574/how-to-download-file-from-ftp - """ - - USERAGENT = 'ansible-httpget' - info = dict(url=url, dest=dest) - r = None - handlers = [] - - parsed = urlparse.urlparse(url) - - if '@' in parsed[1]: - credentials, netloc = parsed[1].split('@', 1) - if ':' in credentials: - username, password = credentials.split(':', 1) - else: - username = credentials - password = '' - parsed = list(parsed) - parsed[1] = netloc - - passman = urllib2.HTTPPasswordMgrWithDefaultRealm() - # this creates a password manager - passman.add_password(None, netloc, username, password) - # because we have put None at the start it will always - # use this username/password combination for urls - # for which `theurl` is a super-url - - authhandler = urllib2.HTTPBasicAuthHandler(passman) - # create the AuthHandler - handlers.append(authhandler) - - #reconstruct url without credentials - url = urlparse.urlunparse(parsed) - - if not use_proxy: - proxyhandler = urllib2.ProxyHandler({}) - handlers.append(proxyhandler) - - opener = urllib2.build_opener(*handlers) - urllib2.install_opener(opener) - request = urllib2.Request(url) - request.add_header('User-agent', USERAGENT) - - if last_mod_time and not force: - tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000') - request.add_header('If-Modified-Since', tstamp) - else: - request.add_header('cache-control', 'no-cache') - - try: - r = urllib2.urlopen(request) - info.update(r.info()) - info['url'] = r.geturl() # The URL goes in too, because of redirects. - info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200)) - except urllib2.HTTPError, e: - # Must not fail_json() here so caller can handle HTTP 304 unmodified - info.update(dict(msg=str(e), status=e.code)) - except urllib2.URLError, e: - code = getattr(e, 'code', -1) - module.fail_json(msg="Request failed: %s" % str(e), status_code=code) - - return r, info - -def url_get(module, url, dest, use_proxy, last_mod_time, force): +def url_get(module, url, dest, use_proxy, last_mod_time, force, validate_certs): """ Download data from the url and store in a temporary file. 
Return (tempfile, info about the request) """ - req, info = url_do_get(module, url, dest, use_proxy, last_mod_time, force) + rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, validate_certs=validate_certs) if info['status'] == 304: module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', '')) @@ -215,12 +143,12 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force): fd, tempname = tempfile.mkstemp() f = os.fdopen(fd, 'wb') try: - shutil.copyfileobj(req, f) + shutil.copyfileobj(rsp, f) except Exception, err: os.remove(tempname) module.fail_json(msg="failed to create temporary content file: %s" % str(err)) f.close() - req.close() + rsp.close() return tempname, info def extract_filename_from_headers(headers): @@ -247,21 +175,15 @@ def extract_filename_from_headers(headers): def main(): - # does this really happen on non-ancient python? - if not HAS_URLLIB2: - module.fail_json(msg="urllib2 is not installed") - if not HAS_URLPARSE: - module.fail_json(msg="urlparse is not installed") + argument_spec = url_argument_spec() + argument_spec.update( + dest = dict(required=True), + sha256sum = dict(default=''), + ) module = AnsibleModule( # not checking because of daisy chain to file module - argument_spec = dict( - url = dict(required=True), - dest = dict(required=True), - force = dict(default='no', aliases=['thirsty'], type='bool'), - sha256sum = dict(default=''), - use_proxy = dict(default='yes', type='bool') - ), + argument_spec = argument_spec, add_file_common_args=True ) @@ -270,6 +192,7 @@ def main(): force = module.params['force'] sha256sum = module.params['sha256sum'] use_proxy = module.params['use_proxy'] + validate_certs = module.params['validate_certs'] dest_is_dir = os.path.isdir(dest) last_mod_time = None @@ -284,7 +207,7 @@ def main(): last_mod_time = datetime.datetime.utcfromtimestamp(mtime) # download to tmpsrc - tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force) + tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, validate_certs) # Now the request has completed, we can finally generate the final # destination file name from the info dict. @@ -366,4 +289,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * main() diff --git a/library/notification/flowdock b/library/notification/flowdock index a5be40d1f10..32817d756dc 100644 --- a/library/notification/flowdock +++ b/library/notification/flowdock @@ -96,31 +96,12 @@ EXAMPLES = ''' tags=tag1,tag2,tag3 ''' -HAS_URLLIB = True -try: - import urllib -except ImportError: - HAS_URLLIB = False - -HAS_URLLIB2 = True -try: - import urllib2 -except ImportError: - HAS_URLLIB2 = False - - - # =========================================== # Module execution. 
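The get_url hunks above collapse the module's hand-rolled urllib2 plumbing into a single fetch_url() call. A condensed sketch of the resulting download path, built only from the fetch_url() contract visible in the patch (a response object plus an info dict carrying 'status' and 'msg'); the helper name fetch_to_tempfile is invented for illustration, and fetch_url() is assumed to be in scope via the ansible.module_utils.urls snippet:

    import os
    import shutil
    import tempfile

    def fetch_to_tempfile(module, url, validate_certs=True):
        # fetch_url() returns (response, info); info carries 'status' and 'msg'
        rsp, info = fetch_url(module, url, validate_certs=validate_certs)
        if info['status'] == 304:
            # remote side reports not-modified; nothing to download
            module.exit_json(url=url, changed=False, msg=info.get('msg', ''))
        if info['status'] != 200:
            module.fail_json(msg=info['msg'], status_code=info['status'])
        fd, tempname = tempfile.mkstemp()
        f = os.fdopen(fd, 'wb')
        try:
            shutil.copyfileobj(rsp, f)   # stream the body straight to disk
        finally:
            f.close()
            rsp.close()
        return tempname, info

Centralising the transport this way is what lets each module converted in this commit inherit proxy handling, basic auth and certificate validation without local code.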
# def main(): - if not HAS_URLLIB: - module.fail_json(msg="urllib is not installed") - if not HAS_URLLIB2: - module.fail_json(msg="urllib2 is not installed") - module = AnsibleModule( argument_spec=dict( token=dict(required=True), @@ -187,14 +168,16 @@ def main(): module.exit_json(changed=False) # Send the data to Flowdock - try: - response = urllib2.urlopen(url, urllib.urlencode(params)) - except Exception, e: - module.fail_json(msg="unable to send msg: %s" % e) + data = urllib.urlencode(params) + response, info = fetch_url(module, url, data=data) + if info['status'] != 200: + module.fail_json(msg="unable to send msg: %s" % info['msg']) - module.exit_json(changed=False, msg=module.params["msg"]) + module.exit_json(changed=True, msg=module.params["msg"]) # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + main() diff --git a/library/notification/grove b/library/notification/grove index b759f025e29..1e2132cfb73 100644 --- a/library/notification/grove +++ b/library/notification/grove @@ -41,8 +41,6 @@ EXAMPLES = ''' message=deployed {{ target }} ''' -import urllib - BASE_URL = 'https://grove.io/api/notice/%s/' # ============================================================== @@ -57,7 +55,10 @@ def do_notify_grove(module, channel_token, service, message, url=None, icon_url= if icon_url is not None: my_data['icon_url'] = icon_url - urllib.urlopen(my_url, urllib.urlencode(my_data)) + data = urllib.urlencode(my_data) + response, info = fetch_url(module, my_url, data=data) + if info['status'] != 200: + module.fail_json(msg="failed to send notification: %s" % info['msg']) # ============================================================== # main diff --git a/library/notification/hipchat b/library/notification/hipchat index eec2b8c3618..c4b36d64ce7 100644 --- a/library/notification/hipchat +++ b/library/notification/hipchat @@ -60,22 +60,10 @@ EXAMPLES = ''' # HipChat module specific support methods. # -HAS_URLLIB = True -try: - import urllib -except ImportError: - HAS_URLLIB = False - -HAS_URLLIB2 = True -try: - import urllib2 -except ImportError: - HAS_URLLIB2 = False - MSG_URI = "https://api.hipchat.com/v1/rooms/message?" 
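The flowdock and grove conversions above, and the hipchat one below, all land on the same POST idiom: urlencode the form fields, hand the result to fetch_url(), and branch on info['status'] rather than catching urllib2 exceptions. A minimal sketch under the same assumptions (the helper name and its arguments are illustrative):

    import urllib

    def post_notification(module, url, params):
        # a non-None data argument makes fetch_url() issue a POST with an
        # application/x-www-form-urlencoded body
        data = urllib.urlencode(params)
        response, info = fetch_url(module, url, data=data)
        if info['status'] != 200:
            module.fail_json(msg="unable to send msg: %s" % info['msg'])
        return response.read()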
-def send_msg(token, room, msg_from, msg, msg_format='text', +def send_msg(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False): '''sending message to hipchat''' @@ -92,8 +80,12 @@ def send_msg(token, room, msg_from, msg, msg_format='text', params['notify'] = 0 url = MSG_URI + "auth_token=%s" % (token) - response = urllib2.urlopen(url, urllib.urlencode(params)) - return response.read() + data = urllib.urlencode(params) + response, info = fetch_url(module, url, data=data) + if info['status'] == 200: + return response.read() + else: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) # =========================================== @@ -102,11 +94,6 @@ def send_msg(token, room, msg_from, msg, msg_format='text', def main(): - if not HAS_URLLIB: - module.fail_json(msg="urllib is not installed") - if not HAS_URLLIB2: - module.fail_json(msg="urllib2 is not installed") - module = AnsibleModule( argument_spec=dict( token=dict(required=True), @@ -130,15 +117,15 @@ def main(): notify = module.params["notify"] try: - send_msg(token, room, msg_from, msg, msg_format, - color, notify) + send_msg(module, token, room, msg_from, msg, msg_format, color, notify) except Exception, e: module.fail_json(msg="unable to sent msg: %s" % e) changed = True - module.exit_json(changed=changed, room=room, msg_from=msg_from, - msg=msg) + module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + main() diff --git a/library/packaging/apt_key b/library/packaging/apt_key index eee86337020..ff05bb93d1a 100644 --- a/library/packaging/apt_key +++ b/library/packaging/apt_key @@ -64,6 +64,14 @@ options: default: present description: - used to specify if key is being added or revoked + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + ''' EXAMPLES = ''' @@ -88,7 +96,6 @@ EXAMPLES = ''' # FIXME: standardize into module_common -from urllib2 import urlopen, URLError from traceback import format_exc from re import compile as re_compile # FIXME: standardize into module_common @@ -133,11 +140,8 @@ def download_key(module, url): if url is None: module.fail_json(msg="needed a URL but was not specified") try: - connection = urlopen(url) - if connection is None: - module.fail_json("error connecting to download key from url") - data = connection.read() - return data + rsp, info = fetch_url(module, url, validate_certs=module.params['validate_certs']) + return rsp.read() except Exception: module.fail_json(msg="error getting key id from url", traceback=format_exc()) @@ -175,7 +179,8 @@ def main(): file=dict(required=False), key=dict(required=False), keyring=dict(required=False), - state=dict(required=False, choices=['present', 'absent'], default='present') + state=dict(required=False, choices=['present', 'absent'], default='present'), + validate_certs=dict(default='yes', type='bool'), ), supports_check_mode=True ) @@ -240,4 +245,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * main() diff --git a/library/packaging/rpm_key b/library/packaging/rpm_key index 82532477348..9d85f30ac8b 100644 --- a/library/packaging/rpm_key +++ b/library/packaging/rpm_key @@ -42,6 +42,14 @@ options: choices: [present, absent] description: - Wheather the key will be imported or removed from the rpm db. + validate_certs: + description: + - If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + ''' EXAMPLES = ''' @@ -57,7 +65,6 @@ EXAMPLES = ''' import syslog import os.path import re -import urllib2 import tempfile # Attempt to download at most 8192 bytes. @@ -116,8 +123,8 @@ class RpmKey: def fetch_key(self, url, maxbytes=MAXBYTES): """Downloads a key from url, returns a valid path to a gpg key""" try: - fd = urllib2.urlopen(url) - key = fd.read(maxbytes) + rsp, info = fetch_url(self.module, url, validate_certs=self.module.params['validate_certs']) + key = rsp.read(maxbytes) if not is_pubkey(key): self.module.fail_json(msg="Not a public key: %s" % url) tmpfd, tmpname = tempfile.mkstemp() @@ -187,7 +194,8 @@ def main(): module = AnsibleModule( argument_spec = dict( state=dict(default='present', choices=['present', 'absent'], type='str'), - key=dict(required=True, type='str') + key=dict(required=True, type='str'), + validate_certs=dict(default='yes', type='bool'), ), supports_check_mode=True ) @@ -198,4 +206,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * main() diff --git a/library/source_control/github_hooks b/library/source_control/github_hooks index 55eb8d3c8d3..c5c5b648c7a 100644 --- a/library/source_control/github_hooks +++ b/library/source_control/github_hooks @@ -19,7 +19,6 @@ # along with Ansible. If not, see . import json -import urllib2 import base64 DOCUMENTATION = ''' @@ -51,6 +50,14 @@ options: - This tells the githooks module what you want it to do. required: true choices: [ "create", "cleanall" ] + validate_certs: + description: + - If C(no), SSL certificates for the target repo will not be validated. 
This should only be used
+        on personally controlled sites using self-signed certificates.
+    required: false
+    default: 'yes'
+    choices: ['yes', 'no']
+
 author: Phillip Gentry, CX Inc
 '''
 
@@ -62,16 +69,19 @@ EXAMPLES = '''
 - local_action: github_hooks action=cleanall user={{ gituser }} oauthkey={{ oauthkey }} repo={{ repo }}
 '''
 
-def list(hookurl, oauthkey, repo, user):
+def list(module, hookurl, oauthkey, repo, user):
     url = "%s/hooks" % repo
     auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
-    req = urllib2.Request(url)
-    req.add_header("Authorization", "Basic %s" % auth)
-    res = urllib2.urlopen(req)
-    out = res.read()
-    return False, out
-
-def clean504(hookurl, oauthkey, repo, user):
+    headers = {
+        'Authorization': 'Basic %s' % auth,
+    }
+    response, info = fetch_url(module, url, headers=headers, validate_certs=module.params['validate_certs'])
+    if info['status'] != 200:
+        return False, ''
+    else:
+        return False, response.read()
+
+def clean504(module, hookurl, oauthkey, repo, user):
-    current_hooks = list(hookurl, oauthkey, repo, user)[1]
+    current_hooks = list(module, hookurl, oauthkey, repo, user)[1]
     decoded = json.loads(current_hooks)
 
@@ -79,11 +89,11 @@ def clean504(hookurl, oauthkey, repo, user):
         if hook['last_response']['code'] == 504:
             # print "Last response was an ERROR for hook:"
             # print hook['id']
-            delete(hookurl, oauthkey, repo, user, hook['id'])
+            delete(module, hookurl, oauthkey, repo, user, hook['id'])
 
     return 0, current_hooks
 
-def cleanall(hookurl, oauthkey, repo, user):
+def cleanall(module, hookurl, oauthkey, repo, user):
-    current_hooks = list(hookurl, oauthkey, repo, user)[1]
+    current_hooks = list(module, hookurl, oauthkey, repo, user)[1]
     decoded = json.loads(current_hooks)
 
@@ -91,11 +101,11 @@ def cleanall(hookurl, oauthkey, repo, user):
         if hook['last_response']['code'] != 200:
             # print "Last response was an ERROR for hook:"
             # print hook['id']
-            delete(hookurl, oauthkey, repo, user, hook['id'])
+            delete(module, hookurl, oauthkey, repo, user, hook['id'])
 
     return 0, current_hooks
 
-def create(hookurl, oauthkey, repo, user):
+def create(module, hookurl, oauthkey, repo, user):
     url = "%s/hooks" % repo
     values = {
         "active": True,
@@ -107,29 +117,23 @@
     }
     data = json.dumps(values)
     auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
-    out='[]'
-    try :
-        req = urllib2.Request(url)
-        req.add_data(data)
-        req.add_header("Authorization", "Basic %s" % auth)
-        res = urllib2.urlopen(req)
-        out = res.read()
-        return 0, out
-    except urllib2.HTTPError, e :
-        if e.code == 422 :
-            return 0, out
-
-def delete(hookurl, oauthkey, repo, user, hookid):
+    headers = {
+        'Authorization': 'Basic %s' % auth,
+    }
+    response, info = fetch_url(module, url, data=data, headers=headers, validate_certs=module.params['validate_certs'])
+    if info['status'] != 200:
+        return 0, '[]'
+    else:
+        return 0, response.read()
+
+def delete(module, hookurl, oauthkey, repo, user, hookid):
     url = "%s/hooks/%s" % (repo, hookid)
     auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
-    req = urllib2.Request(url)
-    req.get_method = lambda: 'DELETE'
-    req.add_header("Authorization", "Basic %s" % auth)
-    # req.add_header('Content-Type', 'application/xml')
-    # req.add_header('Accept', 'application/xml')
-    res = urllib2.urlopen(req)
-    out = res.read()
-    return out
+    headers = {
+        'Authorization': 'Basic %s' % auth,
+    }
+    response, info = fetch_url(module, url, headers=headers, method='DELETE', validate_certs=module.params['validate_certs'])
+    return response.read()
 
 def main():
     module = AnsibleModule(
         argument_spec=dict(
oauthkey=dict(required=True), repo=dict(required=True), user=dict(required=True), + validate_certs=dict(default='yes', type='bool'), ) ) @@ -149,16 +154,16 @@ def main(): user = module.params['user'] if action == "list": - (rc, out) = list(hookurl, oauthkey, repo, user) + (rc, out) = list(module, hookurl, oauthkey, repo, user) if action == "clean504": - (rc, out) = clean504(hookurl, oauthkey, repo, user) + (rc, out) = clean504(module, hookurl, oauthkey, repo, user) if action == "cleanall": - (rc, out) = cleanall(hookurl, oauthkey, repo, user) + (rc, out) = cleanall(module, hookurl, oauthkey, repo, user) if action == "create": - (rc, out) = create(hookurl, oauthkey, repo, user) + (rc, out) = create(module, hookurl, oauthkey, repo, user) if rc != 0: module.fail_json(msg="failed", result=out) @@ -168,4 +173,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + main() From ba0fec4f4238e381adc5f5a2f286bcd5b32f0586 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 10 Mar 2014 16:11:24 -0500 Subject: [PATCH 228/772] Force command action to not be executed by the shell unless specifically enabled --- lib/ansible/module_utils/basic.py | 50 +++-- lib/ansible/module_utils/redhat.py | 252 ++++++++++++++++++++++ lib/ansible/runner/lookup_plugins/pipe.py | 2 +- library/cloud/virt | 18 +- library/commands/command | 2 +- library/files/synchronize | 9 +- library/notification/osx_say | 2 - library/packaging/easy_install | 4 +- library/packaging/npm | 5 +- library/packaging/pacman | 12 +- library/packaging/pip | 13 +- library/packaging/redhat_subscription | 81 +++---- library/packaging/rhn_register | 81 +------ library/packaging/urpmi | 13 +- library/source_control/bzr | 29 ++- library/source_control/git | 67 +++--- library/system/service | 4 +- library/system/setup | 25 ++- library/web_infrastructure/django_manage | 3 +- 19 files changed, 427 insertions(+), 245 deletions(-) create mode 100644 lib/ansible/module_utils/redhat.py diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 8025563e58e..6e47dd4560d 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -991,12 +991,13 @@ class AnsibleModule(object): # rename might not preserve context self.set_context_if_different(dest, context, False) - def run_command(self, args, check_rc=False, close_fds=False, executable=None, data=None, binary_data=False, path_prefix=None): + def run_command(self, args, check_rc=False, close_fds=False, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False): ''' Execute a command, returns rc, stdout, and stderr. args is the command to run If args is a list, the command will be run with shell=False. - Otherwise, the command will be run with shell=True when args is a string. + If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False + If args is a string and use_unsafe_shell=True it run with shell=True. Other arguments: - check_rc (boolean) Whether to call fail_json in case of non zero RC. Default is False. @@ -1005,13 +1006,18 @@ class AnsibleModule(object): - executable (string) See documentation for subprocess.Popen(). Default is None. 
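+        - cwd (string)          If given, run the command from this
+                                working directory.  Default is None.
+        - use_unsafe_shell (boolean)  Pass a string command to the shell
+                                verbatim, as described above.  Default is
+                                False.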
''' + + shell = False if isinstance(args, list): - shell = False - elif isinstance(args, basestring): + pass + elif isinstance(args, basestring) and use_unsafe_shell: shell = True + elif isinstance(args, basestring): + args = shlex.split(args) else: msg = "Argument 'args' to run_command must be list or string" self.fail_json(rc=257, cmd=args, msg=msg) + rc = 0 msg = None st_in = None @@ -1047,25 +1053,25 @@ class AnsibleModule(object): if data: st_in = subprocess.PIPE + + kwargs = dict( + executable=executable, + shell=shell, + close_fds=close_fds, + stdin= st_in, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + + if path_prefix: + kwargs['env'] = env + if cwd: + kwargs['cwd'] = cwd + + try: - if path_prefix is not None: - cmd = subprocess.Popen(args, - executable=executable, - shell=shell, - close_fds=close_fds, - stdin=st_in, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=env) - else: - cmd = subprocess.Popen(args, - executable=executable, - shell=shell, - close_fds=close_fds, - stdin=st_in, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - + cmd = subprocess.Popen(args, **kwargs) + if data: if not binary_data: data += '\\n' diff --git a/lib/ansible/module_utils/redhat.py b/lib/ansible/module_utils/redhat.py new file mode 100644 index 00000000000..a1081f9c8c7 --- /dev/null +++ b/lib/ansible/module_utils/redhat.py @@ -0,0 +1,252 @@ +import os +import re +import types +import ConfigParser +import shlex + + +class RegistrationBase(object): + def __init__(self, module, username=None, password=None): + self.module = module + self.username = username + self.password = password + + def configure(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def enable(self): + # Remove any existing redhat.repo + redhat_repo = '/etc/yum.repos.d/redhat.repo' + if os.path.isfile(redhat_repo): + os.unlink(redhat_repo) + + def register(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def unregister(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def unsubscribe(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def update_plugin_conf(self, plugin, enabled=True): + plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin + if os.path.isfile(plugin_conf): + cfg = ConfigParser.ConfigParser() + cfg.read([plugin_conf]) + if enabled: + cfg.set('main', 'enabled', 1) + else: + cfg.set('main', 'enabled', 0) + fd = open(plugin_conf, 'rwa+') + cfg.write(fd) + fd.close() + + def subscribe(self, **kwargs): + raise NotImplementedError("Must be implemented by a sub-class") + + +class Rhsm(RegistrationBase): + def __init__(self, module, username=None, password=None): + RegistrationBase.__init__(self, module, username, password) + self.config = self._read_config() + self.module = module + + def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'): + ''' + Load RHSM configuration from /etc/rhsm/rhsm.conf. + Returns: + * ConfigParser object + ''' + + # Read RHSM defaults ... + cp = ConfigParser.ConfigParser() + cp.read(rhsm_conf) + + # Add support for specifying a default value w/o having to standup some configuration + # Yeah, I know this should be subclassed ... 
but, oh well + def get_option_default(self, key, default=''): + sect, opt = key.split('.', 1) + if self.has_section(sect) and self.has_option(sect, opt): + return self.get(sect, opt) + else: + return default + + cp.get_option = types.MethodType(get_option_default, cp, ConfigParser.ConfigParser) + + return cp + + def enable(self): + ''' + Enable the system to receive updates from subscription-manager. + This involves updating affected yum plugins and removing any + conflicting yum repositories. + ''' + RegistrationBase.enable(self) + self.update_plugin_conf('rhnplugin', False) + self.update_plugin_conf('subscription-manager', True) + + def configure(self, **kwargs): + ''' + Configure the system as directed for registration with RHN + Raises: + * Exception - if error occurs while running command + ''' + args = ['subscription-manager', 'config'] + + # Pass supplied **kwargs as parameters to subscription-manager. Ignore + # non-configuration parameters and replace '_' with '.'. For example, + # 'server_hostname' becomes '--system.hostname'. + for k,v in kwargs.items(): + if re.search(r'^(system|rhsm)_', k): + args.append('--%s=%s' % (k.replace('_','.'), v)) + + self.module.run_command(args, check_rc=True) + + @property + def is_registered(self): + ''' + Determine whether the current system + Returns: + * Boolean - whether the current system is currently registered to + RHN. + ''' + # Quick version... + if False: + return os.path.isfile('/etc/pki/consumer/cert.pem') and \ + os.path.isfile('/etc/pki/consumer/key.pem') + + args = ['subscription-manager', 'identity'] + rc, stdout, stderr = self.module.run_command(args, check_rc=False) + if rc == 0: + return True + else: + return False + + def register(self, username, password, autosubscribe, activationkey): + ''' + Register the current system to the provided RHN server + Raises: + * Exception - if error occurs while running command + ''' + args = ['subscription-manager', 'register'] + + # Generate command arguments + if activationkey: + args.append('--activationkey "%s"' % activationkey) + else: + if autosubscribe: + args.append('--autosubscribe') + if username: + args.extend(['--username', username]) + if password: + args.extend(['--password', password]) + + # Do the needful... 
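+        # run_command() returns (rc, stdout, stderr); with check_rc=True it
+        # already calls fail_json() on a non-zero exit, so rc does not need
+        # to be re-checked by the caller.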
+ rc, stderr, stdout = self.module.run_command(args, check_rc=True) + + def unsubscribe(self): + ''' + Unsubscribe a system from all subscribed channels + Raises: + * Exception - if error occurs while running command + ''' + args = ['subscription-manager', 'unsubscribe', '--all'] + rc, stderr, stdout = self.module.run_command(args, check_rc=True) + + def unregister(self): + ''' + Unregister a currently registered system + Raises: + * Exception - if error occurs while running command + ''' + args = ['subscription-manager', 'unregister'] + rc, stderr, stdout = self.module.run_command(args, check_rc=True) + + def subscribe(self, regexp): + ''' + Subscribe current system to available pools matching the specified + regular expression + Raises: + * Exception - if error occurs while running command + ''' + + # Available pools ready for subscription + available_pools = RhsmPools(self.module) + + for pool in available_pools.filter(regexp): + pool.subscribe() + + +class RhsmPool(object): + ''' + Convenience class for housing subscription information + ''' + + def __init__(self, module, **kwargs): + self.module = module + for k,v in kwargs.items(): + setattr(self, k, v) + + def __str__(self): + return str(self.__getattribute__('_name')) + + def subscribe(self): + args = "subscription-manager subscribe --pool %s" % self.PoolId + rc, stdout, stderr = self.module.run_command(args, check_rc=True) + if rc == 0: + return True + else: + return False + + +class RhsmPools(object): + """ + This class is used for manipulating pools subscriptions with RHSM + """ + def __init__(self, module): + self.module = module + self.products = self._load_product_list() + + def __iter__(self): + return self.products.__iter__() + + def _load_product_list(self): + """ + Loads list of all availaible pools for system in data structure + """ + args = "subscription-manager list --available" + rc, stdout, stderr = self.module.run_command(args, check_rc=True) + + products = [] + for line in stdout.split('\n'): + # Remove leading+trailing whitespace + line = line.strip() + # An empty line implies the end of a output group + if len(line) == 0: + continue + # If a colon ':' is found, parse + elif ':' in line: + (key, value) = line.split(':',1) + key = key.strip().replace(" ", "") # To unify + value = value.strip() + if key in ['ProductName', 'SubscriptionName']: + # Remember the name for later processing + products.append(RhsmPool(self.module, _name=value, key=value)) + elif products: + # Associate value with most recently recorded product + products[-1].__setattr__(key, value) + # FIXME - log some warning? 
+ #else: + # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) + return products + + def filter(self, regexp='^$'): + ''' + Return a list of RhsmPools whose name matches the provided regular expression + ''' + r = re.compile(regexp) + for product in self.products: + if r.search(product._name): + yield product + diff --git a/lib/ansible/runner/lookup_plugins/pipe.py b/lib/ansible/runner/lookup_plugins/pipe.py index 4205b887ffe..62ec7e129ed 100644 --- a/lib/ansible/runner/lookup_plugins/pipe.py +++ b/lib/ansible/runner/lookup_plugins/pipe.py @@ -32,7 +32,7 @@ class LookupModule(object): ret = [] for term in terms: - p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE) + p = subprocess.Popen(term, cwd=self.basedir, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE) (stdout, stderr) = p.communicate() if p.returncode == 0: ret.append(stdout.decode("utf-8").rstrip()) diff --git a/library/cloud/virt b/library/cloud/virt index 42e99209b14..8cbf7fc895a 100644 --- a/library/cloud/virt +++ b/library/cloud/virt @@ -113,13 +113,14 @@ class VMNotFound(Exception): class LibvirtConnection(object): - def __init__(self, uri): + def __init__(self, uri, module): - cmd = subprocess.Popen("uname -r", shell=True, stdout=subprocess.PIPE, - close_fds=True) - output = cmd.communicate()[0] + self.module = module - if output.find("xen") != -1: + cmd = "uname -r" + rc, stdout, stderr = self.module.run_command(cmd) + + if stdout.find("xen") != -1: conn = libvirt.open(None) else: conn = libvirt.open(uri) @@ -221,11 +222,12 @@ class LibvirtConnection(object): class Virt(object): - def __init__(self, uri): + def __init__(self, uri, module): + self.module = module self.uri = uri def __get_conn(self): - self.conn = LibvirtConnection(self.uri) + self.conn = LibvirtConnection(self.uri, self.module) return self.conn def get_vm(self, vmid): @@ -399,7 +401,7 @@ def core(module): uri = module.params.get('uri', None) xml = module.params.get('xml', None) - v = Virt(uri) + v = Virt(uri, module) res = {} if state and command=='list_vms': diff --git a/library/commands/command b/library/commands/command index 76d2f828d0c..ba9ae30a7f2 100644 --- a/library/commands/command +++ b/library/commands/command @@ -136,7 +136,7 @@ def main(): args = shlex.split(args) startd = datetime.datetime.now() - rc, out, err = module.run_command(args, executable=executable) + rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell) endd = datetime.datetime.now() delta = endd - startd diff --git a/library/files/synchronize b/library/files/synchronize index 493322393bc..eb556c30f53 100644 --- a/library/files/synchronize +++ b/library/files/synchronize @@ -16,8 +16,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
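The run_command() rewrite in module_utils/basic.py above is what every conversion in this commit leans on; its three dispatch cases, sketched with illustrative commands:

    # list: executed directly, shell=False
    rc, out, err = module.run_command(['ls', '/tmp'])

    # string, default use_unsafe_shell=False: shlex.split() first, then
    # shell=False, so shell metacharacters in the string stay inert
    rc, out, err = module.run_command("ls /tmp")

    # string with use_unsafe_shell=True: handed to the shell verbatim;
    # only for callers that fully control the command text
    rc, out, err = module.run_command("ls /tmp | wc -l", use_unsafe_shell=True)

One consequence for the packaging conversions below: a redirection such as '> /dev/null' left inside a plain command string now reaches the program as literal arguments instead of being interpreted by a shell.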
-import subprocess - DOCUMENTATION = ''' --- module: synchronize @@ -272,6 +270,13 @@ def main(): cmd = cmd + " --rsync-path '%s'" %(rsync_path) changed_marker = '<>' cmd = cmd + " --out-format='" + changed_marker + "%i %n%L'" + + # expand the paths + if '@' not in source: + source = os.path.expanduser(source) + if '@' not in dest: + dest = os.path.expanduser(dest) + cmd = ' '.join([cmd, source, dest]) cmdstr = cmd (rc, out, err) = module.run_command(cmd) diff --git a/library/notification/osx_say b/library/notification/osx_say index de5d1917c5f..39e3da88c19 100644 --- a/library/notification/osx_say +++ b/library/notification/osx_say @@ -44,8 +44,6 @@ EXAMPLES = ''' - local_action: osx_say msg="{{inventory_hostname}} is all done" voice=Zarvox ''' -import subprocess - DEFAULT_VOICE='Trinoids' def say(module, msg, voice): diff --git a/library/packaging/easy_install b/library/packaging/easy_install index bdacf8e464b..889a81f025a 100644 --- a/library/packaging/easy_install +++ b/library/packaging/easy_install @@ -151,8 +151,8 @@ def main(): command = '%s %s' % (virtualenv, env) if site_packages: command += ' --system-site-packages' - os.chdir(tempfile.gettempdir()) - rc_venv, out_venv, err_venv = module.run_command(command) + cwd = tempfile.gettempdir() + rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd) rc += rc_venv out += out_venv diff --git a/library/packaging/npm b/library/packaging/npm index 62179c373aa..c623b6f7e6d 100644 --- a/library/packaging/npm +++ b/library/packaging/npm @@ -125,10 +125,11 @@ class Npm(object): cmd.append(self.name_version) #If path is specified, cd into that path and run the command. + cwd = None if self.path: - os.chdir(self.path) + cwd = self.path - rc, out, err = self.module.run_command(cmd, check_rc=check_rc) + rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) return out return '' diff --git a/library/packaging/pacman b/library/packaging/pacman index 3080cb4a607..a4a24ca5fd1 100644 --- a/library/packaging/pacman +++ b/library/packaging/pacman @@ -90,7 +90,8 @@ def query_package(module, name, state="installed"): # pacman -Q returns 0 if the package is installed, # 1 if it is not installed if state == "installed": - rc = os.system("pacman -Q %s" % (name)) + cmd = "pacman -Q %s" % (name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc == 0: return True @@ -99,7 +100,8 @@ def query_package(module, name, state="installed"): def update_package_db(module): - rc = os.system("pacman -Syy > /dev/null") + cmd = "pacman -Syy > /dev/null" + rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="could not update package db") @@ -118,7 +120,8 @@ def remove_packages(module, packages): if not query_package(module, package): continue - rc = os.system("pacman -%s %s --noconfirm > /dev/null" % (args, package)) + cmd = "pacman -%s %s --noconfirm > /dev/null" % (args, package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="failed to remove %s" % (package)) @@ -145,7 +148,8 @@ def install_packages(module, packages, package_files): else: params = '-S %s' % package - rc = os.system("pacman %s --noconfirm > /dev/null" % (params)) + cmd = "pacman %s --noconfirm > /dev/null" % (params) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="failed to install %s" % (package)) diff --git a/library/packaging/pip b/library/packaging/pip index 35487c32963..aa55bf8ba0b 100644 --- a/library/packaging/pip 
+++ b/library/packaging/pip @@ -253,10 +253,10 @@ def main(): cmd = '%s --no-site-packages %s' % (virtualenv, env) else: cmd = '%s %s' % (virtualenv, env) - os.chdir(tempfile.gettempdir()) + this_dir = tempfile.gettempdir() if chdir: - os.chdir(chdir) - rc, out_venv, err_venv = module.run_command(cmd) + this_dir = os.path.join(this_dir, chdir) + rc, out_venv, err_venv = module.run_command(cmd, cwd=this_dir) out += out_venv err += err_venv if rc != 0: @@ -298,10 +298,11 @@ def main(): if module.check_mode: module.exit_json(changed=True) - os.chdir(tempfile.gettempdir()) + this_dir = tempfile.gettempdir() if chdir: - os.chdir(chdir) - rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix) + this_dir = os.path.join(this_dir, chdir) + + rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=this_dir) out += out_pip err += err_pip if rc == 1 and state == 'absent' and 'not installed' in out_pip: diff --git a/library/packaging/redhat_subscription b/library/packaging/redhat_subscription index e363aa0946a..bb5d655a52f 100644 --- a/library/packaging/redhat_subscription +++ b/library/packaging/redhat_subscription @@ -75,39 +75,13 @@ EXAMPLES = ''' import os import re import types -import subprocess import ConfigParser import shlex -class CommandException(Exception): - pass - - -def run_command(args): - ''' - Convenience method to run a command, specified as a list of arguments. - Returns: - * tuple - (stdout, stder, retcode) - ''' - - # Coerce into a string - if isinstance(args, str): - args = shlex.split(args) - - # Run desired command - proc = subprocess.Popen(args, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - (stdout, stderr) = proc.communicate() - returncode = proc.poll() - if returncode != 0: - cmd = ' '.join(args) - raise CommandException("Command failed (%s): %s\n%s" % (returncode, cmd, stdout)) - return (stdout, stderr, returncode) - - -class RegistrationBase (object): - def __init__(self, username=None, password=None): +class RegistrationBase(object): + def __init__(self, module, username=None, password=None): + self.module = module self.username = username self.password = password @@ -147,9 +121,10 @@ class RegistrationBase (object): class Rhsm(RegistrationBase): - def __init__(self, username=None, password=None): - RegistrationBase.__init__(self, username, password) + def __init__(self, module, username=None, password=None): + RegistrationBase.__init__(self, module, username, password) self.config = self._read_config() + self.module = module def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'): ''' @@ -199,8 +174,8 @@ class Rhsm(RegistrationBase): for k,v in kwargs.items(): if re.search(r'^(system|rhsm)_', k): args.append('--%s=%s' % (k.replace('_','.'), v)) - - run_command(args) + + self.module.run_command(args, check_rc=True) @property def is_registered(self): @@ -216,13 +191,11 @@ class Rhsm(RegistrationBase): os.path.isfile('/etc/pki/consumer/key.pem') args = ['subscription-manager', 'identity'] - try: - (stdout, stderr, retcode) = run_command(args) - except CommandException, e: - return False - else: - # Display some debug output + rc, stdout, stderr = self.module.run_command(args, check_rc=False) + if rc == 0: return True + else: + return False def register(self, username, password, autosubscribe, activationkey): ''' @@ -244,7 +217,7 @@ class Rhsm(RegistrationBase): args.extend(['--password', password]) # Do the needful... 
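The redhat_subscription hunks here retire the module-private run_command() helper in favour of AnsibleModule.run_command(), so failures are reported through the framework instead of a custom CommandException. The resulting call shape, mirroring the is_registered hunk above:

    args = ['subscription-manager', 'identity']
    rc, stdout, stderr = module.run_command(args, check_rc=False)
    is_registered = (rc == 0)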
- run_command(args) + rc, stderr, stdout = self.module.run_command(args, check_rc=True) def unsubscribe(self): ''' @@ -253,7 +226,7 @@ class Rhsm(RegistrationBase): * Exception - if error occurs while running command ''' args = ['subscription-manager', 'unsubscribe', '--all'] - run_command(args) + rc, stderr, stdout = self.module.run_command(args, check_rc=True) def unregister(self): ''' @@ -262,7 +235,7 @@ class Rhsm(RegistrationBase): * Exception - if error occurs while running command ''' args = ['subscription-manager', 'unregister'] - run_command(args) + rc, stderr, stdout = self.module.run_command(args, check_rc=True) def subscribe(self, regexp): ''' @@ -273,7 +246,7 @@ class Rhsm(RegistrationBase): ''' # Available pools ready for subscription - available_pools = RhsmPools() + available_pools = RhsmPools(self.module) for pool in available_pools.filter(regexp): pool.subscribe() @@ -284,7 +257,8 @@ class RhsmPool(object): Convenience class for housing subscription information ''' - def __init__(self, **kwargs): + def __init__(self, module, **kwargs): + self.module = module for k,v in kwargs.items(): setattr(self, k, v) @@ -292,15 +266,20 @@ class RhsmPool(object): return str(self.__getattribute__('_name')) def subscribe(self): - (stdout, stderr, retcode) = run_command("subscription-manager subscribe --pool %s" % self.PoolId) - return True + args = "subscription-manager subscribe --pool %s" % self.PoolId + rc, stdout, stderr = self.module.run_command(args, check_rc=True) + if rc == 0: + return True + else: + return False class RhsmPools(object): """ This class is used for manipulating pools subscriptions with RHSM """ - def __init__(self): + def __init__(self, module): + self.module = module self.products = self._load_product_list() def __iter__(self): @@ -310,7 +289,8 @@ class RhsmPools(object): """ Loads list of all availaible pools for system in data structure """ - (stdout, stderr, retval) = run_command("subscription-manager list --available") + args = "subscription-manager list --available" + rc, stdout, stderr = self.module.run_command(args, check_rc=True) products = [] for line in stdout.split('\n'): @@ -326,7 +306,7 @@ class RhsmPools(object): value = value.strip() if key in ['ProductName', 'SubscriptionName']: # Remember the name for later processing - products.append(RhsmPool(_name=value, key=value)) + products.append(RhsmPool(self.module, _name=value, key=value)) elif products: # Associate value with most recently recorded product products[-1].__setattr__(key, value) @@ -348,7 +328,7 @@ class RhsmPools(object): def main(): # Load RHSM configuration from file - rhn = Rhsm() + rhn = Rhsm(AnsibleModule()) module = AnsibleModule( argument_spec = dict( @@ -364,6 +344,7 @@ def main(): ) ) + rhn.module = module state = module.params['state'] username = module.params['username'] password = module.params['password'] diff --git a/library/packaging/rhn_register b/library/packaging/rhn_register index 5e8c3718f98..28d91a6a027 100644 --- a/library/packaging/rhn_register +++ b/library/packaging/rhn_register @@ -72,12 +72,7 @@ EXAMPLES = ''' ''' import sys -import os -import re import types -import subprocess -import ConfigParser -import shlex import xmlrpclib import urlparse @@ -90,75 +85,9 @@ except ImportError, e: module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?\n%s" % e) -class CommandException(Exception): - pass - - -def run_command(args): - ''' - Convenience method to run a command, specified as a list of arguments. 
- Returns: - * tuple - (stdout, stder, retcode) - ''' - - # Coerce into a string - if isinstance(args, str): - args = shlex.split(args) - - # Run desired command - proc = subprocess.Popen(args, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - (stdout, stderr) = proc.communicate() - returncode = proc.poll() - if returncode != 0: - cmd = ' '.join(args) - raise CommandException("Command failed (%s): %s\n%s" % (returncode, cmd, stdout)) - return (stdout, stderr, returncode) - - -class RegistrationBase (object): - def __init__(self, username=None, password=None): - self.username = username - self.password = password - - def configure(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def enable(self): - # Remove any existing redhat.repo - redhat_repo = '/etc/yum.repos.d/redhat.repo' - if os.path.isfile(redhat_repo): - os.unlink(redhat_repo) - - def register(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unregister(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unsubscribe(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def update_plugin_conf(self, plugin, enabled=True): - plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin - if os.path.isfile(plugin_conf): - cfg = ConfigParser.ConfigParser() - cfg.read([plugin_conf]) - if enabled: - cfg.set('main', 'enabled', 1) - else: - cfg.set('main', 'enabled', 0) - fd = open(plugin_conf, 'rwa+') - cfg.write(fd) - fd.close() - - def subscribe(self, **kwargs): - raise NotImplementedError("Must be implemented by a sub-class") - - class Rhn(RegistrationBase): - def __init__(self, username=None, password=None): + def __init__(self, module, username=None, password=None): RegistrationBase.__init__(self, username, password) self.config = self.load_config() @@ -271,7 +200,7 @@ class Rhn(RegistrationBase): register_cmd += " --activationkey '%s'" % activationkey # FIXME - support --profilename # FIXME - support --systemorgid - run_command(register_cmd) + rc, stdout, stderr = self.module.run_command(register_command, check_rc=True) def api(self, method, *args): ''' @@ -309,14 +238,14 @@ class Rhn(RegistrationBase): Subscribe to requested yum repositories using 'rhn-channel' command ''' rhn_channel_cmd = "rhn-channel --user='%s' --password='%s'" % (self.username, self.password) - (stdout, stderr, rc) = run_command(rhn_channel_cmd + " --available-channels") + rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --available-channels", check_rc=True) # Enable requested repoid's for wanted_channel in channels: # Each inserted repo regexp will be matched. If no match, no success. 
for availaible_channel in stdout.rstrip().split('\n'): # .rstrip() because of \n at the end -> empty string at the end if re.search(wanted_repo, available_channel): - run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel) + rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel, check_rc=True) def main(): @@ -379,4 +308,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.redhat import * + main() diff --git a/library/packaging/urpmi b/library/packaging/urpmi index b001ed94dee..72dfef02011 100644 --- a/library/packaging/urpmi +++ b/library/packaging/urpmi @@ -91,7 +91,8 @@ def query_package(module, name): # rpm -q returns 0 if the package is installed, # 1 if it is not installed - rc = os.system("rpm -q %s" % (name)) + cmd = "rpm -q %s" % (name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc == 0: return True else: @@ -103,13 +104,14 @@ def query_package_provides(module, name): # rpm -q returns 0 if the package is installed, # 1 if it is not installed - rc = os.system("rpm -q --provides %s >/dev/null" % (name)) + cmd = "rpm -q --provides %s >/dev/null" % (name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) return rc == 0 def update_package_db(module): - rc = os.system("urpmi.update -a -q") - + cmd = "urpmi.update -a -q" + rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="could not update package db") @@ -123,7 +125,8 @@ def remove_packages(module, packages): if not query_package(module, package): continue - rc = os.system("%s --auto %s > /dev/null" % (URPME_PATH, package)) + cmd = "%s --auto %s > /dev/null" % (URPME_PATH, package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="failed to remove %s" % (package)) diff --git a/library/source_control/bzr b/library/source_control/bzr index bc2dfc3089f..5217e469900 100644 --- a/library/source_control/bzr +++ b/library/source_control/bzr @@ -75,16 +75,17 @@ class Bzr(object): self.version = version self.bzr_path = bzr_path - def _command(self, args_list, **kwargs): + def _command(self, args_list, cwd=None, **kwargs): (rc, out, err) = self.module.run_command( - [self.bzr_path] + args_list, **kwargs) + [self.bzr_path] + args_list, cwd=cwd, **kwargs) return (rc, out, err) def get_version(self): '''samples the version of the bzr branch''' - os.chdir(self.dest) + cmd = "%s revno" % self.bzr_path - revno = os.popen(cmd).read().strip() + rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) + revno = stdout.strip() return revno def clone(self): @@ -94,17 +95,18 @@ class Bzr(object): os.makedirs(dest_dirname) except: pass - os.chdir(dest_dirname) if self.version.lower() != 'head': args_list = ["branch", "-r", self.version, self.parent, self.dest] else: args_list = ["branch", self.parent, self.dest] - return self._command(args_list, check_rc=True) + return self._command(args_list, check_rc=True, cwd=dest_dirname) def has_local_mods(self): - os.chdir(self.dest) + cmd = "%s status -S" % self.bzr_path - lines = os.popen(cmd).read().splitlines() + rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) + lines = stdout.splitlines() + lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) return len(lines) > 0 @@ -114,30 +116,27 @@ class Bzr(object): Discards any changes to tracked files in the working tree since that commit. 
''' - os.chdir(self.dest) if not force and self.has_local_mods(): self.module.fail_json(msg="Local modifications exist in branch (force=no).") - return self._command(["revert"], check_rc=True) + return self._command(["revert"], check_rc=True, cwd=self.dest) def fetch(self): '''updates branch from remote sources''' - os.chdir(self.dest) if self.version.lower() != 'head': - (rc, out, err) = self._command(["pull", "-r", self.version]) + (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest) else: - (rc, out, err) = self._command(["pull"]) + (rc, out, err) = self._command(["pull"], cwd=self.dest) if rc != 0: self.module.fail_json(msg="Failed to pull") return (rc, out, err) def switch_version(self): '''once pulled, switch to a particular revno or revid''' - os.chdir(self.dest) if self.version.lower() != 'head': args_list = ["revert", "-r", self.version] else: args_list = ["revert"] - return self._command(args_list, check_rc=True) + return self._command(args_list, check_rc=True, cwd=self.dest) # =========================================== diff --git a/library/source_control/git b/library/source_control/git index ca876c666b5..4f885c94001 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -181,11 +181,12 @@ def set_git_ssh(ssh_wrapper, key_file, ssh_opts): if ssh_opts: os.environ["GIT_SSH_OPTS"] = ssh_opts -def get_version(git_path, dest, ref="HEAD"): +def get_version(module, git_path, dest, ref="HEAD"): ''' samples the version of the git repo ''' - os.chdir(dest) + cmd = "%s rev-parse %s" % (git_path, ref) - sha = os.popen(cmd).read().rstrip("\n") + rc, stdout, stderr = module.run_command(cmd, cwd=dest) + sha = stdout.rstrip('\n') return sha def clone(git_path, module, repo, dest, remote, depth, version, bare, reference): @@ -195,7 +196,6 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, reference) os.makedirs(dest_dirname) except: pass - os.chdir(dest_dirname) cmd = [ git_path, 'clone' ] if bare: cmd.append('--bare') @@ -209,19 +209,19 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, reference) if reference: cmd.extend([ '--reference', str(reference) ]) cmd.extend([ repo, dest ]) - module.run_command(cmd, check_rc=True) + module.run_command(cmd, check_rc=True, cwd=dest_dirname) if bare: - os.chdir(dest) if remote != 'origin': - module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True) + module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest) -def has_local_mods(git_path, dest, bare): +def has_local_mods(module, git_path, dest, bare): if bare: return False - os.chdir(dest) - cmd = "%s status -s" % (git_path,) - lines = os.popen(cmd).read().splitlines() - lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) + + cmd = "%s status -s" % (git_path) + rc, stdout, stderr = module.run_command(cmd, cwd=dest) + lines = stdout.splitlines() + return len(lines) > 0 def reset(git_path, module, dest): @@ -230,16 +230,16 @@ def reset(git_path, module, dest): Discards any changes to tracked files in working tree since that commit. 
''' - os.chdir(dest) cmd = "%s reset --hard HEAD" % (git_path,) - return module.run_command(cmd, check_rc=True) + return module.run_command(cmd, check_rc=True, cwd=dest) def get_remote_head(git_path, module, dest, version, remote, bare): cloning = False + cwd = None if remote == module.params['repo']: cloning = True else: - os.chdir(dest) + cwd = dest if version == 'HEAD': if cloning: # cloning the repo, just get the remote's HEAD version @@ -255,7 +255,7 @@ def get_remote_head(git_path, module, dest, version, remote, bare): # appears to be a sha1. return as-is since it appears # cannot check for a specific sha1 on remote return version - (rc, out, err) = module.run_command(cmd, check_rc=True ) + (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd) if len(out) < 1: module.fail_json(msg="Could not determine remote revision for %s" % version) rev = out.split()[0] @@ -270,10 +270,9 @@ def is_remote_tag(git_path, module, dest, remote, version): return False def get_branches(git_path, module, dest): - os.chdir(dest) branches = [] cmd = '%s branch -a' % (git_path,) - (rc, out, err) = module.run_command(cmd) + (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: module.fail_json(msg="Could not determine branch data - received %s" % out) for line in out.split('\n'): @@ -281,10 +280,9 @@ def get_branches(git_path, module, dest): return branches def get_tags(git_path, module, dest): - os.chdir(dest) tags = [] cmd = '%s tag' % (git_path,) - (rc, out, err) = module.run_command(cmd) + (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: module.fail_json(msg="Could not determine tag data - received %s" % out) for line in out.split('\n'): @@ -352,18 +350,17 @@ def get_head_branch(git_path, module, dest, remote, bare=False): def fetch(git_path, module, repo, dest, version, remote, bare): ''' updates repo from remote sources ''' - os.chdir(dest) if bare: - (rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*']) + (rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*'], cwd=dest) else: - (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote)) + (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote), cwd=dest) if rc != 0: module.fail_json(msg="Failed to download remote objects and refs") if bare: - (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*']) + (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*'], cwd=dest) else: - (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote)) + (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest) if rc != 0: module.fail_json(msg="Failed to download remote objects and refs") (rc, out3, err3) = submodule_update(git_path, module, dest) @@ -371,28 +368,26 @@ def fetch(git_path, module, repo, dest, version, remote, bare): def submodule_update(git_path, module, dest): ''' init and update any submodules ''' - os.chdir(dest) # skip submodule commands if .gitmodules is not present if not os.path.exists(os.path.join(dest, '.gitmodules')): return (0, '', '') cmd = [ git_path, 'submodule', 'sync' ] - (rc, out, err) = module.run_command(cmd, check_rc=True) + (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ] - (rc, out, err) = module.run_command(cmd) + (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: 
module.fail_json(msg="Failed to init/update submodules") return (rc, out, err) def switch_version(git_path, module, dest, remote, version): ''' once pulled, switch to a particular SHA, tag, or branch ''' - os.chdir(dest) cmd = '' if version != 'HEAD': if is_remote_branch(git_path, module, dest, remote, version): if not is_local_branch(git_path, module, dest, version): cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version) else: - (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version)) + (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest) if rc != 0: module.fail_json(msg="Failed to checkout branch %s" % version) cmd = "%s reset --hard %s/%s" % (git_path, remote, version) @@ -400,11 +395,11 @@ def switch_version(git_path, module, dest, remote, version): cmd = "%s checkout --force %s" % (git_path, version) else: branch = get_head_branch(git_path, module, dest, remote) - (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch)) + (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest) if rc != 0: module.fail_json(msg="Failed to checkout branch %s" % branch) cmd = "%s reset --hard %s" % (git_path, remote) - (rc, out1, err1) = module.run_command(cmd) + (rc, out1, err1) = module.run_command(cmd, cwd=dest) if rc != 0: if version != 'HEAD': module.fail_json(msg="Failed to checkout %s" % (version)) @@ -484,12 +479,12 @@ def main(): # Just return having found a repo already in the dest path # this does no checking that the repo is the actual repo # requested. - before = get_version(git_path, dest) + before = get_version(module, git_path, dest) module.exit_json(changed=False, before=before, after=before) else: # else do a pull - local_mods = has_local_mods(git_path, dest, bare) - before = get_version(git_path, dest) + local_mods = has_local_mods(module, git_path, dest, bare) + before = get_version(module, git_path, dest) if local_mods: # failure should happen regardless of check mode if not force: @@ -519,7 +514,7 @@ def main(): switch_version(git_path, module, dest, remote, version) # determine if we changed anything - after = get_version(git_path, dest) + after = get_version(module, git_path, dest) changed = False if before != after or local_mods: diff --git a/library/system/service b/library/system/service index 2e26a47b636..5180a14d82b 100644 --- a/library/system/service +++ b/library/system/service @@ -207,7 +207,9 @@ class Service(object): os._exit(0) # Start the command - p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1])) + if isinstance(cmd, basestring): + cmd = shlex.split(cmd) + p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1])) stdout = "" stderr = "" fds = [p.stdout, p.stderr] diff --git a/library/system/setup b/library/system/setup index f140991dc27..c2e9d9792f9 100644 --- a/library/system/setup +++ b/library/system/setup @@ -29,7 +29,6 @@ import socket import struct import datetime import getpass -import subprocess import ConfigParser import StringIO @@ -1432,7 +1431,8 @@ class LinuxNetwork(Network): """ platform = 'Linux' - def __init__(self): + def __init__(self, module): + self.module = module Network.__init__(self) def populate(self): @@ -1618,12 +1618,15 @@ class LinuxNetwork(Network): ips['all_ipv6_addresses'].append(address) ip_path = module.get_bin_path("ip") - primary_data = 
subprocess.Popen( - [ip_path, 'addr', 'show', 'primary', device], - stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0] - secondary_data = subprocess.Popen( - [ip_path, 'addr', 'show', 'secondary', device], - stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0] + + args = [ip_path, 'addr', 'show', 'primary', device] + rc, stdout, stderr = self.module.run_command(args) + primary_data = stdout + + args = [ip_path, 'addr', 'show', 'secondary', device] + rc, stdout, stderr = self.module.run_command(args) + secondary_data = stdout + parse_ip_output(primary_data) parse_ip_output(secondary_data, secondary=True) @@ -2283,11 +2286,11 @@ def get_file_content(path, default=None): data = default return data -def ansible_facts(): +def ansible_facts(module): facts = {} facts.update(Facts().populate()) facts.update(Hardware().populate()) - facts.update(Network().populate()) + facts.update(Network(module).populate()) facts.update(Virtual().populate()) return facts @@ -2296,7 +2299,7 @@ def ansible_facts(): def run_setup(module): setup_options = {} - facts = ansible_facts() + facts = ansible_facts(module) for (k, v) in facts.items(): setup_options["ansible_%s" % k.replace('-', '_')] = v diff --git a/library/web_infrastructure/django_manage b/library/web_infrastructure/django_manage index 68eb92c1bfe..b02a9398f52 100644 --- a/library/web_infrastructure/django_manage +++ b/library/web_infrastructure/django_manage @@ -232,7 +232,6 @@ def main(): _ensure_virtualenv(module) - os.chdir(app_path) cmd = "python manage.py %s" % (command, ) if command in noinput_commands: @@ -251,7 +250,7 @@ def main(): if module.params[param]: cmd = '%s %s' % (cmd, module.params[param]) - rc, out, err = module.run_command(cmd) + rc, out, err = module.run_command(cmd, cwd=app_path) if rc != 0: if command == 'createcachetable' and 'table' in err and 'already exists' in err: out = 'Already exists.' 
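The source_control and setup hunks in this commit retire the remaining os.chdir()/os.popen() pairs in favour of run_command()'s cwd= argument, which scopes the directory change to the one subprocess and keeps the exit code. A before/after sketch of the git version probe from the patch:

    # before: process-wide chdir, shell via os.popen(), exit status lost
    os.chdir(dest)
    sha = os.popen("%s rev-parse HEAD" % git_path).read().rstrip("\n")

    # after: directory scoped to the child; rc and stderr available on failure
    rc, stdout, stderr = module.run_command("%s rev-parse HEAD" % git_path, cwd=dest)
    sha = stdout.rstrip("\n")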
From a02641c0202a643c9d70475789e3f0d8261ae2ee Mon Sep 17 00:00:00 2001
From: James Tanner
Date: Mon, 10 Mar 2014 16:11:24 -0500
Subject: [PATCH 229/772] Force command action to not be executed by the shell unless specifically enabled

---
 lib/ansible/module_utils/basic.py         |  50 +++--
 lib/ansible/module_utils/redhat.py        | 252 ++++++++++++++++++++++
 lib/ansible/runner/lookup_plugins/pipe.py |   2 +-
 library/cloud/virt                        |  18 +-
 library/commands/command                  |   2 +-
 library/files/synchronize                 |   9 +-
 library/notification/osx_say              |   2 -
 library/packaging/easy_install            |   4 +-
 library/packaging/npm                     |   5 +-
 library/packaging/pacman                  |  12 +-
 library/packaging/pip                     |  13 +-
 library/packaging/redhat_subscription     |  81 +++----
 library/packaging/rhn_register            |  81 +------
 library/packaging/urpmi                   |  13 +-
 library/source_control/bzr                |  29 ++-
 library/source_control/git                |  67 +++---
 library/system/service                    |   4 +-
 library/system/setup                      |  25 ++-
 library/web_infrastructure/django_manage  |   3 +-
 19 files changed, 427 insertions(+), 245 deletions(-)
 create mode 100644 lib/ansible/module_utils/redhat.py

diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index fd0b2edfc3b..67ceb3b605c 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -990,12 +990,13 @@ class AnsibleModule(object):
             # rename might not preserve context
             self.set_context_if_different(dest, context, False)

-    def run_command(self, args, check_rc=False, close_fds=False, executable=None, data=None, binary_data=False, path_prefix=None):
+    def run_command(self, args, check_rc=False, close_fds=False, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False):
         '''
         Execute a command, returns rc, stdout, and stderr.
         args is the command to run
         If args is a list, the command will be run with shell=False.
-        Otherwise, the command will be run with shell=True when args is a string.
+        If args is a string and use_unsafe_shell=False, it will split args into a list and run with shell=False.
+        If args is a string and use_unsafe_shell=True, it will run with shell=True.

         Other arguments:
         - check_rc (boolean)    Whether to call fail_json in case of
                                 non zero RC.  Default is False.
         - close_fds (boolean)   See documentation for subprocess.Popen().
                                 Default is False.
         - executable (string)   See documentation for subprocess.Popen().
                                 Default is None.
''' + + shell = False if isinstance(args, list): - shell = False - elif isinstance(args, basestring): + pass + elif isinstance(args, basestring) and use_unsafe_shell: shell = True + elif isinstance(args, basestring): + args = shlex.split(args) else: msg = "Argument 'args' to run_command must be list or string" self.fail_json(rc=257, cmd=args, msg=msg) + rc = 0 msg = None st_in = None @@ -1022,25 +1028,25 @@ class AnsibleModule(object): if data: st_in = subprocess.PIPE + + kwargs = dict( + executable=executable, + shell=shell, + close_fds=close_fds, + stdin= st_in, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + + if path_prefix: + kwargs['env'] = env + if cwd: + kwargs['cwd'] = cwd + + try: - if path_prefix is not None: - cmd = subprocess.Popen(args, - executable=executable, - shell=shell, - close_fds=close_fds, - stdin=st_in, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=env) - else: - cmd = subprocess.Popen(args, - executable=executable, - shell=shell, - close_fds=close_fds, - stdin=st_in, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - + cmd = subprocess.Popen(args, **kwargs) + if data: if not binary_data: data += '\\n' diff --git a/lib/ansible/module_utils/redhat.py b/lib/ansible/module_utils/redhat.py new file mode 100644 index 00000000000..a1081f9c8c7 --- /dev/null +++ b/lib/ansible/module_utils/redhat.py @@ -0,0 +1,252 @@ +import os +import re +import types +import ConfigParser +import shlex + + +class RegistrationBase(object): + def __init__(self, module, username=None, password=None): + self.module = module + self.username = username + self.password = password + + def configure(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def enable(self): + # Remove any existing redhat.repo + redhat_repo = '/etc/yum.repos.d/redhat.repo' + if os.path.isfile(redhat_repo): + os.unlink(redhat_repo) + + def register(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def unregister(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def unsubscribe(self): + raise NotImplementedError("Must be implemented by a sub-class") + + def update_plugin_conf(self, plugin, enabled=True): + plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin + if os.path.isfile(plugin_conf): + cfg = ConfigParser.ConfigParser() + cfg.read([plugin_conf]) + if enabled: + cfg.set('main', 'enabled', 1) + else: + cfg.set('main', 'enabled', 0) + fd = open(plugin_conf, 'rwa+') + cfg.write(fd) + fd.close() + + def subscribe(self, **kwargs): + raise NotImplementedError("Must be implemented by a sub-class") + + +class Rhsm(RegistrationBase): + def __init__(self, module, username=None, password=None): + RegistrationBase.__init__(self, module, username, password) + self.config = self._read_config() + self.module = module + + def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'): + ''' + Load RHSM configuration from /etc/rhsm/rhsm.conf. + Returns: + * ConfigParser object + ''' + + # Read RHSM defaults ... + cp = ConfigParser.ConfigParser() + cp.read(rhsm_conf) + + # Add support for specifying a default value w/o having to standup some configuration + # Yeah, I know this should be subclassed ... 
but, oh well + def get_option_default(self, key, default=''): + sect, opt = key.split('.', 1) + if self.has_section(sect) and self.has_option(sect, opt): + return self.get(sect, opt) + else: + return default + + cp.get_option = types.MethodType(get_option_default, cp, ConfigParser.ConfigParser) + + return cp + + def enable(self): + ''' + Enable the system to receive updates from subscription-manager. + This involves updating affected yum plugins and removing any + conflicting yum repositories. + ''' + RegistrationBase.enable(self) + self.update_plugin_conf('rhnplugin', False) + self.update_plugin_conf('subscription-manager', True) + + def configure(self, **kwargs): + ''' + Configure the system as directed for registration with RHN + Raises: + * Exception - if error occurs while running command + ''' + args = ['subscription-manager', 'config'] + + # Pass supplied **kwargs as parameters to subscription-manager. Ignore + # non-configuration parameters and replace '_' with '.'. For example, + # 'server_hostname' becomes '--system.hostname'. + for k,v in kwargs.items(): + if re.search(r'^(system|rhsm)_', k): + args.append('--%s=%s' % (k.replace('_','.'), v)) + + self.module.run_command(args, check_rc=True) + + @property + def is_registered(self): + ''' + Determine whether the current system + Returns: + * Boolean - whether the current system is currently registered to + RHN. + ''' + # Quick version... + if False: + return os.path.isfile('/etc/pki/consumer/cert.pem') and \ + os.path.isfile('/etc/pki/consumer/key.pem') + + args = ['subscription-manager', 'identity'] + rc, stdout, stderr = self.module.run_command(args, check_rc=False) + if rc == 0: + return True + else: + return False + + def register(self, username, password, autosubscribe, activationkey): + ''' + Register the current system to the provided RHN server + Raises: + * Exception - if error occurs while running command + ''' + args = ['subscription-manager', 'register'] + + # Generate command arguments + if activationkey: + args.append('--activationkey "%s"' % activationkey) + else: + if autosubscribe: + args.append('--autosubscribe') + if username: + args.extend(['--username', username]) + if password: + args.extend(['--password', password]) + + # Do the needful... 
+ rc, stderr, stdout = self.module.run_command(args, check_rc=True) + + def unsubscribe(self): + ''' + Unsubscribe a system from all subscribed channels + Raises: + * Exception - if error occurs while running command + ''' + args = ['subscription-manager', 'unsubscribe', '--all'] + rc, stderr, stdout = self.module.run_command(args, check_rc=True) + + def unregister(self): + ''' + Unregister a currently registered system + Raises: + * Exception - if error occurs while running command + ''' + args = ['subscription-manager', 'unregister'] + rc, stderr, stdout = self.module.run_command(args, check_rc=True) + + def subscribe(self, regexp): + ''' + Subscribe current system to available pools matching the specified + regular expression + Raises: + * Exception - if error occurs while running command + ''' + + # Available pools ready for subscription + available_pools = RhsmPools(self.module) + + for pool in available_pools.filter(regexp): + pool.subscribe() + + +class RhsmPool(object): + ''' + Convenience class for housing subscription information + ''' + + def __init__(self, module, **kwargs): + self.module = module + for k,v in kwargs.items(): + setattr(self, k, v) + + def __str__(self): + return str(self.__getattribute__('_name')) + + def subscribe(self): + args = "subscription-manager subscribe --pool %s" % self.PoolId + rc, stdout, stderr = self.module.run_command(args, check_rc=True) + if rc == 0: + return True + else: + return False + + +class RhsmPools(object): + """ + This class is used for manipulating pools subscriptions with RHSM + """ + def __init__(self, module): + self.module = module + self.products = self._load_product_list() + + def __iter__(self): + return self.products.__iter__() + + def _load_product_list(self): + """ + Loads list of all availaible pools for system in data structure + """ + args = "subscription-manager list --available" + rc, stdout, stderr = self.module.run_command(args, check_rc=True) + + products = [] + for line in stdout.split('\n'): + # Remove leading+trailing whitespace + line = line.strip() + # An empty line implies the end of a output group + if len(line) == 0: + continue + # If a colon ':' is found, parse + elif ':' in line: + (key, value) = line.split(':',1) + key = key.strip().replace(" ", "") # To unify + value = value.strip() + if key in ['ProductName', 'SubscriptionName']: + # Remember the name for later processing + products.append(RhsmPool(self.module, _name=value, key=value)) + elif products: + # Associate value with most recently recorded product + products[-1].__setattr__(key, value) + # FIXME - log some warning? 
+ #else: + # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) + return products + + def filter(self, regexp='^$'): + ''' + Return a list of RhsmPools whose name matches the provided regular expression + ''' + r = re.compile(regexp) + for product in self.products: + if r.search(product._name): + yield product + diff --git a/lib/ansible/runner/lookup_plugins/pipe.py b/lib/ansible/runner/lookup_plugins/pipe.py index 4205b887ffe..62ec7e129ed 100644 --- a/lib/ansible/runner/lookup_plugins/pipe.py +++ b/lib/ansible/runner/lookup_plugins/pipe.py @@ -32,7 +32,7 @@ class LookupModule(object): ret = [] for term in terms: - p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE) + p = subprocess.Popen(term, cwd=self.basedir, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE) (stdout, stderr) = p.communicate() if p.returncode == 0: ret.append(stdout.decode("utf-8").rstrip()) diff --git a/library/cloud/virt b/library/cloud/virt index 42e99209b14..8cbf7fc895a 100644 --- a/library/cloud/virt +++ b/library/cloud/virt @@ -113,13 +113,14 @@ class VMNotFound(Exception): class LibvirtConnection(object): - def __init__(self, uri): + def __init__(self, uri, module): - cmd = subprocess.Popen("uname -r", shell=True, stdout=subprocess.PIPE, - close_fds=True) - output = cmd.communicate()[0] + self.module = module - if output.find("xen") != -1: + cmd = "uname -r" + rc, stdout, stderr = self.module.run_command(cmd) + + if stdout.find("xen") != -1: conn = libvirt.open(None) else: conn = libvirt.open(uri) @@ -221,11 +222,12 @@ class LibvirtConnection(object): class Virt(object): - def __init__(self, uri): + def __init__(self, uri, module): + self.module = module self.uri = uri def __get_conn(self): - self.conn = LibvirtConnection(self.uri) + self.conn = LibvirtConnection(self.uri, self.module) return self.conn def get_vm(self, vmid): @@ -399,7 +401,7 @@ def core(module): uri = module.params.get('uri', None) xml = module.params.get('xml', None) - v = Virt(uri) + v = Virt(uri, module) res = {} if state and command=='list_vms': diff --git a/library/commands/command b/library/commands/command index 76d2f828d0c..ba9ae30a7f2 100644 --- a/library/commands/command +++ b/library/commands/command @@ -136,7 +136,7 @@ def main(): args = shlex.split(args) startd = datetime.datetime.now() - rc, out, err = module.run_command(args, executable=executable) + rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell) endd = datetime.datetime.now() delta = endd - startd diff --git a/library/files/synchronize b/library/files/synchronize index 493322393bc..eb556c30f53 100644 --- a/library/files/synchronize +++ b/library/files/synchronize @@ -16,8 +16,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-import subprocess - DOCUMENTATION = ''' --- module: synchronize @@ -272,6 +270,13 @@ def main(): cmd = cmd + " --rsync-path '%s'" %(rsync_path) changed_marker = '<>' cmd = cmd + " --out-format='" + changed_marker + "%i %n%L'" + + # expand the paths + if '@' not in source: + source = os.path.expanduser(source) + if '@' not in dest: + dest = os.path.expanduser(dest) + cmd = ' '.join([cmd, source, dest]) cmdstr = cmd (rc, out, err) = module.run_command(cmd) diff --git a/library/notification/osx_say b/library/notification/osx_say index de5d1917c5f..39e3da88c19 100644 --- a/library/notification/osx_say +++ b/library/notification/osx_say @@ -44,8 +44,6 @@ EXAMPLES = ''' - local_action: osx_say msg="{{inventory_hostname}} is all done" voice=Zarvox ''' -import subprocess - DEFAULT_VOICE='Trinoids' def say(module, msg, voice): diff --git a/library/packaging/easy_install b/library/packaging/easy_install index bdacf8e464b..889a81f025a 100644 --- a/library/packaging/easy_install +++ b/library/packaging/easy_install @@ -151,8 +151,8 @@ def main(): command = '%s %s' % (virtualenv, env) if site_packages: command += ' --system-site-packages' - os.chdir(tempfile.gettempdir()) - rc_venv, out_venv, err_venv = module.run_command(command) + cwd = tempfile.gettempdir() + rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd) rc += rc_venv out += out_venv diff --git a/library/packaging/npm b/library/packaging/npm index 62179c373aa..c623b6f7e6d 100644 --- a/library/packaging/npm +++ b/library/packaging/npm @@ -125,10 +125,11 @@ class Npm(object): cmd.append(self.name_version) #If path is specified, cd into that path and run the command. + cwd = None if self.path: - os.chdir(self.path) + cwd = self.path - rc, out, err = self.module.run_command(cmd, check_rc=check_rc) + rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) return out return '' diff --git a/library/packaging/pacman b/library/packaging/pacman index 3080cb4a607..a4a24ca5fd1 100644 --- a/library/packaging/pacman +++ b/library/packaging/pacman @@ -90,7 +90,8 @@ def query_package(module, name, state="installed"): # pacman -Q returns 0 if the package is installed, # 1 if it is not installed if state == "installed": - rc = os.system("pacman -Q %s" % (name)) + cmd = "pacman -Q %s" % (name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc == 0: return True @@ -99,7 +100,8 @@ def query_package(module, name, state="installed"): def update_package_db(module): - rc = os.system("pacman -Syy > /dev/null") + cmd = "pacman -Syy > /dev/null" + rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="could not update package db") @@ -118,7 +120,8 @@ def remove_packages(module, packages): if not query_package(module, package): continue - rc = os.system("pacman -%s %s --noconfirm > /dev/null" % (args, package)) + cmd = "pacman -%s %s --noconfirm > /dev/null" % (args, package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="failed to remove %s" % (package)) @@ -145,7 +148,8 @@ def install_packages(module, packages, package_files): else: params = '-S %s' % package - rc = os.system("pacman %s --noconfirm > /dev/null" % (params)) + cmd = "pacman %s --noconfirm > /dev/null" % (params) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="failed to install %s" % (package)) diff --git a/library/packaging/pip b/library/packaging/pip index 35487c32963..aa55bf8ba0b 100644 --- a/library/packaging/pip 
+++ b/library/packaging/pip @@ -253,10 +253,10 @@ def main(): cmd = '%s --no-site-packages %s' % (virtualenv, env) else: cmd = '%s %s' % (virtualenv, env) - os.chdir(tempfile.gettempdir()) + this_dir = tempfile.gettempdir() if chdir: - os.chdir(chdir) - rc, out_venv, err_venv = module.run_command(cmd) + this_dir = os.path.join(this_dir, chdir) + rc, out_venv, err_venv = module.run_command(cmd, cwd=this_dir) out += out_venv err += err_venv if rc != 0: @@ -298,10 +298,11 @@ def main(): if module.check_mode: module.exit_json(changed=True) - os.chdir(tempfile.gettempdir()) + this_dir = tempfile.gettempdir() if chdir: - os.chdir(chdir) - rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix) + this_dir = os.path.join(this_dir, chdir) + + rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=this_dir) out += out_pip err += err_pip if rc == 1 and state == 'absent' and 'not installed' in out_pip: diff --git a/library/packaging/redhat_subscription b/library/packaging/redhat_subscription index e363aa0946a..bb5d655a52f 100644 --- a/library/packaging/redhat_subscription +++ b/library/packaging/redhat_subscription @@ -75,39 +75,13 @@ EXAMPLES = ''' import os import re import types -import subprocess import ConfigParser import shlex -class CommandException(Exception): - pass - - -def run_command(args): - ''' - Convenience method to run a command, specified as a list of arguments. - Returns: - * tuple - (stdout, stder, retcode) - ''' - - # Coerce into a string - if isinstance(args, str): - args = shlex.split(args) - - # Run desired command - proc = subprocess.Popen(args, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - (stdout, stderr) = proc.communicate() - returncode = proc.poll() - if returncode != 0: - cmd = ' '.join(args) - raise CommandException("Command failed (%s): %s\n%s" % (returncode, cmd, stdout)) - return (stdout, stderr, returncode) - - -class RegistrationBase (object): - def __init__(self, username=None, password=None): +class RegistrationBase(object): + def __init__(self, module, username=None, password=None): + self.module = module self.username = username self.password = password @@ -147,9 +121,10 @@ class RegistrationBase (object): class Rhsm(RegistrationBase): - def __init__(self, username=None, password=None): - RegistrationBase.__init__(self, username, password) + def __init__(self, module, username=None, password=None): + RegistrationBase.__init__(self, module, username, password) self.config = self._read_config() + self.module = module def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'): ''' @@ -199,8 +174,8 @@ class Rhsm(RegistrationBase): for k,v in kwargs.items(): if re.search(r'^(system|rhsm)_', k): args.append('--%s=%s' % (k.replace('_','.'), v)) - - run_command(args) + + self.module.run_command(args, check_rc=True) @property def is_registered(self): @@ -216,13 +191,11 @@ class Rhsm(RegistrationBase): os.path.isfile('/etc/pki/consumer/key.pem') args = ['subscription-manager', 'identity'] - try: - (stdout, stderr, retcode) = run_command(args) - except CommandException, e: - return False - else: - # Display some debug output + rc, stdout, stderr = self.module.run_command(args, check_rc=False) + if rc == 0: return True + else: + return False def register(self, username, password, autosubscribe, activationkey): ''' @@ -244,7 +217,7 @@ class Rhsm(RegistrationBase): args.extend(['--password', password]) # Do the needful... 
- run_command(args) + rc, stderr, stdout = self.module.run_command(args, check_rc=True) def unsubscribe(self): ''' @@ -253,7 +226,7 @@ class Rhsm(RegistrationBase): * Exception - if error occurs while running command ''' args = ['subscription-manager', 'unsubscribe', '--all'] - run_command(args) + rc, stderr, stdout = self.module.run_command(args, check_rc=True) def unregister(self): ''' @@ -262,7 +235,7 @@ class Rhsm(RegistrationBase): * Exception - if error occurs while running command ''' args = ['subscription-manager', 'unregister'] - run_command(args) + rc, stderr, stdout = self.module.run_command(args, check_rc=True) def subscribe(self, regexp): ''' @@ -273,7 +246,7 @@ class Rhsm(RegistrationBase): ''' # Available pools ready for subscription - available_pools = RhsmPools() + available_pools = RhsmPools(self.module) for pool in available_pools.filter(regexp): pool.subscribe() @@ -284,7 +257,8 @@ class RhsmPool(object): Convenience class for housing subscription information ''' - def __init__(self, **kwargs): + def __init__(self, module, **kwargs): + self.module = module for k,v in kwargs.items(): setattr(self, k, v) @@ -292,15 +266,20 @@ class RhsmPool(object): return str(self.__getattribute__('_name')) def subscribe(self): - (stdout, stderr, retcode) = run_command("subscription-manager subscribe --pool %s" % self.PoolId) - return True + args = "subscription-manager subscribe --pool %s" % self.PoolId + rc, stdout, stderr = self.module.run_command(args, check_rc=True) + if rc == 0: + return True + else: + return False class RhsmPools(object): """ This class is used for manipulating pools subscriptions with RHSM """ - def __init__(self): + def __init__(self, module): + self.module = module self.products = self._load_product_list() def __iter__(self): @@ -310,7 +289,8 @@ class RhsmPools(object): """ Loads list of all availaible pools for system in data structure """ - (stdout, stderr, retval) = run_command("subscription-manager list --available") + args = "subscription-manager list --available" + rc, stdout, stderr = self.module.run_command(args, check_rc=True) products = [] for line in stdout.split('\n'): @@ -326,7 +306,7 @@ class RhsmPools(object): value = value.strip() if key in ['ProductName', 'SubscriptionName']: # Remember the name for later processing - products.append(RhsmPool(_name=value, key=value)) + products.append(RhsmPool(self.module, _name=value, key=value)) elif products: # Associate value with most recently recorded product products[-1].__setattr__(key, value) @@ -348,7 +328,7 @@ class RhsmPools(object): def main(): # Load RHSM configuration from file - rhn = Rhsm() + rhn = Rhsm(AnsibleModule()) module = AnsibleModule( argument_spec = dict( @@ -364,6 +344,7 @@ def main(): ) ) + rhn.module = module state = module.params['state'] username = module.params['username'] password = module.params['password'] diff --git a/library/packaging/rhn_register b/library/packaging/rhn_register index 5e8c3718f98..28d91a6a027 100644 --- a/library/packaging/rhn_register +++ b/library/packaging/rhn_register @@ -72,12 +72,7 @@ EXAMPLES = ''' ''' import sys -import os -import re import types -import subprocess -import ConfigParser -import shlex import xmlrpclib import urlparse @@ -90,75 +85,9 @@ except ImportError, e: module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?\n%s" % e) -class CommandException(Exception): - pass - - -def run_command(args): - ''' - Convenience method to run a command, specified as a list of arguments. 
-    Returns:
-        * tuple - (stdout, stder, retcode)
-    '''
-
-    # Coerce into a string
-    if isinstance(args, str):
-        args = shlex.split(args)
-
-    # Run desired command
-    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
-                            stderr=subprocess.STDOUT)
-    (stdout, stderr) = proc.communicate()
-    returncode = proc.poll()
-    if returncode != 0:
-        cmd = ' '.join(args)
-        raise CommandException("Command failed (%s): %s\n%s" % (returncode, cmd, stdout))
-    return (stdout, stderr, returncode)
-
-
-class RegistrationBase (object):
-    def __init__(self, username=None, password=None):
-        self.username = username
-        self.password = password
-
-    def configure(self):
-        raise NotImplementedError("Must be implemented by a sub-class")
-
-    def enable(self):
-        # Remove any existing redhat.repo
-        redhat_repo = '/etc/yum.repos.d/redhat.repo'
-        if os.path.isfile(redhat_repo):
-            os.unlink(redhat_repo)
-
-    def register(self):
-        raise NotImplementedError("Must be implemented by a sub-class")
-
-    def unregister(self):
-        raise NotImplementedError("Must be implemented by a sub-class")
-
-    def unsubscribe(self):
-        raise NotImplementedError("Must be implemented by a sub-class")
-
-    def update_plugin_conf(self, plugin, enabled=True):
-        plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
-        if os.path.isfile(plugin_conf):
-            cfg = ConfigParser.ConfigParser()
-            cfg.read([plugin_conf])
-            if enabled:
-                cfg.set('main', 'enabled', 1)
-            else:
-                cfg.set('main', 'enabled', 0)
-            fd = open(plugin_conf, 'rwa+')
-            cfg.write(fd)
-            fd.close()
-
-    def subscribe(self, **kwargs):
-        raise NotImplementedError("Must be implemented by a sub-class")
-
-
 class Rhn(RegistrationBase):

-    def __init__(self, username=None, password=None):
-        RegistrationBase.__init__(self, username, password)
+    def __init__(self, module, username=None, password=None):
+        RegistrationBase.__init__(self, module, username, password)
         self.config = self.load_config()

@@ -271,7 +200,7 @@ class Rhn(RegistrationBase):
             register_cmd += " --activationkey '%s'" % activationkey
         # FIXME - support --profilename
         # FIXME - support --systemorgid
-        run_command(register_cmd)
+        rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True)

     def api(self, method, *args):
         '''
@@ -309,14 +238,14 @@ class Rhn(RegistrationBase):
         Subscribe to requested yum repositories using 'rhn-channel' command
         '''
         rhn_channel_cmd = "rhn-channel --user='%s' --password='%s'" % (self.username, self.password)
-        (stdout, stderr, rc) = run_command(rhn_channel_cmd + " --available-channels")
+        rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --available-channels", check_rc=True)

         # Enable requested repoid's
         for wanted_channel in channels:
             # Each inserted repo regexp will be matched. If no match, no success.
            for available_channel in stdout.rstrip().split('\n'): # .rstrip() because of \n at the end -> empty string at the end
                 if re.search(wanted_channel, available_channel):
-                    run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel)
+                    rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel, check_rc=True)

 def main():
@@ -379,4 +308,6 @@ def main():

 # import module snippets
 from ansible.module_utils.basic import *
+from ansible.module_utils.redhat import *
+
 main()
diff --git a/library/packaging/urpmi b/library/packaging/urpmi
index b001ed94dee..72dfef02011 100644
--- a/library/packaging/urpmi
+++ b/library/packaging/urpmi
@@ -91,7 +91,8 @@ def query_package(module, name):

     # rpm -q returns 0 if the package is installed,
     # 1 if it is not installed
-    rc = os.system("rpm -q %s" % (name))
+    cmd = "rpm -q %s" % (name)
+    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
     if rc == 0:
         return True
     else:
@@ -103,13 +104,14 @@ def query_package_provides(module, name):

     # rpm -q returns 0 if the package is installed,
     # 1 if it is not installed
-    rc = os.system("rpm -q --provides %s >/dev/null" % (name))
+    cmd = "rpm -q --provides %s >/dev/null" % (name)
+    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
     return rc == 0

 def update_package_db(module):
-    rc = os.system("urpmi.update -a -q")
-
+    cmd = "urpmi.update -a -q"
+    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
     if rc != 0:
         module.fail_json(msg="could not update package db")

@@ -123,7 +125,8 @@ def remove_packages(module, packages):
         if not query_package(module, package):
             continue

-        rc = os.system("%s --auto %s > /dev/null" % (URPME_PATH, package))
+        cmd = "%s --auto %s > /dev/null" % (URPME_PATH, package)
+        rc, stdout, stderr = module.run_command(cmd, check_rc=False)

         if rc != 0:
             module.fail_json(msg="failed to remove %s" % (package))
diff --git a/library/source_control/bzr b/library/source_control/bzr
index bc2dfc3089f..5217e469900 100644
--- a/library/source_control/bzr
+++ b/library/source_control/bzr
@@ -75,16 +75,17 @@ class Bzr(object):
         self.version = version
         self.bzr_path = bzr_path

-    def _command(self, args_list, **kwargs):
+    def _command(self, args_list, cwd=None, **kwargs):
         (rc, out, err) = self.module.run_command(
-            [self.bzr_path] + args_list, **kwargs)
+            [self.bzr_path] + args_list, cwd=cwd, **kwargs)
         return (rc, out, err)

     def get_version(self):
         '''samples the version of the bzr branch'''
-        os.chdir(self.dest)
+
         cmd = "%s revno" % self.bzr_path
-        revno = os.popen(cmd).read().strip()
+        rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+        revno = stdout.strip()
         return revno

     def clone(self):
@@ -94,17 +95,18 @@ class Bzr(object):
             os.makedirs(dest_dirname)
         except:
             pass
-        os.chdir(dest_dirname)
         if self.version.lower() != 'head':
             args_list = ["branch", "-r", self.version, self.parent, self.dest]
         else:
             args_list = ["branch", self.parent, self.dest]
-        return self._command(args_list, check_rc=True)
+        return self._command(args_list, check_rc=True, cwd=dest_dirname)

     def has_local_mods(self):
-        os.chdir(self.dest)
+
         cmd = "%s status -S" % self.bzr_path
-        lines = os.popen(cmd).read().splitlines()
+        rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+        lines = stdout.splitlines()
+
         lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines)
         return len(lines) > 0

@@ -114,30 +116,27 @@ class Bzr(object):
         Discards any changes to tracked files in the working tree
         since that commit.
''' - os.chdir(self.dest) if not force and self.has_local_mods(): self.module.fail_json(msg="Local modifications exist in branch (force=no).") - return self._command(["revert"], check_rc=True) + return self._command(["revert"], check_rc=True, cwd=self.dest) def fetch(self): '''updates branch from remote sources''' - os.chdir(self.dest) if self.version.lower() != 'head': - (rc, out, err) = self._command(["pull", "-r", self.version]) + (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest) else: - (rc, out, err) = self._command(["pull"]) + (rc, out, err) = self._command(["pull"], cwd=self.dest) if rc != 0: self.module.fail_json(msg="Failed to pull") return (rc, out, err) def switch_version(self): '''once pulled, switch to a particular revno or revid''' - os.chdir(self.dest) if self.version.lower() != 'head': args_list = ["revert", "-r", self.version] else: args_list = ["revert"] - return self._command(args_list, check_rc=True) + return self._command(args_list, check_rc=True, cwd=self.dest) # =========================================== diff --git a/library/source_control/git b/library/source_control/git index ca876c666b5..4f885c94001 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -181,11 +181,12 @@ def set_git_ssh(ssh_wrapper, key_file, ssh_opts): if ssh_opts: os.environ["GIT_SSH_OPTS"] = ssh_opts -def get_version(git_path, dest, ref="HEAD"): +def get_version(module, git_path, dest, ref="HEAD"): ''' samples the version of the git repo ''' - os.chdir(dest) + cmd = "%s rev-parse %s" % (git_path, ref) - sha = os.popen(cmd).read().rstrip("\n") + rc, stdout, stderr = module.run_command(cmd, cwd=dest) + sha = stdout.rstrip('\n') return sha def clone(git_path, module, repo, dest, remote, depth, version, bare, reference): @@ -195,7 +196,6 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, reference) os.makedirs(dest_dirname) except: pass - os.chdir(dest_dirname) cmd = [ git_path, 'clone' ] if bare: cmd.append('--bare') @@ -209,19 +209,19 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, reference) if reference: cmd.extend([ '--reference', str(reference) ]) cmd.extend([ repo, dest ]) - module.run_command(cmd, check_rc=True) + module.run_command(cmd, check_rc=True, cwd=dest_dirname) if bare: - os.chdir(dest) if remote != 'origin': - module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True) + module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest) -def has_local_mods(git_path, dest, bare): +def has_local_mods(module, git_path, dest, bare): if bare: return False - os.chdir(dest) - cmd = "%s status -s" % (git_path,) - lines = os.popen(cmd).read().splitlines() - lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) + + cmd = "%s status -s" % (git_path) + rc, stdout, stderr = module.run_command(cmd, cwd=dest) + lines = stdout.splitlines() + return len(lines) > 0 def reset(git_path, module, dest): @@ -230,16 +230,16 @@ def reset(git_path, module, dest): Discards any changes to tracked files in working tree since that commit. 
''' - os.chdir(dest) cmd = "%s reset --hard HEAD" % (git_path,) - return module.run_command(cmd, check_rc=True) + return module.run_command(cmd, check_rc=True, cwd=dest) def get_remote_head(git_path, module, dest, version, remote, bare): cloning = False + cwd = None if remote == module.params['repo']: cloning = True else: - os.chdir(dest) + cwd = dest if version == 'HEAD': if cloning: # cloning the repo, just get the remote's HEAD version @@ -255,7 +255,7 @@ def get_remote_head(git_path, module, dest, version, remote, bare): # appears to be a sha1. return as-is since it appears # cannot check for a specific sha1 on remote return version - (rc, out, err) = module.run_command(cmd, check_rc=True ) + (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd) if len(out) < 1: module.fail_json(msg="Could not determine remote revision for %s" % version) rev = out.split()[0] @@ -270,10 +270,9 @@ def is_remote_tag(git_path, module, dest, remote, version): return False def get_branches(git_path, module, dest): - os.chdir(dest) branches = [] cmd = '%s branch -a' % (git_path,) - (rc, out, err) = module.run_command(cmd) + (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: module.fail_json(msg="Could not determine branch data - received %s" % out) for line in out.split('\n'): @@ -281,10 +280,9 @@ def get_branches(git_path, module, dest): return branches def get_tags(git_path, module, dest): - os.chdir(dest) tags = [] cmd = '%s tag' % (git_path,) - (rc, out, err) = module.run_command(cmd) + (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: module.fail_json(msg="Could not determine tag data - received %s" % out) for line in out.split('\n'): @@ -352,18 +350,17 @@ def get_head_branch(git_path, module, dest, remote, bare=False): def fetch(git_path, module, repo, dest, version, remote, bare): ''' updates repo from remote sources ''' - os.chdir(dest) if bare: - (rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*']) + (rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*'], cwd=dest) else: - (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote)) + (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote), cwd=dest) if rc != 0: module.fail_json(msg="Failed to download remote objects and refs") if bare: - (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*']) + (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*'], cwd=dest) else: - (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote)) + (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest) if rc != 0: module.fail_json(msg="Failed to download remote objects and refs") (rc, out3, err3) = submodule_update(git_path, module, dest) @@ -371,28 +368,26 @@ def fetch(git_path, module, repo, dest, version, remote, bare): def submodule_update(git_path, module, dest): ''' init and update any submodules ''' - os.chdir(dest) # skip submodule commands if .gitmodules is not present if not os.path.exists(os.path.join(dest, '.gitmodules')): return (0, '', '') cmd = [ git_path, 'submodule', 'sync' ] - (rc, out, err) = module.run_command(cmd, check_rc=True) + (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ] - (rc, out, err) = module.run_command(cmd) + (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: 
module.fail_json(msg="Failed to init/update submodules") return (rc, out, err) def switch_version(git_path, module, dest, remote, version): ''' once pulled, switch to a particular SHA, tag, or branch ''' - os.chdir(dest) cmd = '' if version != 'HEAD': if is_remote_branch(git_path, module, dest, remote, version): if not is_local_branch(git_path, module, dest, version): cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version) else: - (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version)) + (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest) if rc != 0: module.fail_json(msg="Failed to checkout branch %s" % version) cmd = "%s reset --hard %s/%s" % (git_path, remote, version) @@ -400,11 +395,11 @@ def switch_version(git_path, module, dest, remote, version): cmd = "%s checkout --force %s" % (git_path, version) else: branch = get_head_branch(git_path, module, dest, remote) - (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch)) + (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest) if rc != 0: module.fail_json(msg="Failed to checkout branch %s" % branch) cmd = "%s reset --hard %s" % (git_path, remote) - (rc, out1, err1) = module.run_command(cmd) + (rc, out1, err1) = module.run_command(cmd, cwd=dest) if rc != 0: if version != 'HEAD': module.fail_json(msg="Failed to checkout %s" % (version)) @@ -484,12 +479,12 @@ def main(): # Just return having found a repo already in the dest path # this does no checking that the repo is the actual repo # requested. - before = get_version(git_path, dest) + before = get_version(module, git_path, dest) module.exit_json(changed=False, before=before, after=before) else: # else do a pull - local_mods = has_local_mods(git_path, dest, bare) - before = get_version(git_path, dest) + local_mods = has_local_mods(module, git_path, dest, bare) + before = get_version(module, git_path, dest) if local_mods: # failure should happen regardless of check mode if not force: @@ -519,7 +514,7 @@ def main(): switch_version(git_path, module, dest, remote, version) # determine if we changed anything - after = get_version(git_path, dest) + after = get_version(module, git_path, dest) changed = False if before != after or local_mods: diff --git a/library/system/service b/library/system/service index 2e26a47b636..5180a14d82b 100644 --- a/library/system/service +++ b/library/system/service @@ -207,7 +207,9 @@ class Service(object): os._exit(0) # Start the command - p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1])) + if isinstance(cmd, basestring): + cmd = shlex.split(cmd) + p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1])) stdout = "" stderr = "" fds = [p.stdout, p.stderr] diff --git a/library/system/setup b/library/system/setup index 941a5dcd31a..1c156f6ce34 100644 --- a/library/system/setup +++ b/library/system/setup @@ -29,7 +29,6 @@ import socket import struct import datetime import getpass -import subprocess import ConfigParser import StringIO @@ -1430,7 +1429,8 @@ class LinuxNetwork(Network): """ platform = 'Linux' - def __init__(self): + def __init__(self, module): + self.module = module Network.__init__(self) def populate(self): @@ -1616,12 +1616,15 @@ class LinuxNetwork(Network): ips['all_ipv6_addresses'].append(address) ip_path = module.get_bin_path("ip") - primary_data = 
subprocess.Popen( - [ip_path, 'addr', 'show', 'primary', device], - stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0] - secondary_data = subprocess.Popen( - [ip_path, 'addr', 'show', 'secondary', device], - stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0] + + args = [ip_path, 'addr', 'show', 'primary', device] + rc, stdout, stderr = self.module.run_command(args) + primary_data = stdout + + args = [ip_path, 'addr', 'show', 'secondary', device] + rc, stdout, stderr = self.module.run_command(args) + secondary_data = stdout + parse_ip_output(primary_data) parse_ip_output(secondary_data, secondary=True) @@ -2281,11 +2284,11 @@ def get_file_content(path, default=None): data = default return data -def ansible_facts(): +def ansible_facts(module): facts = {} facts.update(Facts().populate()) facts.update(Hardware().populate()) - facts.update(Network().populate()) + facts.update(Network(module).populate()) facts.update(Virtual().populate()) return facts @@ -2294,7 +2297,7 @@ def ansible_facts(): def run_setup(module): setup_options = {} - facts = ansible_facts() + facts = ansible_facts(module) for (k, v) in facts.items(): setup_options["ansible_%s" % k.replace('-', '_')] = v diff --git a/library/web_infrastructure/django_manage b/library/web_infrastructure/django_manage index 68eb92c1bfe..b02a9398f52 100644 --- a/library/web_infrastructure/django_manage +++ b/library/web_infrastructure/django_manage @@ -232,7 +232,6 @@ def main(): _ensure_virtualenv(module) - os.chdir(app_path) cmd = "python manage.py %s" % (command, ) if command in noinput_commands: @@ -251,7 +250,7 @@ def main(): if module.params[param]: cmd = '%s %s' % (cmd, module.params[param]) - rc, out, err = module.run_command(cmd) + rc, out, err = module.run_command(cmd, cwd=app_path) if rc != 0: if command == 'createcachetable' and 'table' in err and 'already exists' in err: out = 'Already exists.' 
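The run_command() rework in the patch above changes how string commands are executed: a list still runs with shell=False, a plain string is now tokenized with shlex.split() and also run with shell=False, and only an explicit use_unsafe_shell=True restores shell=True. A standalone sketch of that dispatch under the same Python 2-era assumptions (safe_run is an illustrative name, not the module's API):

    import shlex
    import subprocess

    def safe_run(args, use_unsafe_shell=False):
        shell = False
        if isinstance(args, basestring) and use_unsafe_shell:
            shell = True                  # caller explicitly opted in
        elif isinstance(args, basestring):
            args = shlex.split(args)      # tokenize; no shell interpretation
        proc = subprocess.Popen(args, shell=shell,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return proc.returncode, out, err

With shell=False, metacharacters such as ';' or '|' in a string command are passed to the program as literal arguments rather than interpreted by /bin/sh, which is exactly the injection vector this patch closes off.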
From 0d6f6ad282fd0d3fb581da9dcbafd33521eb67be Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 10 Mar 2014 16:15:44 -0500 Subject: [PATCH 230/772] Implement new default cipher class AES256 --- bin/ansible-vault | 8 +- lib/ansible/utils/vault.py | 247 +++++++++++++++--- test/units/TestVault.py | 55 ++-- test/units/TestVaultEditor.py | 141 ++++++++++ .../units/vault_test_data/foo-ansible-1.0.yml | 4 + .../units/vault_test_data/foo-ansible-1.1.yml | 6 + 6 files changed, 410 insertions(+), 51 deletions(-) create mode 100644 test/units/TestVaultEditor.py create mode 100644 test/units/vault_test_data/foo-ansible-1.0.yml create mode 100644 test/units/vault_test_data/foo-ansible-1.1.yml diff --git a/bin/ansible-vault b/bin/ansible-vault index 902653d40bf..2c8094d13b1 100755 --- a/bin/ansible-vault +++ b/bin/ansible-vault @@ -52,7 +52,7 @@ def build_option_parser(action): sys.exit() # options for all actions - #parser.add_option('-c', '--cipher', dest='cipher', default="AES", help="cipher to use") + #parser.add_option('-c', '--cipher', dest='cipher', default="AES256", help="cipher to use") parser.add_option('--debug', dest='debug', action="store_true", help="debug") parser.add_option('--vault-password-file', dest='password_file', help="vault password file") @@ -119,7 +119,7 @@ def execute_create(args, options, parser): else: password = _read_password(options.password_file) - cipher = 'AES' + cipher = 'AES256' if hasattr(options, 'cipher'): cipher = options.cipher @@ -133,7 +133,7 @@ def execute_decrypt(args, options, parser): else: password = _read_password(options.password_file) - cipher = 'AES' + cipher = 'AES256' if hasattr(options, 'cipher'): cipher = options.cipher @@ -169,7 +169,7 @@ def execute_encrypt(args, options, parser): else: password = _read_password(options.password_file) - cipher = 'AES' + cipher = 'AES256' if hasattr(options, 'cipher'): cipher = options.cipher diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 9a43fee1b92..169dc8333b8 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -30,6 +30,22 @@ from binascii import hexlify from binascii import unhexlify from ansible import constants as C +from Crypto.Hash import SHA256, HMAC + +# Counter import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Util import Counter + HAS_COUNTER = True +except ImportError: + HAS_COUNTER = False + +# KDF import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Protocol.KDF import PBKDF2 + HAS_PBKDF2 = True +except ImportError: + HAS_PBKDF2 = False + # AES IMPORTS try: from Crypto.Cipher import AES as AES @@ -37,15 +53,17 @@ try: except ImportError: HAS_AES = False +CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. 
You may fix this with OS-specific commands such as: rpm -e --nodeps python-crypto; pip install pycrypto" + HEADER='$ANSIBLE_VAULT' -CIPHER_WHITELIST=['AES'] +CIPHER_WHITELIST=['AES', 'AES256'] class VaultLib(object): def __init__(self, password): self.password = password self.cipher_name = None - self.version = '1.0' + self.version = '1.1' def is_encrypted(self, data): if data.startswith(HEADER): @@ -59,7 +77,8 @@ class VaultLib(object): raise errors.AnsibleError("data is already encrypted") if not self.cipher_name: - raise errors.AnsibleError("the cipher must be set before encrypting data") + self.cipher_name = "AES256" + #raise errors.AnsibleError("the cipher must be set before encrypting data") if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST: cipher = globals()['Vault' + self.cipher_name] @@ -67,13 +86,17 @@ class VaultLib(object): else: raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name) + """ # combine sha + data this_sha = sha256(data).hexdigest() tmp_data = this_sha + "\n" + data + """ + # encrypt sha + data - tmp_data = this_cipher.encrypt(tmp_data, self.password) + enc_data = this_cipher.encrypt(data, self.password) + # add header - tmp_data = self._add_headers_and_hexify_encrypted_data(tmp_data) + tmp_data = self._add_header(enc_data) return tmp_data def decrypt(self, data): @@ -83,8 +106,9 @@ class VaultLib(object): if not self.is_encrypted(data): raise errors.AnsibleError("data is not encrypted") - # clean out header, hex and sha - data = self._split_headers_and_get_unhexified_data(data) + # clean out header + data = self._split_header(data) + # create the cipher object if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST: @@ -96,33 +120,26 @@ class VaultLib(object): # try to unencrypt data data = this_cipher.decrypt(data, self.password) - # split out sha and verify decryption - split_data = data.split("\n") - this_sha = split_data[0] - this_data = '\n'.join(split_data[1:]) - test_sha = sha256(this_data).hexdigest() - if this_sha != test_sha: - raise errors.AnsibleError("Decryption failed") - - return this_data + return data - def _add_headers_and_hexify_encrypted_data(self, data): - # combine header and hexlified encrypted data in 80 char columns + def _add_header(self, data): + # combine header and encrypted data in 80 char columns - tmpdata = hexlify(data) - tmpdata = [tmpdata[i:i+80] for i in range(0, len(tmpdata), 80)] + #tmpdata = hexlify(data) + tmpdata = [data[i:i+80] for i in range(0, len(data), 80)] if not self.cipher_name: raise errors.AnsibleError("the cipher must be set before adding a header") dirty_data = HEADER + ";" + str(self.version) + ";" + self.cipher_name + "\n" + for l in tmpdata: dirty_data += l + '\n' return dirty_data - def _split_headers_and_get_unhexified_data(self, data): + def _split_header(self, data): # used by decrypt tmpdata = data.split('\n') @@ -130,14 +147,22 @@ class VaultLib(object): self.version = str(tmpheader[1].strip()) self.cipher_name = str(tmpheader[2].strip()) - clean_data = ''.join(tmpdata[1:]) + clean_data = '\n'.join(tmpdata[1:]) + """ # strip out newline, join, unhex clean_data = [ x.strip() for x in clean_data ] clean_data = unhexlify(''.join(clean_data)) + """ return clean_data + def __enter__(self): + return self + + def __exit__(self, *err): + pass + class VaultEditor(object): # uses helper methods for write_file(self, filename, data) # to write a file so that code isn't duplicated for simple @@ -153,6 +178,9 @@ class 
VaultEditor(object): def create_file(self): """ create a new encrypted file """ + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise errors.AnsibleError(CRYPTO_UPGRADE) + if os.path.isfile(self.filename): raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename) @@ -166,6 +194,10 @@ class VaultEditor(object): self.write_data(enc_data, self.filename) def decrypt_file(self): + + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise errors.AnsibleError(CRYPTO_UPGRADE) + if not os.path.isfile(self.filename): raise errors.AnsibleError("%s does not exist" % self.filename) @@ -179,6 +211,9 @@ class VaultEditor(object): def edit_file(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise errors.AnsibleError(CRYPTO_UPGRADE) + # decrypt to tmpfile tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) @@ -191,9 +226,11 @@ class VaultEditor(object): call([EDITOR, tmp_path]) new_data = self.read_data(tmp_path) - # create new vault and set cipher to old + # create new vault new_vault = VaultLib(self.password) - new_vault.cipher_name = this_vault.cipher_name + + # we want the cipher to default to AES256 + #new_vault.cipher_name = this_vault.cipher_name # encrypt new data a write out to tmp enc_data = new_vault.encrypt(new_data) @@ -203,6 +240,10 @@ class VaultEditor(object): self.shuffle_files(tmp_path, self.filename) def encrypt_file(self): + + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise errors.AnsibleError(CRYPTO_UPGRADE) + if not os.path.isfile(self.filename): raise errors.AnsibleError("%s does not exist" % self.filename) @@ -216,14 +257,20 @@ class VaultEditor(object): raise errors.AnsibleError("%s is already encrypted" % self.filename) def rekey_file(self, new_password): + + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise errors.AnsibleError(CRYPTO_UPGRADE) + # decrypt tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) dec_data = this_vault.decrypt(tmpdata) - # create new vault, set cipher to old and password to new + # create new vault new_vault = VaultLib(new_password) - new_vault.cipher_name = this_vault.cipher_name + + # we want to force cipher to the default + #new_vault.cipher_name = this_vault.cipher_name # re-encrypt data and re-write file enc_data = new_vault.encrypt(dec_data) @@ -254,11 +301,14 @@ class VaultEditor(object): class VaultAES(object): + # this version has been obsoleted by the VaultAES256 class + # which uses encrypt-then-mac (fixing order) and also improving the KDF used + # code remains for upgrade purposes only # http://stackoverflow.com/a/16761459 def __init__(self): if not HAS_AES: - raise errors.AnsibleError("pycrypto is not installed. 
Fix this with your package manager, for instance, yum-install python-crypto OR (apt equivalent)") + raise errors.AnsibleError(CRYPTO_UPGRADE) def aes_derive_key_and_iv(self, password, salt, key_length, iv_length): @@ -278,7 +328,12 @@ class VaultAES(object): """ Read plaintext data from in_file and write encrypted to out_file """ - in_file = BytesIO(data) + + # combine sha + data + this_sha = sha256(data).hexdigest() + tmp_data = this_sha + "\n" + data + + in_file = BytesIO(tmp_data) in_file.seek(0) out_file = BytesIO() @@ -301,14 +356,21 @@ class VaultAES(object): out_file.write(cipher.encrypt(chunk)) out_file.seek(0) - return out_file.read() + enc_data = out_file.read() + tmp_data = hexlify(enc_data) + return tmp_data + + def decrypt(self, data, password, key_length=32): """ Read encrypted data from in_file and write decrypted to out_file """ # http://stackoverflow.com/a/14989032 + data = ''.join(data.split('\n')) + data = unhexlify(data) + in_file = BytesIO(data) in_file.seek(0) out_file = BytesIO() @@ -330,6 +392,129 @@ class VaultAES(object): # reset the stream pointer to the beginning out_file.seek(0) - return out_file.read() + new_data = out_file.read() + + # split out sha and verify decryption + split_data = new_data.split("\n") + this_sha = split_data[0] + this_data = '\n'.join(split_data[1:]) + test_sha = sha256(this_data).hexdigest() + + if this_sha != test_sha: + raise errors.AnsibleError("Decryption failed") + + #return out_file.read() + return this_data + + +class VaultAES256(object): + + """ + Vault implementation using AES-CTR with an HMAC-SHA256 authentication code. + Keys are derived using PBKDF2 + """ + + # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html + + def gen_key_initctr(self, password, salt): + # 16 for AES 128, 32 for AES256 + keylength = 32 + + # match the size used for counter.new to avoid extra work + ivlength = 16 + + hash_function = SHA256 + + # make two keys and one iv + pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest() + + if not HAS_PBKDF2: + raise errors.AnsibleError(CRYPTO_UPGRADE) + + derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength, + count=10000, prf=pbkdf2_prf) + + #import epdb; epdb.st() + key1 = derivedkey[:keylength] + key2 = derivedkey[keylength:(keylength * 2)] + iv = derivedkey[(keylength * 2):(keylength * 2) + ivlength] + + return key1, key2, hexlify(iv) + + + def encrypt(self, data, password): + + salt = os.urandom(32) + key1, key2, iv = self.gen_key_initctr(password, salt) + + # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3 + bs = AES.block_size + padding_length = (bs - len(data) % bs) or bs + data += padding_length * chr(padding_length) + + # COUNTER.new PARAMETERS + # 1) nbits (integer) - Length of the counter, in bits. + # 2) initial_value (integer) - initial value of the counter. 
"iv" from gen_key_initctr + + if not HAS_COUNTER: + raise errors.AnsibleError(CRYPTO_UPGRADE) + ctr = Counter.new(128, initial_value=long(iv, 16)) + + # AES.new PARAMETERS + # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from gen_key_initctr + # 2) MODE_CTR, is the recommended mode + # 3) counter= + + cipher = AES.new(key1, AES.MODE_CTR, counter=ctr) + + # ENCRYPT PADDED DATA + cryptedData = cipher.encrypt(data) + + # COMBINE SALT, DIGEST AND DATA + hmac = HMAC.new(key2, cryptedData, SHA256) + message = "%s\n%s\n%s" % ( hexlify(salt), hmac.hexdigest(), hexlify(cryptedData) ) + message = hexlify(message) + return message + + def decrypt(self, data, password): + + # SPLIT SALT, DIGEST, AND DATA + data = ''.join(data.split("\n")) + data = unhexlify(data) + salt, cryptedHmac, cryptedData = data.split("\n", 2) + salt = unhexlify(salt) + cryptedData = unhexlify(cryptedData) + + key1, key2, iv = self.gen_key_initctr(password, salt) + + # EXIT EARLY IF DIGEST DOESN'T MATCH + hmacDecrypt = HMAC.new(key2, cryptedData, SHA256) + if not self.is_equal(cryptedHmac, hmacDecrypt.hexdigest()): + return None + + # SET THE COUNTER AND THE CIPHER + if not HAS_COUNTER: + raise errors.AnsibleError(CRYPTO_UPGRADE) + ctr = Counter.new(128, initial_value=long(iv, 16)) + cipher = AES.new(key1, AES.MODE_CTR, counter=ctr) + + # DECRYPT PADDED DATA + decryptedData = cipher.decrypt(cryptedData) + + # UNPAD DATA + padding_length = ord(decryptedData[-1]) + decryptedData = decryptedData[:-padding_length] + + return decryptedData + + def is_equal(self, a, b): + # http://codahale.com/a-lesson-in-timing-attacks/ + if len(a) != len(b): + return False + + result = 0 + for x, y in zip(a, b): + result |= ord(x) ^ ord(y) + return result == 0 + - diff --git a/test/units/TestVault.py b/test/units/TestVault.py index bcb494965cf..415d5c14aa8 100644 --- a/test/units/TestVault.py +++ b/test/units/TestVault.py @@ -12,6 +12,21 @@ from nose.plugins.skip import SkipTest from ansible import errors from ansible.utils.vault import VaultLib + +# Counter import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Util import Counter + HAS_COUNTER = True +except ImportError: + HAS_COUNTER = False + +# KDF import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Protocol.KDF import PBKDF2 + HAS_PBKDF2 = True +except ImportError: + HAS_PBKDF2 = False + # AES IMPORTS try: from Crypto.Cipher import AES as AES @@ -26,8 +41,8 @@ class TestVaultLib(TestCase): slots = ['is_encrypted', 'encrypt', 'decrypt', - '_add_headers_and_hexify_encrypted_data', - '_split_headers_and_get_unhexified_data',] + '_add_header', + '_split_header',] for slot in slots: assert hasattr(v, slot), "VaultLib is missing the %s method" % slot @@ -41,8 +56,7 @@ class TestVaultLib(TestCase): v = VaultLib('ansible') v.cipher_name = "TEST" sensitive_data = "ansible" - sensitive_hex = hexlify(sensitive_data) - data = v._add_headers_and_hexify_encrypted_data(sensitive_data) + data = v._add_header(sensitive_data) lines = data.split('\n') assert len(lines) > 1, "failed to properly add header" header = lines[0] @@ -52,19 +66,18 @@ class TestVaultLib(TestCase): assert header_parts[0] == '$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT" assert header_parts[1] == v.version, "header version is incorrect" assert header_parts[2] == 'TEST', "header does end with cipher name" - assert lines[1] == sensitive_hex - def test_remove_header(self): + def test_split_header(self): v = VaultLib('ansible') - data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % 
hexlify("ansible") - rdata = v._split_headers_and_get_unhexified_data(data) + data = "$ANSIBLE_VAULT;9.9;TEST\nansible" + rdata = v._split_header(data) lines = rdata.split('\n') assert lines[0] == "ansible" assert v.cipher_name == 'TEST', "cipher name was not set" assert v.version == "9.9" - def test_encyrpt_decrypt(self): - if not HAS_AES: + def test_encrypt_decrypt_aes(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest v = VaultLib('ansible') v.cipher_name = 'AES' @@ -73,8 +86,18 @@ class TestVaultLib(TestCase): assert enc_data != "foobar", "encryption failed" assert dec_data == "foobar", "decryption failed" + def test_encrypt_decrypt_aes256(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + v = VaultLib('ansible') + v.cipher_name = 'AES256' + enc_data = v.encrypt("foobar") + dec_data = v.decrypt(enc_data) + assert enc_data != "foobar", "encryption failed" + assert dec_data == "foobar", "decryption failed" + def test_encrypt_encrypted(self): - if not HAS_AES: + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest v = VaultLib('ansible') v.cipher_name = 'AES' @@ -87,7 +110,7 @@ class TestVaultLib(TestCase): assert error_hit, "No error was thrown when trying to encrypt data with a header" def test_decrypt_decrypted(self): - if not HAS_AES: + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest v = VaultLib('ansible') data = "ansible" @@ -99,7 +122,8 @@ class TestVaultLib(TestCase): assert error_hit, "No error was thrown when trying to decrypt data without a header" def test_cipher_not_set(self): - if not HAS_AES: + # not setting the cipher should default to AES256 + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest v = VaultLib('ansible') data = "ansible" @@ -108,6 +132,5 @@ class TestVaultLib(TestCase): enc_data = v.encrypt(data) except errors.AnsibleError, e: error_hit = True - assert error_hit, "No error was thrown when trying to encrypt data without the cipher set" - - + assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set" + assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name diff --git a/test/units/TestVaultEditor.py b/test/units/TestVaultEditor.py new file mode 100644 index 00000000000..4d3f99e89a9 --- /dev/null +++ b/test/units/TestVaultEditor.py @@ -0,0 +1,141 @@ +#!/usr/bin/env python + +from unittest import TestCase +import getpass +import os +import shutil +import time +import tempfile +from binascii import unhexlify +from binascii import hexlify +from nose.plugins.skip import SkipTest + +from ansible import errors +from ansible.utils.vault import VaultLib +from ansible.utils.vault import VaultEditor + +# Counter import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Util import Counter + HAS_COUNTER = True +except ImportError: + HAS_COUNTER = False + +# KDF import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Protocol.KDF import PBKDF2 + HAS_PBKDF2 = True +except ImportError: + HAS_PBKDF2 = False + +# AES IMPORTS +try: + from Crypto.Cipher import AES as AES + HAS_AES = True +except ImportError: + HAS_AES = False + +class TestVaultEditor(TestCase): + + def test_methods_exist(self): + v = VaultEditor(None, None, None) + slots = ['create_file', + 'decrypt_file', + 'edit_file', + 'encrypt_file', + 'rekey_file', + 'read_data', + 'write_data', + 'shuffle_files'] + for slot in slots: + assert hasattr(v, slot), "VaultLib is missing the %s method" % slot + + def 
test_decrypt_1_0(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + dirpath = tempfile.mkdtemp() + filename = os.path.join(dirpath, "foo-ansible-1.0.yml") + shutil.rmtree(dirpath) + shutil.copytree("vault_test_data", dirpath) + ve = VaultEditor(None, "ansible", filename) + + # make sure the password functions for the cipher + error_hit = False + try: + ve.decrypt_file() + except errors.AnsibleError, e: + error_hit = True + + # verify decrypted content + f = open(filename, "rb") + fdata = f.read() + f.close() + + shutil.rmtree(dirpath) + assert error_hit == False, "error decrypting 1.0 file" + assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + + def test_decrypt_1_1(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + dirpath = tempfile.mkdtemp() + filename = os.path.join(dirpath, "foo-ansible-1.1.yml") + shutil.rmtree(dirpath) + shutil.copytree("vault_test_data", dirpath) + ve = VaultEditor(None, "ansible", filename) + + # make sure the password functions for the cipher + error_hit = False + try: + ve.decrypt_file() + except errors.AnsibleError, e: + error_hit = True + + # verify decrypted content + f = open(filename, "rb") + fdata = f.read() + f.close() + + shutil.rmtree(dirpath) + assert error_hit == False, "error decrypting 1.0 file" + assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + + + def test_rekey_migration(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + dirpath = tempfile.mkdtemp() + filename = os.path.join(dirpath, "foo-ansible-1.0.yml") + shutil.rmtree(dirpath) + shutil.copytree("vault_test_data", dirpath) + ve = VaultEditor(None, "ansible", filename) + + # make sure the password functions for the cipher + error_hit = False + try: + ve.rekey_file('ansible2') + except errors.AnsibleError, e: + error_hit = True + + # verify decrypted content + f = open(filename, "rb") + fdata = f.read() + f.close() + + shutil.rmtree(dirpath) + assert error_hit == False, "error rekeying 1.0 file to 1.1" + + # ensure filedata can be decrypted, is 1.1 and is AES256 + vl = VaultLib("ansible2") + dec_data = None + error_hit = False + try: + dec_data = vl.decrypt(fdata) + except errors.AnsibleError, e: + error_hit = True + + assert vl.cipher_name == "AES256", "wrong cipher name set after rekey: %s" % vl.cipher_name + assert error_hit == False, "error decrypting migrated 1.0 file" + assert dec_data.strip() == "foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data + + diff --git a/test/units/vault_test_data/foo-ansible-1.0.yml b/test/units/vault_test_data/foo-ansible-1.0.yml new file mode 100644 index 00000000000..f71ddf10cee --- /dev/null +++ b/test/units/vault_test_data/foo-ansible-1.0.yml @@ -0,0 +1,4 @@ +$ANSIBLE_VAULT;1.0;AES +53616c7465645f5fd0026926a2d415a28a2622116273fbc90e377225c12a347e1daf4456d36a77f9 +9ad98d59f61d06a4b66718d855f16fb7bdfe54d1ec8aeaa4d06c2dc1fa630ae1846a029877f0eeb1 +83c62ffb04c2512995e815de4b4d29ed diff --git a/test/units/vault_test_data/foo-ansible-1.1.yml b/test/units/vault_test_data/foo-ansible-1.1.yml new file mode 100644 index 00000000000..d9a4a448a66 --- /dev/null +++ b/test/units/vault_test_data/foo-ansible-1.1.yml @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +62303130653266653331306264616235333735323636616539316433666463323964623162386137 +3961616263373033353631316333623566303532663065310a393036623466376263393961326530 
+64336561613965383835646464623865663966323464653236343638373165343863623638316664 +3631633031323837340a396530313963373030343933616133393566366137363761373930663833 +3739 From 2d478b16279fa2d6eda1c8ebd5a1881b4172f69c Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 10 Mar 2014 16:15:44 -0500 Subject: [PATCH 231/772] Implement new default cipher class AES256 --- bin/ansible-vault | 8 +- lib/ansible/utils/vault.py | 247 +++++++++++++++--- test/units/TestVault.py | 56 ++-- test/units/TestVaultEditor.py | 141 ++++++++++ .../units/vault_test_data/foo-ansible-1.0.yml | 4 + .../units/vault_test_data/foo-ansible-1.1.yml | 6 + 6 files changed, 410 insertions(+), 52 deletions(-) create mode 100644 test/units/TestVaultEditor.py create mode 100644 test/units/vault_test_data/foo-ansible-1.0.yml create mode 100644 test/units/vault_test_data/foo-ansible-1.1.yml diff --git a/bin/ansible-vault b/bin/ansible-vault index 902653d40bf..2c8094d13b1 100755 --- a/bin/ansible-vault +++ b/bin/ansible-vault @@ -52,7 +52,7 @@ def build_option_parser(action): sys.exit() # options for all actions - #parser.add_option('-c', '--cipher', dest='cipher', default="AES", help="cipher to use") + #parser.add_option('-c', '--cipher', dest='cipher', default="AES256", help="cipher to use") parser.add_option('--debug', dest='debug', action="store_true", help="debug") parser.add_option('--vault-password-file', dest='password_file', help="vault password file") @@ -119,7 +119,7 @@ def execute_create(args, options, parser): else: password = _read_password(options.password_file) - cipher = 'AES' + cipher = 'AES256' if hasattr(options, 'cipher'): cipher = options.cipher @@ -133,7 +133,7 @@ def execute_decrypt(args, options, parser): else: password = _read_password(options.password_file) - cipher = 'AES' + cipher = 'AES256' if hasattr(options, 'cipher'): cipher = options.cipher @@ -169,7 +169,7 @@ def execute_encrypt(args, options, parser): else: password = _read_password(options.password_file) - cipher = 'AES' + cipher = 'AES256' if hasattr(options, 'cipher'): cipher = options.cipher diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 9a43fee1b92..169dc8333b8 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -30,6 +30,22 @@ from binascii import hexlify from binascii import unhexlify from ansible import constants as C +from Crypto.Hash import SHA256, HMAC + +# Counter import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Util import Counter + HAS_COUNTER = True +except ImportError: + HAS_COUNTER = False + +# KDF import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Protocol.KDF import PBKDF2 + HAS_PBKDF2 = True +except ImportError: + HAS_PBKDF2 = False + # AES IMPORTS try: from Crypto.Cipher import AES as AES @@ -37,15 +53,17 @@ try: except ImportError: HAS_AES = False +CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. 
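The VaultAES256 class added further down in this diff stretches one password into two independent keys plus a CTR initial value with a single PBKDF2 call, then slices the result. A condensed sketch of that derive-then-split pattern (pycrypto >= 2.6; values illustrative):

    import os
    from binascii import hexlify
    from Crypto.Hash import SHA256, HMAC
    from Crypto.Protocol.KDF import PBKDF2

    password = 'ansible'
    salt = os.urandom(32)
    keylength, ivlength = 32, 16

    prf = lambda p, s: HMAC.new(p, s, SHA256).digest()
    blob = PBKDF2(password, salt, dkLen=2 * keylength + ivlength, count=10000, prf=prf)

    key1 = blob[:keylength]                  # AES-256 encryption key
    key2 = blob[keylength:2 * keylength]     # HMAC-SHA256 authentication key
    iv = hexlify(blob[2 * keylength:])       # 16 bytes, hex-encoded for Counter.new

Deriving the HMAC key from the same PBKDF2 output keeps the two keys independent without prompting for a second password.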
You may fix this with OS-specific commands such as: rpm -e --nodeps python-crypto; pip install pycrypto" + HEADER='$ANSIBLE_VAULT' -CIPHER_WHITELIST=['AES'] +CIPHER_WHITELIST=['AES', 'AES256'] class VaultLib(object): def __init__(self, password): self.password = password self.cipher_name = None - self.version = '1.0' + self.version = '1.1' def is_encrypted(self, data): if data.startswith(HEADER): @@ -59,7 +77,8 @@ class VaultLib(object): raise errors.AnsibleError("data is already encrypted") if not self.cipher_name: - raise errors.AnsibleError("the cipher must be set before encrypting data") + self.cipher_name = "AES256" + #raise errors.AnsibleError("the cipher must be set before encrypting data") if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST: cipher = globals()['Vault' + self.cipher_name] @@ -67,13 +86,17 @@ class VaultLib(object): else: raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name) + """ # combine sha + data this_sha = sha256(data).hexdigest() tmp_data = this_sha + "\n" + data + """ + # encrypt sha + data - tmp_data = this_cipher.encrypt(tmp_data, self.password) + enc_data = this_cipher.encrypt(data, self.password) + # add header - tmp_data = self._add_headers_and_hexify_encrypted_data(tmp_data) + tmp_data = self._add_header(enc_data) return tmp_data def decrypt(self, data): @@ -83,8 +106,9 @@ class VaultLib(object): if not self.is_encrypted(data): raise errors.AnsibleError("data is not encrypted") - # clean out header, hex and sha - data = self._split_headers_and_get_unhexified_data(data) + # clean out header + data = self._split_header(data) + # create the cipher object if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST: @@ -96,33 +120,26 @@ class VaultLib(object): # try to unencrypt data data = this_cipher.decrypt(data, self.password) - # split out sha and verify decryption - split_data = data.split("\n") - this_sha = split_data[0] - this_data = '\n'.join(split_data[1:]) - test_sha = sha256(this_data).hexdigest() - if this_sha != test_sha: - raise errors.AnsibleError("Decryption failed") - - return this_data + return data - def _add_headers_and_hexify_encrypted_data(self, data): - # combine header and hexlified encrypted data in 80 char columns + def _add_header(self, data): + # combine header and encrypted data in 80 char columns - tmpdata = hexlify(data) - tmpdata = [tmpdata[i:i+80] for i in range(0, len(tmpdata), 80)] + #tmpdata = hexlify(data) + tmpdata = [data[i:i+80] for i in range(0, len(data), 80)] if not self.cipher_name: raise errors.AnsibleError("the cipher must be set before adding a header") dirty_data = HEADER + ";" + str(self.version) + ";" + self.cipher_name + "\n" + for l in tmpdata: dirty_data += l + '\n' return dirty_data - def _split_headers_and_get_unhexified_data(self, data): + def _split_header(self, data): # used by decrypt tmpdata = data.split('\n') @@ -130,14 +147,22 @@ class VaultLib(object): self.version = str(tmpheader[1].strip()) self.cipher_name = str(tmpheader[2].strip()) - clean_data = ''.join(tmpdata[1:]) + clean_data = '\n'.join(tmpdata[1:]) + """ # strip out newline, join, unhex clean_data = [ x.strip() for x in clean_data ] clean_data = unhexlify(''.join(clean_data)) + """ return clean_data + def __enter__(self): + return self + + def __exit__(self, *err): + pass + class VaultEditor(object): # uses helper methods for write_file(self, filename, data) # to write a file so that code isn't duplicated for simple @@ -153,6 +178,9 @@ class 
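The 1.1 header that _add_header and _split_header now agree on is a single semicolon-delimited line followed by the payload. A rough standalone parser, for illustration only (not the module's API):

    def parse_vault_header(text):
        """Return (version, cipher_name, payload) from vault-formatted text."""
        lines = text.split('\n')
        magic, version, cipher_name = lines[0].split(';')
        assert magic == '$ANSIBLE_VAULT', 'not vault data'
        return version.strip(), cipher_name.strip(), '\n'.join(lines[1:])

    version, cipher, payload = parse_vault_header('$ANSIBLE_VAULT;9.9;TEST\nansible')
    # ('9.9', 'TEST', 'ansible') -- mirroring the test_split_header expectations below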
VaultEditor(object): def create_file(self): """ create a new encrypted file """ + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise errors.AnsibleError(CRYPTO_UPGRADE) + if os.path.isfile(self.filename): raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename) @@ -166,6 +194,10 @@ class VaultEditor(object): self.write_data(enc_data, self.filename) def decrypt_file(self): + + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise errors.AnsibleError(CRYPTO_UPGRADE) + if not os.path.isfile(self.filename): raise errors.AnsibleError("%s does not exist" % self.filename) @@ -179,6 +211,9 @@ class VaultEditor(object): def edit_file(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise errors.AnsibleError(CRYPTO_UPGRADE) + # decrypt to tmpfile tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) @@ -191,9 +226,11 @@ class VaultEditor(object): call([EDITOR, tmp_path]) new_data = self.read_data(tmp_path) - # create new vault and set cipher to old + # create new vault new_vault = VaultLib(self.password) - new_vault.cipher_name = this_vault.cipher_name + + # we want the cipher to default to AES256 + #new_vault.cipher_name = this_vault.cipher_name # encrypt new data a write out to tmp enc_data = new_vault.encrypt(new_data) @@ -203,6 +240,10 @@ class VaultEditor(object): self.shuffle_files(tmp_path, self.filename) def encrypt_file(self): + + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise errors.AnsibleError(CRYPTO_UPGRADE) + if not os.path.isfile(self.filename): raise errors.AnsibleError("%s does not exist" % self.filename) @@ -216,14 +257,20 @@ class VaultEditor(object): raise errors.AnsibleError("%s is already encrypted" % self.filename) def rekey_file(self, new_password): + + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise errors.AnsibleError(CRYPTO_UPGRADE) + # decrypt tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) dec_data = this_vault.decrypt(tmpdata) - # create new vault, set cipher to old and password to new + # create new vault new_vault = VaultLib(new_password) - new_vault.cipher_name = this_vault.cipher_name + + # we want to force cipher to the default + #new_vault.cipher_name = this_vault.cipher_name # re-encrypt data and re-write file enc_data = new_vault.encrypt(dec_data) @@ -254,11 +301,14 @@ class VaultEditor(object): class VaultAES(object): + # this version has been obsoleted by the VaultAES256 class + # which uses encrypt-then-mac (fixing order) and also improving the KDF used + # code remains for upgrade purposes only # http://stackoverflow.com/a/16761459 def __init__(self): if not HAS_AES: - raise errors.AnsibleError("pycrypto is not installed. 
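The rekey_file change above is what performs the 1.0-to-1.1 migration: decrypting honors whatever cipher the header names, while re-encrypting through a fresh VaultLib with no cipher set now falls through to AES256. Condensed to its essentials (a sketch, with VaultLib imported from ansible.utils.vault as in the tests):

    from ansible.utils.vault import VaultLib

    def rekey(ciphertext, old_password, new_password):
        plaintext = VaultLib(old_password).decrypt(ciphertext)  # cipher read from header
        return VaultLib(new_password).encrypt(plaintext)        # defaults to AES256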
Fix this with your package manager, for instance, yum-install python-crypto OR (apt equivalent)") + raise errors.AnsibleError(CRYPTO_UPGRADE) def aes_derive_key_and_iv(self, password, salt, key_length, iv_length): @@ -278,7 +328,12 @@ class VaultAES(object): """ Read plaintext data from in_file and write encrypted to out_file """ - in_file = BytesIO(data) + + # combine sha + data + this_sha = sha256(data).hexdigest() + tmp_data = this_sha + "\n" + data + + in_file = BytesIO(tmp_data) in_file.seek(0) out_file = BytesIO() @@ -301,14 +356,21 @@ class VaultAES(object): out_file.write(cipher.encrypt(chunk)) out_file.seek(0) - return out_file.read() + enc_data = out_file.read() + tmp_data = hexlify(enc_data) + return tmp_data + + def decrypt(self, data, password, key_length=32): """ Read encrypted data from in_file and write decrypted to out_file """ # http://stackoverflow.com/a/14989032 + data = ''.join(data.split('\n')) + data = unhexlify(data) + in_file = BytesIO(data) in_file.seek(0) out_file = BytesIO() @@ -330,6 +392,129 @@ class VaultAES(object): # reset the stream pointer to the beginning out_file.seek(0) - return out_file.read() + new_data = out_file.read() + + # split out sha and verify decryption + split_data = new_data.split("\n") + this_sha = split_data[0] + this_data = '\n'.join(split_data[1:]) + test_sha = sha256(this_data).hexdigest() + + if this_sha != test_sha: + raise errors.AnsibleError("Decryption failed") + + #return out_file.read() + return this_data + + +class VaultAES256(object): + + """ + Vault implementation using AES-CTR with an HMAC-SHA256 authentication code. + Keys are derived using PBKDF2 + """ + + # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html + + def gen_key_initctr(self, password, salt): + # 16 for AES 128, 32 for AES256 + keylength = 32 + + # match the size used for counter.new to avoid extra work + ivlength = 16 + + hash_function = SHA256 + + # make two keys and one iv + pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest() + + if not HAS_PBKDF2: + raise errors.AnsibleError(CRYPTO_UPGRADE) + + derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength, + count=10000, prf=pbkdf2_prf) + + #import epdb; epdb.st() + key1 = derivedkey[:keylength] + key2 = derivedkey[keylength:(keylength * 2)] + iv = derivedkey[(keylength * 2):(keylength * 2) + ivlength] + + return key1, key2, hexlify(iv) + + + def encrypt(self, data, password): + + salt = os.urandom(32) + key1, key2, iv = self.gen_key_initctr(password, salt) + + # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3 + bs = AES.block_size + padding_length = (bs - len(data) % bs) or bs + data += padding_length * chr(padding_length) + + # COUNTER.new PARAMETERS + # 1) nbits (integer) - Length of the counter, in bits. + # 2) initial_value (integer) - initial value of the counter. 
"iv" from gen_key_initctr + + if not HAS_COUNTER: + raise errors.AnsibleError(CRYPTO_UPGRADE) + ctr = Counter.new(128, initial_value=long(iv, 16)) + + # AES.new PARAMETERS + # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from gen_key_initctr + # 2) MODE_CTR, is the recommended mode + # 3) counter= + + cipher = AES.new(key1, AES.MODE_CTR, counter=ctr) + + # ENCRYPT PADDED DATA + cryptedData = cipher.encrypt(data) + + # COMBINE SALT, DIGEST AND DATA + hmac = HMAC.new(key2, cryptedData, SHA256) + message = "%s\n%s\n%s" % ( hexlify(salt), hmac.hexdigest(), hexlify(cryptedData) ) + message = hexlify(message) + return message + + def decrypt(self, data, password): + + # SPLIT SALT, DIGEST, AND DATA + data = ''.join(data.split("\n")) + data = unhexlify(data) + salt, cryptedHmac, cryptedData = data.split("\n", 2) + salt = unhexlify(salt) + cryptedData = unhexlify(cryptedData) + + key1, key2, iv = self.gen_key_initctr(password, salt) + + # EXIT EARLY IF DIGEST DOESN'T MATCH + hmacDecrypt = HMAC.new(key2, cryptedData, SHA256) + if not self.is_equal(cryptedHmac, hmacDecrypt.hexdigest()): + return None + + # SET THE COUNTER AND THE CIPHER + if not HAS_COUNTER: + raise errors.AnsibleError(CRYPTO_UPGRADE) + ctr = Counter.new(128, initial_value=long(iv, 16)) + cipher = AES.new(key1, AES.MODE_CTR, counter=ctr) + + # DECRYPT PADDED DATA + decryptedData = cipher.decrypt(cryptedData) + + # UNPAD DATA + padding_length = ord(decryptedData[-1]) + decryptedData = decryptedData[:-padding_length] + + return decryptedData + + def is_equal(self, a, b): + # http://codahale.com/a-lesson-in-timing-attacks/ + if len(a) != len(b): + return False + + result = 0 + for x, y in zip(a, b): + result |= ord(x) ^ ord(y) + return result == 0 + - diff --git a/test/units/TestVault.py b/test/units/TestVault.py index f42188057f8..415d5c14aa8 100644 --- a/test/units/TestVault.py +++ b/test/units/TestVault.py @@ -12,6 +12,21 @@ from nose.plugins.skip import SkipTest from ansible import errors from ansible.utils.vault import VaultLib + +# Counter import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Util import Counter + HAS_COUNTER = True +except ImportError: + HAS_COUNTER = False + +# KDF import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Protocol.KDF import PBKDF2 + HAS_PBKDF2 = True +except ImportError: + HAS_PBKDF2 = False + # AES IMPORTS try: from Crypto.Cipher import AES as AES @@ -26,8 +41,8 @@ class TestVaultLib(TestCase): slots = ['is_encrypted', 'encrypt', 'decrypt', - '_add_headers_and_hexify_encrypted_data', - '_split_headers_and_get_unhexified_data',] + '_add_header', + '_split_header',] for slot in slots: assert hasattr(v, slot), "VaultLib is missing the %s method" % slot @@ -41,9 +56,7 @@ class TestVaultLib(TestCase): v = VaultLib('ansible') v.cipher_name = "TEST" sensitive_data = "ansible" - sensitive_hex = hexlify(sensitive_data) - data = v._add_headers_and_hexify_encrypted_data(sensitive_data) - open("/tmp/awx.log", "a").write("data: %s\n" % data) + data = v._add_header(sensitive_data) lines = data.split('\n') assert len(lines) > 1, "failed to properly add header" header = lines[0] @@ -53,19 +66,18 @@ class TestVaultLib(TestCase): assert header_parts[0] == '$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT" assert header_parts[1] == v.version, "header version is incorrect" assert header_parts[2] == 'TEST', "header does end with cipher name" - assert lines[1] == sensitive_hex - def test_remove_header(self): + def test_split_header(self): v = 
VaultLib('ansible') - data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify("ansible") - rdata = v._split_headers_and_get_unhexified_data(data) + data = "$ANSIBLE_VAULT;9.9;TEST\nansible" + rdata = v._split_header(data) lines = rdata.split('\n') assert lines[0] == "ansible" assert v.cipher_name == 'TEST', "cipher name was not set" assert v.version == "9.9" - def test_encyrpt_decrypt(self): - if not HAS_AES: + def test_encrypt_decrypt_aes(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest v = VaultLib('ansible') v.cipher_name = 'AES' @@ -74,8 +86,18 @@ class TestVaultLib(TestCase): assert enc_data != "foobar", "encryption failed" assert dec_data == "foobar", "decryption failed" + def test_encrypt_decrypt_aes256(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + v = VaultLib('ansible') + v.cipher_name = 'AES256' + enc_data = v.encrypt("foobar") + dec_data = v.decrypt(enc_data) + assert enc_data != "foobar", "encryption failed" + assert dec_data == "foobar", "decryption failed" + def test_encrypt_encrypted(self): - if not HAS_AES: + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest v = VaultLib('ansible') v.cipher_name = 'AES' @@ -88,7 +110,7 @@ class TestVaultLib(TestCase): assert error_hit, "No error was thrown when trying to encrypt data with a header" def test_decrypt_decrypted(self): - if not HAS_AES: + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest v = VaultLib('ansible') data = "ansible" @@ -100,7 +122,8 @@ class TestVaultLib(TestCase): assert error_hit, "No error was thrown when trying to decrypt data without a header" def test_cipher_not_set(self): - if not HAS_AES: + # not setting the cipher should default to AES256 + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest v = VaultLib('ansible') data = "ansible" @@ -109,6 +132,5 @@ class TestVaultLib(TestCase): enc_data = v.encrypt(data) except errors.AnsibleError, e: error_hit = True - assert error_hit, "No error was thrown when trying to encrypt data without the cipher set" - - + assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set" + assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name diff --git a/test/units/TestVaultEditor.py b/test/units/TestVaultEditor.py new file mode 100644 index 00000000000..4d3f99e89a9 --- /dev/null +++ b/test/units/TestVaultEditor.py @@ -0,0 +1,141 @@ +#!/usr/bin/env python + +from unittest import TestCase +import getpass +import os +import shutil +import time +import tempfile +from binascii import unhexlify +from binascii import hexlify +from nose.plugins.skip import SkipTest + +from ansible import errors +from ansible.utils.vault import VaultLib +from ansible.utils.vault import VaultEditor + +# Counter import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Util import Counter + HAS_COUNTER = True +except ImportError: + HAS_COUNTER = False + +# KDF import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Protocol.KDF import PBKDF2 + HAS_PBKDF2 = True +except ImportError: + HAS_PBKDF2 = False + +# AES IMPORTS +try: + from Crypto.Cipher import AES as AES + HAS_AES = True +except ImportError: + HAS_AES = False + +class TestVaultEditor(TestCase): + + def test_methods_exist(self): + v = VaultEditor(None, None, None) + slots = ['create_file', + 'decrypt_file', + 'edit_file', + 'encrypt_file', + 'rekey_file', + 'read_data', + 'write_data', + 'shuffle_files'] + for slot in slots: + assert hasattr(v, slot), 
"VaultLib is missing the %s method" % slot + + def test_decrypt_1_0(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + dirpath = tempfile.mkdtemp() + filename = os.path.join(dirpath, "foo-ansible-1.0.yml") + shutil.rmtree(dirpath) + shutil.copytree("vault_test_data", dirpath) + ve = VaultEditor(None, "ansible", filename) + + # make sure the password functions for the cipher + error_hit = False + try: + ve.decrypt_file() + except errors.AnsibleError, e: + error_hit = True + + # verify decrypted content + f = open(filename, "rb") + fdata = f.read() + f.close() + + shutil.rmtree(dirpath) + assert error_hit == False, "error decrypting 1.0 file" + assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + + def test_decrypt_1_1(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + dirpath = tempfile.mkdtemp() + filename = os.path.join(dirpath, "foo-ansible-1.1.yml") + shutil.rmtree(dirpath) + shutil.copytree("vault_test_data", dirpath) + ve = VaultEditor(None, "ansible", filename) + + # make sure the password functions for the cipher + error_hit = False + try: + ve.decrypt_file() + except errors.AnsibleError, e: + error_hit = True + + # verify decrypted content + f = open(filename, "rb") + fdata = f.read() + f.close() + + shutil.rmtree(dirpath) + assert error_hit == False, "error decrypting 1.0 file" + assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + + + def test_rekey_migration(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + dirpath = tempfile.mkdtemp() + filename = os.path.join(dirpath, "foo-ansible-1.0.yml") + shutil.rmtree(dirpath) + shutil.copytree("vault_test_data", dirpath) + ve = VaultEditor(None, "ansible", filename) + + # make sure the password functions for the cipher + error_hit = False + try: + ve.rekey_file('ansible2') + except errors.AnsibleError, e: + error_hit = True + + # verify decrypted content + f = open(filename, "rb") + fdata = f.read() + f.close() + + shutil.rmtree(dirpath) + assert error_hit == False, "error rekeying 1.0 file to 1.1" + + # ensure filedata can be decrypted, is 1.1 and is AES256 + vl = VaultLib("ansible2") + dec_data = None + error_hit = False + try: + dec_data = vl.decrypt(fdata) + except errors.AnsibleError, e: + error_hit = True + + assert vl.cipher_name == "AES256", "wrong cipher name set after rekey: %s" % vl.cipher_name + assert error_hit == False, "error decrypting migrated 1.0 file" + assert dec_data.strip() == "foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data + + diff --git a/test/units/vault_test_data/foo-ansible-1.0.yml b/test/units/vault_test_data/foo-ansible-1.0.yml new file mode 100644 index 00000000000..f71ddf10cee --- /dev/null +++ b/test/units/vault_test_data/foo-ansible-1.0.yml @@ -0,0 +1,4 @@ +$ANSIBLE_VAULT;1.0;AES +53616c7465645f5fd0026926a2d415a28a2622116273fbc90e377225c12a347e1daf4456d36a77f9 +9ad98d59f61d06a4b66718d855f16fb7bdfe54d1ec8aeaa4d06c2dc1fa630ae1846a029877f0eeb1 +83c62ffb04c2512995e815de4b4d29ed diff --git a/test/units/vault_test_data/foo-ansible-1.1.yml b/test/units/vault_test_data/foo-ansible-1.1.yml new file mode 100644 index 00000000000..d9a4a448a66 --- /dev/null +++ b/test/units/vault_test_data/foo-ansible-1.1.yml @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +62303130653266653331306264616235333735323636616539316433666463323964623162386137 +3961616263373033353631316333623566303532663065310a393036623466376263393961326530 
+64336561613965383835646464623865663966323464653236343638373165343863623638316664 +3631633031323837340a396530313963373030343933616133393566366137363761373930663833 +3739 From 2fa6110961613b40906614abf5f3d83d23c081ae Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 10 Mar 2014 17:23:37 -0400 Subject: [PATCH 232/772] Update the message about pycrypto to include that python-devel must be installed. --- lib/ansible/utils/vault.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 169dc8333b8..118f5790054 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -53,7 +53,7 @@ try: except ImportError: HAS_AES = False -CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. You may fix this with OS-specific commands such as: rpm -e --nodeps python-crypto; pip install pycrypto" +CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. You may fix this with OS-specific commands such as: yum install python-devel; rpm -e --nodeps python-crypto; pip install pycrypto" HEADER='$ANSIBLE_VAULT' CIPHER_WHITELIST=['AES', 'AES256'] From 43203bac5669d4b8cd07bd5f0cf80672fd60907a Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 10 Mar 2014 17:23:37 -0400 Subject: [PATCH 233/772] Update the message about pycrypto to include that python-devel must be installed. --- lib/ansible/utils/vault.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 169dc8333b8..118f5790054 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -53,7 +53,7 @@ try: except ImportError: HAS_AES = False -CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. You may fix this with OS-specific commands such as: rpm -e --nodeps python-crypto; pip install pycrypto" +CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. You may fix this with OS-specific commands such as: yum install python-devel; rpm -e --nodeps python-crypto; pip install pycrypto" HEADER='$ANSIBLE_VAULT' CIPHER_WHITELIST=['AES', 'AES256'] From 6607010b2be7ac624d0dbf6933ced47922461638 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 10 Mar 2014 16:49:45 -0500 Subject: [PATCH 234/772] Fix ad-hoc command examples --- docsite/rst/playbooks_best_practices.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_best_practices.rst b/docsite/rst/playbooks_best_practices.rst index fbe34ca344e..298b832ff0a 100644 --- a/docsite/rst/playbooks_best_practices.rst +++ b/docsite/rst/playbooks_best_practices.rst @@ -223,8 +223,8 @@ What about just the first 10, and then the next 10?:: And of course just basic ad-hoc stuff is also possible.:: - ansible -i production -m ping - ansible -i production -m command -a '/sbin/reboot' --limit boston + ansible boston -i production -m ping + ansible boston -i production -m command -a '/sbin/reboot' And there are some useful commands to know (at least in 1.1 and higher):: From fb14b53130194809a942a3d26cf8496c6d8b2c27 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Mon, 10 Mar 2014 15:09:06 -0700 Subject: [PATCH 235/772] Document the module_lang option. Closes #6169. 
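module_lang controls the locale exported to modules so that the command output Ansible parses stays in predictable C-locale English. Its effect can be pictured with a sketch like the following (illustrative only, not the code path Ansible itself uses):

    import os
    import subprocess

    def run_module_command(args, module_lang='C'):
        # export the configured language to the child, as module_lang = C would
        env = dict(os.environ, LANG=module_lang, LC_ALL=module_lang)
        return subprocess.check_output(args, env=env)

    print run_module_command(['date'])   # weekday/month names come out in the C locale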
--- docsite/rst/intro_configuration.rst | 7 +++++++ examples/ansible.cfg | 1 + 2 files changed, 8 insertions(+) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 450ca91aba2..0c25297cf35 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -310,6 +310,13 @@ different locations:: Most users will not need to use this feature. See :doc:`developing_plugins` for more details +.. _module_lang: + +module_lang +=========== + +This is to set the default language to communicate between the module and the system. By default, the value is 'C'. + .. _module_name: module_name diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 5b23e101269..f4f4385c4b9 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -22,6 +22,7 @@ sudo_user = root #ask_pass = True transport = smart remote_port = 22 +module_lang = C # additional paths to search for roles in, colon separated #roles_path = /etc/ansible/roles From 16c05cbc8892041cacba3ff87c86e68b86b4511b Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Mon, 10 Mar 2014 17:40:36 -0500 Subject: [PATCH 236/772] Update files for 1.5.1 release. --- CHANGELOG.md | 7 +++++++ RELEASES.txt | 1 + VERSION | 2 +- docsite/rst/index.rst | 2 +- lib/ansible/__init__.py | 2 +- packaging/debian/changelog | 8 +++++++- packaging/rpm/ansible.spec | 7 +++++-- 7 files changed, 23 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1ff78020e64..0ab6ae6955f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,13 @@ Ansible Changes By Release ========================== +## 1.5.1 "Love Walks In" - March 10, 2014 + +- Force command action to not be executed by the shell unless specifically enabled. +- Validate SSL certs accessed through urllib*. +- Implement new default cipher class AES256 in ansible-vault. +- Misc bug fixes. + ## 1.5 "Love Walks In" - February 28, 2014 Major features/changes: diff --git a/RELEASES.txt b/RELEASES.txt index 63358298818..680d313329a 100644 --- a/RELEASES.txt +++ b/RELEASES.txt @@ -2,6 +2,7 @@ Ansible Releases at a Glance ============================ 1.6 "The Cradle Will Rock" - NEXT +1.5.1 "Love Walks In" -------- 03-10-2014 1.5 "Love Walks In" -------- 02-28-2014 1.4.5 "Could This Be Magic?" - 02-12-2014 1.4.4 "Could This Be Magic?" - 01-06-2014 diff --git a/VERSION b/VERSION index c239c60cba2..26ca594609a 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.5 +1.5.1 diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst index d507fda1640..5f6ca7d63b6 100644 --- a/docsite/rst/index.rst +++ b/docsite/rst/index.rst @@ -16,7 +16,7 @@ We believe simplicity is relevant to all sizes of environments and design for bu Ansible manages machines in an agentless manner. There is never a question of how to upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. As OpenSSH is one of the most peer reviewed open source components, the security exposure of using the tool is greatly reduced. Ansible is decentralized -- it relies on your existing OS credentials to control access to remote machines; if needed it can easily connect with Kerberos, LDAP, and other centralized authentication management systems. -This documentation covers the current released version of Ansible (1.4.5) and also some development version features (1.5). For recent features, in each section, the version of Ansible where the feature is added is indicated. 
Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. +This documentation covers the current released version of Ansible (1.5.1) and also some development version features (1.6.0). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. .. _an_introduction: diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py index bad06025a12..663f95768ea 100644 --- a/lib/ansible/__init__.py +++ b/lib/ansible/__init__.py @@ -14,5 +14,5 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -__version__ = '1.5' +__version__ = '1.5.1' __author__ = 'Michael DeHaan' diff --git a/packaging/debian/changelog b/packaging/debian/changelog index c009bebb376..a65eaff5eb0 100644 --- a/packaging/debian/changelog +++ b/packaging/debian/changelog @@ -1,8 +1,14 @@ +ansible (1.5.1) unstable; urgency=low + + * 1.5.1 release + + -- Michael DeHaan Mon, 10 March 2014 17:33:44 -0500 + ansible (1.5) unstable; urgency=low * 1.5 release - -- Michael DeHaan Fri, 28 February 2014 -0500 + -- Michael DeHaan Fri, 28 February 2014 00:00:00 -0500 ansible (1.4.5) unstable; urgency=low diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec index c067bbe42e9..3028da5c054 100644 --- a/packaging/rpm/ansible.spec +++ b/packaging/rpm/ansible.spec @@ -5,7 +5,7 @@ %endif Name: %{name} -Version: 1.5 +Version: 1.5.1 Release: 1%{?dist} Url: http://www.ansible.com Summary: SSH-based application deployment, configuration management, and IT orchestration platform @@ -102,7 +102,10 @@ rm -rf %{buildroot} %changelog -* Fri Feb 28 2014 Michael DeHaan - 1.5-0 +* Fri Mar 10 2014 Michael DeHaan - 1.5.1 +- Release 1.5.1 + +* Fri Feb 28 2014 Michael DeHaan - 1.5.0 - Release 1.5.0 * Wed Feb 12 2014 Michael DeHaan - 1.4.5 From 0f962dba14d1d51c6546a2065417958048bd98b9 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 10 Mar 2014 19:37:34 -0400 Subject: [PATCH 237/772] Missing comma in argument spec. --- library/monitoring/airbrake_deployment | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/monitoring/airbrake_deployment b/library/monitoring/airbrake_deployment index 6a83459906a..55d6017e4ea 100644 --- a/library/monitoring/airbrake_deployment +++ b/library/monitoring/airbrake_deployment @@ -51,7 +51,7 @@ options: description: - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit. required: false - default: https://airbrake.io/deploys + default: "https://airbrake.io/deploys" validate_certs: description: - If C(no), SSL certificates for the target url will not be validated. 
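The comma this patch restores is not cosmetic: two keyword arguments with no separator do not parse at all, so the module would die before running. A minimal reproduction (hypothetical spec, for illustration):

    # SyntaxError -- the interpreter rejects the call before anything runs:
    #   dict(url=dict(required=False)          # <- missing comma
    #        validate_certs=dict(type='bool'))
    #
    # With the separator restored it builds normally:
    spec = dict(
        url=dict(required=False),
        validate_certs=dict(type='bool'),
    )

Adjacent string literals are the nastier cousin of this typo: a missing comma there concatenates silently instead of failing.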
This should only be used @@ -84,7 +84,7 @@ def main(): user=dict(required=False), repo=dict(required=False), revision=dict(required=False), - url=dict(required=False, default='https://api.airbrake.io/deploys.txt') + url=dict(required=False, default='https://api.airbrake.io/deploys.txt'), validate_certs=dict(default='yes', type='bool'), ), supports_check_mode=True From 6740a1f342a894a0c7cdb0fab434f1bceaf03583 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 10 Mar 2014 19:43:39 -0400 Subject: [PATCH 238/772] Wrap crypto.hash imports with try/except --- lib/ansible/utils/vault.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 118f5790054..6a714fcc85d 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -30,7 +30,11 @@ from binascii import hexlify from binascii import unhexlify from ansible import constants as C -from Crypto.Hash import SHA256, HMAC +try: + from Crypto.Hash import SHA256, HMAC + HAS_HASH = True +except ImportError: + HAS_HASH = False # Counter import fails for 2.0.1, requires >= 2.6.1 from pip try: From cc1537e4ccaec4a354ae3b755f5e3626031577c1 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 10 Mar 2014 21:32:27 -0400 Subject: [PATCH 239/772] Remove remnant merge conflict lines from ansible.spec --- packaging/rpm/ansible.spec | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec index 6be398ef100..71004b652e7 100644 --- a/packaging/rpm/ansible.spec +++ b/packaging/rpm/ansible.spec @@ -102,16 +102,14 @@ rm -rf %{buildroot} %changelog -<<<<<<< HEAD * Thu Feb 28 2014 Michael DeHaan - 1.6-0 * (PENDING) -======= + * Fri Mar 10 2014 Michael DeHaan - 1.5.1 - Release 1.5.1 * Fri Feb 28 2014 Michael DeHaan - 1.5.0 - Release 1.5.0 ->>>>>>> 16c05cbc8892041cacba3ff87c86e68b86b4511b * Thu Feb 28 2014 Michael DeHaan - 1.5-0 * Release 1.5 From 7f028c101c33b0385e4b5d798d3234121d689c09 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 10 Mar 2014 21:35:43 -0400 Subject: [PATCH 240/772] Fix changelog dates in ansible.spec --- packaging/rpm/ansible.spec | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec index 71004b652e7..ad34053f3a9 100644 --- a/packaging/rpm/ansible.spec +++ b/packaging/rpm/ansible.spec @@ -102,10 +102,10 @@ rm -rf %{buildroot} %changelog -* Thu Feb 28 2014 Michael DeHaan - 1.6-0 +* Thu Mar 11 2014 Michael DeHaan - 1.6-0 * (PENDING) -* Fri Mar 10 2014 Michael DeHaan - 1.5.1 +* Mon Mar 10 2014 Michael DeHaan - 1.5.1 - Release 1.5.1 * Fri Feb 28 2014 Michael DeHaan - 1.5.0 From 29c60bdaffe8cb19eb8ac8fc7e2e12097dfeebdc Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Tue, 11 Mar 2014 12:20:58 +0100 Subject: [PATCH 241/772] group_vars plugin: do not parse hidden files in subfolders, e.g. avoid .svn/ --- lib/ansible/inventory/vars_plugins/group_vars.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/vars_plugins/group_vars.py b/lib/ansible/inventory/vars_plugins/group_vars.py index 3421565a5fb..6be1d1f6edd 100644 --- a/lib/ansible/inventory/vars_plugins/group_vars.py +++ b/lib/ansible/inventory/vars_plugins/group_vars.py @@ -123,7 +123,8 @@ def _load_vars_from_folder(folder_path, results): # filesystem lists them. names.sort() - paths = [os.path.join(folder_path, name) for name in names] + # do not parse hidden files or dirs, e.g. 
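The one-line filter that the group_vars hunk below adds is easy to exercise in isolation; a minimal sketch in the spirit of that hunk (directory contents hypothetical):

    import os

    def visible_paths(folder_path):
        # stable ordering, then drop hidden entries such as .svn/ or .all.yml.swp
        names = sorted(n for n in os.listdir(folder_path) if not n.startswith('.'))
        return [os.path.join(folder_path, n) for n in names]

    # for entries ['.svn', 'all.yml', '.all.yml.swp'] only 'all.yml' survives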
.svn/ + paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')] for path in paths: _found, results = _load_vars_from_path(path, results) return results From 6b1cb22fc31d85a3e9e9bbf315ae959d05623427 Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Tue, 11 Mar 2014 12:49:54 +0100 Subject: [PATCH 242/772] Allow hash_behaviour=merge to be respected in core inventory (For now, this means, enable it also for inventory scripts) --- lib/ansible/inventory/host.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index 19b919ac66d..1b3c10f9d4e 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -16,6 +16,7 @@ # along with Ansible. If not, see . import ansible.constants as C +from ansible import utils class Host(object): ''' a single ansible host ''' @@ -56,7 +57,7 @@ class Host(object): results = {} groups = self.get_groups() for group in sorted(groups, key=lambda g: g.depth): - results.update(group.get_variables()) + results = utils.combine_vars(results, group.get_variables()) results.update(self.vars) results['inventory_hostname'] = self.name results['inventory_hostname_short'] = self.name.split('.')[0] From c09d4b1c7a8494f961ae9e071f89c95a4a944002 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 11 Mar 2014 08:39:28 -0400 Subject: [PATCH 243/772] Update apt module so that environment variables are set correctly since not going through shell. Very sorry folks, it will be addressed. --- library/packaging/apt | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/library/packaging/apt b/library/packaging/apt index f143c8f7b73..f11ab73de19 100644 --- a/library/packaging/apt +++ b/library/packaging/apt @@ -138,7 +138,11 @@ import datetime import fnmatch # APT related constants -APT_ENVVARS = "DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=critical" +APT_ENV_VARS = dict( + DEBIAN_FRONTEND = 'noninteractive', + DEBIAN_PRIORITY = 'critical' +) + DPKG_OPTIONS = 'force-confdef,force-confold' APT_GET_ZERO = "0 upgraded, 0 newly installed" APTITUDE_ZERO = "0 packages upgraded, 0 newly installed" @@ -260,7 +264,10 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, else: check_arg = '' - cmd = "%s %s -y %s %s %s install %s" % (APT_ENVVARS, APT_GET_CMD, dpkg_options, force_yes, check_arg, packages) + for (k,v) in APT_ENV_VARS.iteritems(): + os.environ[k] = v + + cmd = "%s -y %s %s %s install %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages) if default_release: cmd += " -t '%s'" % (default_release,) @@ -292,7 +299,11 @@ def remove(m, pkgspec, cache, purge=False, purge = '--purge' else: purge = '' - cmd = "%s %s -q -y %s %s remove %s" % (APT_ENVVARS, APT_GET_CMD, dpkg_options, purge, packages) + + for (k,v) in APT_ENV_VARS.iteritems(): + os.environ[k] = v + + cmd = "%s -q -y %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, packages) if m.check_mode: m.exit_json(changed=True) From 7345495655caa18dd84280d5dbbcb2574b0ec3ac Mon Sep 17 00:00:00 2001 From: James Laska Date: Tue, 11 Mar 2014 09:21:33 -0400 Subject: [PATCH 244/772] Add generated test files to .gitignore --- .gitignore | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.gitignore b/.gitignore index 4f12e1da9a0..d2275493129 100644 --- a/.gitignore +++ b/.gitignore @@ -39,3 +39,8 @@ debian/ *.swp *.swo credentials.yml +# test output +.coverage +results.xml +coverage.xml +/test/units/cover-html From 
297b048d0ede80b2119ef5d174c98e6531b53b50 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 11 Mar 2014 10:47:53 -0400 Subject: [PATCH 245/772] Fix other reference to APT_ENV_VARS. --- library/packaging/apt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/library/packaging/apt b/library/packaging/apt index f11ab73de19..311bfa1199b 100644 --- a/library/packaging/apt +++ b/library/packaging/apt @@ -343,7 +343,11 @@ def upgrade(m, mode="yes", force=False, force_yes = '' apt_cmd_path = m.get_bin_path(apt_cmd, required=True) - cmd = '%s %s -y %s %s %s %s' % (APT_ENVVARS, apt_cmd_path, dpkg_options, + + for (k,v) in APT_ENV_VARS.iteritems(): + os.environ[k] = v + + cmd = '%s -y %s %s %s %s' % (apt_cmd_path, dpkg_options, force_yes, check_arg, upgrade_command) rc, out, err = m.run_command(cmd) if rc: From 10f852a807aabd8c53a7b4d6680d6e044eb3fba7 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 11 Mar 2014 11:22:17 -0400 Subject: [PATCH 246/772] Update version_added for wait_timeout parameter on elb module. --- library/cloud/ec2_elb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2_elb b/library/cloud/ec2_elb index 159e2b1a043..c7a4e0d3b3f 100644 --- a/library/cloud/ec2_elb +++ b/library/cloud/ec2_elb @@ -87,7 +87,7 @@ options: - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no. required: false default: 0 - version_added: "1.5" + version_added: "1.6" """ From cd3fcd843ace98507f1da21fd7c136da05de4c58 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 11 Mar 2014 11:44:47 -0400 Subject: [PATCH 247/772] Set version-added for new parameters in ec2_elb_lb and elasticache --- library/cloud/ec2_elb_lb | 1 + library/cloud/elasticache | 1 + 2 files changed, 2 insertions(+) diff --git a/library/cloud/ec2_elb_lb b/library/cloud/ec2_elb_lb index d6ac9918cc8..ca3adc31931 100644 --- a/library/cloud/ec2_elb_lb +++ b/library/cloud/ec2_elb_lb @@ -56,6 +56,7 @@ options: - A list of security groups to apply to the elb require: false default: None + version_added: "1.6" health_check: description: - An associative array of health check configuration settigs (see example) diff --git a/library/cloud/elasticache b/library/cloud/elasticache index a32f58b19ef..c506b4719c8 100644 --- a/library/cloud/elasticache +++ b/library/cloud/elasticache @@ -63,6 +63,7 @@ options: - A list of vpc security group names to associate with this cache cluster. 
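The APT_ENV_VARS fixes in this and the earlier apt patch share one root cause: VAR=value prefixes are shell syntax, and command actions no longer pass through a shell by default. Once the shell is gone, the variables must enter the child's environment directly; the patch mutates os.environ before run_command, and passing env= to the child is the equivalent idea. A minimal contrast (illustrative; apt-get's -s flag only simulates):

    import os
    import subprocess

    # shell form: /bin/sh parses the VAR=value prefix
    subprocess.call("DEBIAN_FRONTEND=noninteractive apt-get -s upgrade", shell=True)

    # no shell: inject the variables into the child environment instead
    env = dict(os.environ, DEBIAN_FRONTEND='noninteractive', DEBIAN_PRIORITY='critical')
    subprocess.call(["apt-get", "-s", "upgrade"], env=env)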
Only use if inside a vpc required: false default: ['default'] + version_added: "1.6" cache_security_groups: description: - A list of cache security group names to associate with this cache cluster From 8ca3bb413708d7cb33230ce02c05266900f6b9ea Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 11 Mar 2014 10:48:16 -0500 Subject: [PATCH 248/772] Updating profile/security_token version_added labels --- library/cloud/ec2 | 4 ++-- library/cloud/ec2_ami | 4 ++-- library/cloud/ec2_eip | 4 ++-- library/cloud/ec2_group | 4 ++-- library/cloud/ec2_key | 4 ++-- library/cloud/ec2_snapshot | 4 ++-- library/cloud/ec2_tag | 4 ++-- library/cloud/ec2_vol | 4 ++-- 8 files changed, 16 insertions(+), 16 deletions(-) diff --git a/library/cloud/ec2 b/library/cloud/ec2 index ba37c3cc3f0..23ec3eabffd 100644 --- a/library/cloud/ec2 +++ b/library/cloud/ec2 @@ -226,14 +226,14 @@ options: required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" security_token: description: - security token to authenticate against AWS required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" requirements: [ "boto" ] author: Seth Vidal, Tim Gerla, Lester Wade diff --git a/library/cloud/ec2_ami b/library/cloud/ec2_ami index 94c1e864a85..446c7417e01 100644 --- a/library/cloud/ec2_ami +++ b/library/cloud/ec2_ami @@ -115,14 +115,14 @@ options: required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" security_token: description: - security token to authenticate against AWS required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" requirements: [ "boto" ] author: Evan Duffield diff --git a/library/cloud/ec2_eip b/library/cloud/ec2_eip index 4d6d24eaa34..7eac9798417 100644 --- a/library/cloud/ec2_eip +++ b/library/cloud/ec2_eip @@ -67,14 +67,14 @@ options: required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" security_token: description: - security token to authenticate against AWS required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" requirements: [ "boto" ] author: Lorin Hochstein diff --git a/library/cloud/ec2_group b/library/cloud/ec2_group index 5d72c009acc..1dd463cc8d6 100644 --- a/library/cloud/ec2_group +++ b/library/cloud/ec2_group @@ -71,14 +71,14 @@ options: required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" security_token: description: - security token to authenticate against AWS required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" requirements: [ "boto" ] ''' diff --git a/library/cloud/ec2_key b/library/cloud/ec2_key index 6523c70e95c..289deb6c9d6 100644 --- a/library/cloud/ec2_key +++ b/library/cloud/ec2_key @@ -62,14 +62,14 @@ options: required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" security_token: description: - security token to authenticate against AWS required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" requirements: [ "boto" ] author: Vincent Viallet diff --git a/library/cloud/ec2_snapshot b/library/cloud/ec2_snapshot index 81cf3554b3d..e637ebefa38 100644 --- a/library/cloud/ec2_snapshot +++ b/library/cloud/ec2_snapshot @@ -76,14 +76,14 @@ options: required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" security_token: description: - security token to authenticate against AWS required: false default: null aliases: [] - version_added: "1.5" + 
version_added: "1.6" requirements: [ "boto" ] author: Will Thames diff --git a/library/cloud/ec2_tag b/library/cloud/ec2_tag index c9de5155853..92af644933e 100644 --- a/library/cloud/ec2_tag +++ b/library/cloud/ec2_tag @@ -73,14 +73,14 @@ options: required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" security_token: description: - security token to authenticate against AWS required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" requirements: [ "boto" ] author: Lester Wade diff --git a/library/cloud/ec2_vol b/library/cloud/ec2_vol index fef476a2165..faacc82da8f 100644 --- a/library/cloud/ec2_vol +++ b/library/cloud/ec2_vol @@ -96,14 +96,14 @@ options: required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" security_token: description: - security token to authenticate against AWS required: false default: null aliases: [] - version_added: "1.5" + version_added: "1.6" requirements: [ "boto" ] author: Lester Wade From f0704159ae52ce35a8a01b8cf17970d859d57f5f Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 11 Mar 2014 12:01:57 -0400 Subject: [PATCH 249/772] Update docstrings in ec2_vol --- library/cloud/ec2_vol | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/library/cloud/ec2_vol b/library/cloud/ec2_vol index 42ec3cb8732..176fb30ec2e 100644 --- a/library/cloud/ec2_vol +++ b/library/cloud/ec2_vol @@ -52,12 +52,14 @@ options: required: false default: null aliases: [] + version_added: "1.6" id: description: - volume id if you wish to attach an existing volume (requires instance) required: false default: null aliases: [] + version_added: "1.6" volume_size: description: - size of volume (in GB) to create. @@ -158,9 +160,10 @@ EXAMPLES = ''' with_items: ec2.instances register: ec2_vol -# Idempotent playbook example combined with single instance launch -# Volume must exist in the same zone; will not do anything if it is -# already attached. +# Example: Launch an instance and then add a volue if not already present +# * Nothing will happen if the volume is already attached. +# * Volume must exist in the same zone. + - local_action: module: ec2 keypair: "{{ keypair }}" From 2b84a40d8ef63977a307e5b2bec8206a05b9794e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 11 Mar 2014 11:12:23 -0500 Subject: [PATCH 250/772] Renaming reuse parameter in ec2_eip and adding a version_added string --- library/cloud/ec2_eip | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/library/cloud/ec2_eip b/library/cloud/ec2_eip index 6d8f2e0d3ad..e6ecf091a48 100644 --- a/library/cloud/ec2_eip +++ b/library/cloud/ec2_eip @@ -75,11 +75,12 @@ options: default: null aliases: [] version_added: "1.6" - reuse: + reuse_existing_ip_allowed: description: - Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one. 
required: false default: false + version_added: "1.6" requirements: [ "boto" ] author: Lorin Hochstein @@ -194,13 +195,13 @@ def ip_is_associated_with_instance(ec2, public_ip, instance_id, module): return False -def allocate_address(ec2, domain, module, reuse): +def allocate_address(ec2, domain, module, reuse_existing_ip_allowed): """ Allocate a new elastic IP address (when needed) and return it """ # If we're in check mode, nothing else to do if module.check_mode: module.exit_json(change=True) - if reuse: + if reuse_existing_ip_allowed: if domain: domain_filter = { 'domain' : domain } else: @@ -258,7 +259,7 @@ def main(): state = dict(required=False, default='present', choices=['present', 'absent']), in_vpc = dict(required=False, choices=BOOLEANS, default=False), - reuse = dict(required=False, choices=BOOLEANS, default=False), + reuse_existing_ip_allowed = dict(required=False, type='bool', default=False), ) ) @@ -277,19 +278,19 @@ def main(): state = module.params.get('state') in_vpc = module.params.get('in_vpc') domain = "vpc" if in_vpc else None - reuse = module.params.get('reuse'); + reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed'); if state == 'present': if public_ip is None: if instance_id is None: - address = allocate_address(ec2, domain, module, reuse) + address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed) module.exit_json(changed=True, public_ip=address.public_ip) else: # Determine if the instance is inside a VPC or not instance = find_instance(ec2, instance_id, module) if instance.vpc_id != None: domain = "vpc" - address = allocate_address(ec2, domain, module, reuse) + address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed) else: address = find_address(ec2, public_ip, module) associate_ip_and_instance(ec2, address, instance_id, module) From 351ebd0b43c315bc6921017ea503579dca7327db Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 11 Mar 2014 12:14:04 -0400 Subject: [PATCH 251/772] Update docstrings in ec2_ami_search --- library/cloud/ec2_ami_search | 2 ++ 1 file changed, 2 insertions(+) diff --git a/library/cloud/ec2_ami_search b/library/cloud/ec2_ami_search index 0bb3fab0078..e3b75257c35 100644 --- a/library/cloud/ec2_ami_search +++ b/library/cloud/ec2_ami_search @@ -20,10 +20,12 @@ DOCUMENTATION = ''' --- module: ec2_ami_search short_description: Retrieve AWS AMI for a given operating system. +version_added: "1.6" description: - Look up the most recent AMI on AWS for a given operating system. - Returns C(ami), C(aki), C(ari), C(serial), C(tag) - If there is no AKI or ARI associated with an image, these will be C(null). + - Only supports images from cloud-images.ubuntu.com - Example output: C({"ami": "ami-69f5a900", "changed": false, "aki": "aki-88aa75e1", "tag": "release", "ari": null, "serial": "20131024"}) options: distro: From 3d07db26e377a63189342ddf3e4cbe7b9a04e429 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 11 Mar 2014 12:17:29 -0400 Subject: [PATCH 252/772] Update docstring in digital_ocean for backups_enabled version_added --- library/cloud/digital_ocean | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/digital_ocean b/library/cloud/digital_ocean index 8a6e76fcf00..62b8709e39a 100644 --- a/library/cloud/digital_ocean +++ b/library/cloud/digital_ocean @@ -78,7 +78,7 @@ options: backups_enabled: description: - Optional, Boolean, enables backups for your droplet. 
-    version_added: "1.5"
+    version_added: "1.6"
     default: "no"
     choices: [ "yes", "no" ]
   wait:

From 880eaf38a66e2d8bd80d1bf56c9eab9faa2d7363 Mon Sep 17 00:00:00 2001
From: Fabian Freyer
Date: Tue, 11 Mar 2014 17:55:40 +0100
Subject: [PATCH 253/772] Added support for pkgng multiple repositories.

Currently checking if pkgng >= 1.1.4, as specified in
https://wiki.freebsd.org/pkgng. I guess that's when using PKGSITE was
deprecated.
---
 library/packaging/pkgng | 42 +++++++++++++++++++++++++++++++++++------
 1 file changed, 36 insertions(+), 6 deletions(-)

diff --git a/library/packaging/pkgng b/library/packaging/pkgng
index 7b0468a7cbd..f862b0b0df8 100644
--- a/library/packaging/pkgng
+++ b/library/packaging/pkgng
@@ -48,8 +48,11 @@ options:
         default: no
     pkgsite:
         description:
-            - specify packagesite to use for downloading packages, if
-              not specified, use settings from /usr/local/etc/pkg.conf
+            - for pkgng versions before 1.1.4, specify packagesite to use
+              for downloading packages, if not specified, use settings from
+              /usr/local/etc/pkg.conf
+              for newer pkgng versions, specify the name of a repository
+              configured in /usr/local/etc/pkg/repos
         required: false
     author: bleader
     notes:
@@ -68,6 +71,7 @@ EXAMPLES = '''
 import json
 import shlex
 import os
+import re
 import sys
 
 def query_package(module, pkgin_path, name):
@@ -79,6 +83,22 @@ def query_package(module, pkgin_path, name):
 
     return False
 
+def pkgng_older_than(module, pkgin_path, compare_version):
+
+    rc, out, err = module.run_command("%s -v" % pkgin_path)
+    version = map(lambda x: int(x), re.split(r'[\._]', out))
+
+    i = 0
+    new_pkgng = True
+    while compare_version[i] == version[i]:
+        i += 1
+        if i == min(len(compare_version), len(version)):
+            break
+    else:
+        if compare_version[i] > version[i]:
+            new_pkgng = False
+    return not new_pkgng
+
 
 def remove_packages(module, pkgin_path, packages):
 
@@ -108,11 +128,18 @@ def install_packages(module, pkgin_path, packages, cached, pkgsite):
 
     install_c = 0
 
-    if pkgsite != "":
-        pkgsite="PACKAGESITE=%s" % (pkgsite)
+    # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
+    # in /usr/local/etc/pkg/repos
+    old_pkgng = pkgng_older_than(module, pkgin_path, [1, 1, 4])
+
+    if old_pkgng and (pkgsite != ""):
+        pkgsite = "PACKAGESITE=%s" % (pkgsite)
 
     if not module.check_mode and cached == "no":
-        rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgin_path))
+        if old_pkgng:
+            rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgin_path))
+        else:
+            rc, out, err = module.run_command("%s update" % (pkgin_path))
         if rc != 0:
            module.fail_json(msg="Could not update catalogue")
 
@@ -121,7 +148,10 @@ def install_packages(module, pkgin_path, packages, cached, pkgsite):
             continue
 
         if not module.check_mode:
-            rc, out, err = module.run_command("%s %s install -g -U -y %s" % (pkgsite, pkgin_path, package))
+            if old_pkgng:
+                rc, out, err = module.run_command("%s %s install -g -U -y %s" % (pkgsite, pkgin_path, package))
+            else:
+                rc, out, err = module.run_command("%s install -r %s -g -U -y %s" % (pkgin_path, pkgsite, package))
 
         if not module.check_mode and not query_package(module, pkgin_path, package):
             module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err)

From efe7bfa74a512edcba4fc435df2131d6f1ae9848 Mon Sep 17 00:00:00 2001
From: Fabian Freyer
Date: Tue, 11 Mar 2014 18:03:14 +0100
Subject: [PATCH 254/772] Changed old variable names that referred to pkgin.
--- library/packaging/pkgng | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/library/packaging/pkgng b/library/packaging/pkgng index f862b0b0df8..47e66328376 100644 --- a/library/packaging/pkgng +++ b/library/packaging/pkgng @@ -74,18 +74,18 @@ import os import re import sys -def query_package(module, pkgin_path, name): +def query_package(module, pkgng_path, name): - rc, out, err = module.run_command("%s info -g -e %s" % (pkgin_path, name)) + rc, out, err = module.run_command("%s info -g -e %s" % (pkgng_path, name)) if rc == 0: return True return False -def pkgng_older_than(module, pkgin_path, compare_version): +def pkgng_older_than(module, pkgng_path, compare_version): - rc, out, err = module.run_command("%s -v" % pkgin_path) + rc, out, err = module.run_command("%s -v" % pkgng_path) version = map(lambda x: int(x), re.split(r'[\._]', out)) i = 0 @@ -100,19 +100,19 @@ def pkgng_older_than(module, pkgin_path, compare_version): return not new_pkgng -def remove_packages(module, pkgin_path, packages): +def remove_packages(module, pkgng_path, packages): remove_c = 0 # Using a for loop incase of error, we can report the package that failed for package in packages: # Query the package first, to see if we even need to remove - if not query_package(module, pkgin_path, package): + if not query_package(module, pkgng_path, package): continue if not module.check_mode: - rc, out, err = module.run_command("%s delete -y %s" % (pkgin_path, package)) + rc, out, err = module.run_command("%s delete -y %s" % (pkgng_path, package)) - if not module.check_mode and query_package(module, pkgin_path, package): + if not module.check_mode and query_package(module, pkgng_path, package): module.fail_json(msg="failed to remove %s: %s" % (package, out)) remove_c += 1 @@ -124,36 +124,36 @@ def remove_packages(module, pkgin_path, packages): module.exit_json(changed=False, msg="package(s) already absent") -def install_packages(module, pkgin_path, packages, cached, pkgsite): +def install_packages(module, pkgng_path, packages, cached, pkgsite): install_c = 0 # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions # in /usr/local/etc/pkg/repos - old_pkgng = pkgng_older_than(module, pkgin_path, [1, 1, 4]) + old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4]) if old_pkgng and (pkgsite != ""): pkgsite = "PACKAGESITE=%s" % (pkgsite) if not module.check_mode and cached == "no": if old_pkgng: - rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgin_path)) + rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path)) else: - rc, out, err = module.run_command("%s update" % (pkgin_path)) + rc, out, err = module.run_command("%s update" % (pkgng_path)) if rc != 0: module.fail_json(msg="Could not update catalogue") for package in packages: - if query_package(module, pkgin_path, package): + if query_package(module, pkgng_path, package): continue if not module.check_mode: if old_pkgng: - rc, out, err = module.run_command("%s %s install -g -U -y %s" % (pkgsite, pkgin_path, package)) + rc, out, err = module.run_command("%s %s install -g -U -y %s" % (pkgsite, pkgng_path, package)) else: - rc, out, err = module.run_command("%s install -r %s -g -U -y %s" % (pkgin_path, pkgsite, package)) + rc, out, err = module.run_command("%s install -r %s -g -U -y %s" % (pkgng_path, pkgsite, package)) - if not module.check_mode and not query_package(module, pkgin_path, package): + if not module.check_mode and not query_package(module, pkgng_path, 
package):
             module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err)
 
         install_c += 1
@@ -173,17 +173,17 @@ def main():
                 pkgsite        = dict(default="", required=False)),
         supports_check_mode = True)
 
-    pkgin_path = module.get_bin_path('pkg', True)
+    pkgng_path = module.get_bin_path('pkg', True)
 
     p = module.params
 
     pkgs = p["name"].split(",")
 
     if p["state"] == "present":
-        install_packages(module, pkgin_path, pkgs, p["cached"], p["pkgsite"])
+        install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"])
 
     elif p["state"] == "absent":
-        remove_packages(module, pkgin_path, pkgs)
+        remove_packages(module, pkgng_path, pkgs)
 
 # import module snippets
 from ansible.module_utils.basic import *

From 514e4b3742a481df4ea9805e8cabb02c6ff930fe Mon Sep 17 00:00:00 2001
From: Alexander Popov
Date: Wed, 19 Feb 2014 19:56:51 -0500
Subject: [PATCH 255/772] Added multi VPC support Initial commit

Refactored terminate_vpc method to support vpc_id_tags

Cleaned up find_vpc() method
---
 library/cloud/ec2_vpc | 106 +++++++++++++++++++++++++++---------------
 1 file changed, 68 insertions(+), 38 deletions(-)

diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc
index 9b9fb95a0b2..88f44f7f93e 100644
--- a/library/cloud/ec2_vpc
+++ b/library/cloud/ec2_vpc
@@ -56,6 +56,12 @@ options:
     required: false
     default: null
     aliases: []
+  vpc_id_tags:
+    description:
+      - A list of tags uniquely identifying a VPC in the form of: {Tag1: Value1, Tag2: Value2, ...}. This list works in conjunction with CIDR (cidr_block) and is ignored when VPC id (vpc_id) is specified.
+    required: false
+    default: null
+    aliases: []
   internet_gateway:
     description:
      - Toggle whether there should be an Internet gateway attached to the VPC
@@ -127,6 +133,7 @@ EXAMPLES = '''
       module: ec2_vpc
       state: present
       cidr_block: 172.23.0.0/16
+      vpc_id_tags: { "Environment":"Development" }
       region: us-west-2
 # Full creation example with subnets and optional availability zones.
 # The absence or presence of subnets deletes or creates them respectively.
@@ -134,6 +141,7 @@ EXAMPLES = ''' module: ec2_vpc state: present cidr_block: 172.22.0.0/16 + vpc_id_tags: { "Environment":"Development" } subnets: - cidr: 172.22.1.0/24 az: us-west-2c @@ -193,9 +201,54 @@ def get_vpc_info(vpc): 'state': vpc.state, }) +def find_vpc(module, vpc_conn, vpc_id=None, cidr=None): + """ + Finds a VPC that matches a specific id or cidr + tags + + module : AnsibleModule object + vpc_conn: authenticated VPCConnection connection object + + Returns: + A VPC object that matches either an ID or CIDR and one or more tag values + """ + + if vpc_id == None and cidr == None: + module.fail_json( + msg='You must specify either a vpc id or a cidr block + list of unique tags, aborting' + ) + + found_vpcs = [] + + vpc_id_tags = module.params.get('vpc_id_tags') + + # Check for existing VPC by cidr_block or id + if vpc_id is not None: + found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available',}) + + else: + previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'}) + + for vpc in previous_vpcs: + # Get all tags for each of the found VPCs + vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id})) + + # If the supplied list of ID Tags match a subset of the VPC Tags, we found our VPC + if set(vpc_id_tags.items()).issubset(set(vpc_tags.items())): + found_vpcs.append(vpc) + + found_vpc = None + + if len(found_vpcs) == 1: + found_vpc = found_vpcs[0] + + if len(found_vpcs) > 1: + module.fail_json(msg='Found more than one vpc based on the supplied criteria, aborting') + + return (found_vpc) + def create_vpc(module, vpc_conn): """ - Creates a new VPC + Creates a new or modifies an existing VPC. module : AnsibleModule object vpc_conn: authenticated VPCConnection connection object @@ -217,20 +270,12 @@ def create_vpc(module, vpc_conn): wait_timeout = int(module.params.get('wait_timeout')) changed = False - # Check for existing VPC by cidr_block or id - if id != None: - filter_dict = {'vpc-id':id, 'state': 'available',} - previous_vpcs = vpc_conn.get_all_vpcs(None, filter_dict) - else: - filter_dict = {'cidr': cidr_block, 'state': 'available'} - previous_vpcs = vpc_conn.get_all_vpcs(None, filter_dict) - - if len(previous_vpcs) > 1: - module.fail_json(msg='EC2 returned more than one VPC, aborting') + # Check for existing VPC by cidr_block + tags or id + previous_vpc = find_vpc(module, vpc_conn, id, cidr_block) - if len(previous_vpcs) == 1: + if previous_vpc is not None: changed = False - vpc = previous_vpcs[0] + vpc = previous_vpc else: changed = True try: @@ -269,6 +314,7 @@ def create_vpc(module, vpc_conn): module.fail_json(msg='subnets needs to be a list of cidr blocks') current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id }) + # First add all new subnets for subnet in subnets: add_subnet = True @@ -281,6 +327,7 @@ def create_vpc(module, vpc_conn): changed = True except EC2ResponseError, e: module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e)) + # Now delete all absent subnets for csubnet in current_subnets: delete_subnet = True @@ -332,7 +379,7 @@ def create_vpc(module, vpc_conn): if not isinstance(route_tables, list): module.fail_json(msg='route tables need to be a list of dictionaries') - # Work through each route table and update/create to match dictionary array +# Work through each route table and update/create to match dictionary array all_route_tables = [] for rt in route_tables: try: @@ -350,7 +397,7 @@ def create_vpc(module, vpc_conn): # 
Associate with subnets
         for sn in rt['subnets']:
-            rsn = vpc_conn.get_all_subnets(filters={'cidr': sn})
+            rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
             if len(rsn) != 1:
                 module.fail_json(
                     msg='The subnet {0} to associate with route_table {1} ' \
@@ -360,7 +407,7 @@ def create_vpc(module, vpc_conn):
 
             # Disassociate then associate since we don't have replace
             old_rt = vpc_conn.get_all_route_tables(
-                filters={'association.subnet_id': rsn.id}
+                filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id}
             )
             if len(old_rt) == 1:
                 old_rt = old_rt[0]
@@ -434,23 +481,10 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
     vpc_dict = {}
     terminated_vpc_id = ''
     changed = False
-
-    if vpc_id == None and cidr == None:
-        module.fail_json(
-            msg='You must either specify a vpc id or a cidr '\
-                'block to terminate a VPC, aborting'
-        )
-    if vpc_id is not None:
-        vpc_rs = vpc_conn.get_all_vpcs(vpc_id)
-    else:
-        vpc_rs = vpc_conn.get_all_vpcs(filters={'cidr': cidr})
-    if len(vpc_rs) > 1:
-        module.fail_json(
-            msg='EC2 returned more than one VPC for id {0} ' \
-                'or cidr {1}, aborting'.format(vpc_id,vidr)
-        )
-    if len(vpc_rs) == 1:
-        vpc = vpc_rs[0]
+
+    vpc = find_vpc(module, vpc_conn, vpc_id, cidr)
+
+    if vpc is not None:
         if vpc.state == 'available':
             terminated_vpc_id=vpc.id
             vpc_dict=get_vpc_info(vpc)
@@ -497,6 +531,7 @@ def main():
             dns_hostnames = dict(choices=BOOLEANS, default=True),
             subnets = dict(type='list'),
             vpc_id = dict(),
+            vpc_id_tags = dict(type='dict'),
             internet_gateway = dict(choices=BOOLEANS, default=False),
             route_tables = dict(type='list'),
             state = dict(choices=['present', 'absent'], default='present'),
@@ -527,11 +562,6 @@ def main():
     if module.params.get('state') == 'absent':
         vpc_id = module.params.get('vpc_id')
         cidr = module.params.get('cidr_block')
-        if vpc_id == None and cidr == None:
-            module.fail_json(
-                msg='You must either specify a vpc id or a cidr '\
-                    'block to terminate a VPC, aborting'
-            )
         (changed, vpc_dict, new_vpc_id) = terminate_vpc(module, vpc_conn, vpc_id, cidr)
         subnets_changed = None
     elif module.params.get('state') == 'present':

From 49e9c427709691dbb87d2ae11b21f66fb30435b7 Mon Sep 17 00:00:00 2001
From: Alexander Popov
Date: Wed, 5 Mar 2014 10:32:51 -0500
Subject: [PATCH 256/772] Renamed vpc_id_tags to resource_tags

---
 library/cloud/ec2_vpc | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc
index 88f44f7f93e..6bb2c7d235c 100644
--- a/library/cloud/ec2_vpc
+++ b/library/cloud/ec2_vpc
@@ -56,7 +56,7 @@ options:
     required: false
     default: null
     aliases: []
-  vpc_id_tags:
+  resource_tags:
     description:
       - A list of tags uniquely identifying a VPC in the form of: {Tag1: Value1, Tag2: Value2, ...}. This list works in conjunction with CIDR (cidr_block) and is ignored when VPC id (vpc_id) is specified.
     required: false
     default: null
     aliases: []
@@ -133,7 +133,7 @@ EXAMPLES = '''
       module: ec2_vpc
       state: present
       cidr_block: 172.23.0.0/16
-      vpc_id_tags: { "Environment":"Development" }
+      resource_tags: { "Environment":"Development" }
       region: us-west-2
 # Full creation example with subnets and optional availability zones.
 # The absence or presence of subnets deletes or creates them respectively.
@@ -141,7 +141,7 @@ EXAMPLES = '''
       module: ec2_vpc
       state: present
       cidr_block: 172.22.0.0/16
-      vpc_id_tags: { "Environment":"Development" }
+      resource_tags: { "Environment":"Development" }
       subnets:
         - cidr: 172.22.1.0/24
           az: us-west-2c
@@ -219,7 +219,7 @@ def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
 
     found_vpcs = []
 
-    vpc_id_tags = module.params.get('vpc_id_tags')
+    resource_tags = module.params.get('resource_tags')
 
     # Check for existing VPC by cidr_block or id
     if vpc_id is not None:
@@ -233,7 +233,7 @@ def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
             vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
 
             # If the supplied list of ID Tags match a subset of the VPC Tags, we found our VPC
-            if set(vpc_id_tags.items()).issubset(set(vpc_tags.items())):
+            if set(resource_tags.items()).issubset(set(vpc_tags.items())):
                 found_vpcs.append(vpc)
 
     found_vpc = None
@@ -531,7 +531,7 @@ def main():
             dns_hostnames = dict(choices=BOOLEANS, default=True),
             subnets = dict(type='list'),
             vpc_id = dict(),
-            vpc_id_tags = dict(type='dict'),
+            resource_tags = dict(type='dict'),
             internet_gateway = dict(choices=BOOLEANS, default=False),
             route_tables = dict(type='list'),
             state = dict(choices=['present', 'absent'], default='present'),

From 6dfafb0cf598573f1ca001344cb8df6f60f8141f Mon Sep 17 00:00:00 2001
From: Alexander Popov
Date: Wed, 5 Mar 2014 11:53:17 -0500
Subject: [PATCH 257/772] Updated module documentation, re: resource_tags

---
 library/cloud/ec2_vpc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc
index 6bb2c7d235c..d470ccdeaf4 100644
--- a/library/cloud/ec2_vpc
+++ b/library/cloud/ec2_vpc
@@ -58,7 +58,7 @@ options:
     aliases: []
   resource_tags:
     description:
-      - A list of tags uniquely identifying a VPC in the form of: {Tag1: Value1, Tag2: Value2, ...}. This list works in conjunction with CIDR (cidr_block) and is ignored when VPC id (vpc_id) is specified.
+      - A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored.
     required: false
     default: null
     aliases: []

From 8c8ff79c8b4368105217e39d3e073e943d20e232 Mon Sep 17 00:00:00 2001
From: Alexander Popov
Date: Wed, 5 Mar 2014 13:51:57 -0500
Subject: [PATCH 258/772] Added code to create tags on the VPC resource

---
 library/cloud/ec2_vpc | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc
index d470ccdeaf4..35d38517c0d 100644
--- a/library/cloud/ec2_vpc
+++ b/library/cloud/ec2_vpc
@@ -300,7 +300,21 @@ def create_vpc(module, vpc_conn):
             module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
 
     # Done with base VPC, now change to attributes and features.
-
+
+    # Add resource tags
+    vpc_spec_tags = module.params.get('resource_tags')
+    vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
+
+    if not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())):
+        new_tags = {}
+
+        for (key, value) in set(vpc_spec_tags.items()):
+            if (key, value) not in set(vpc_tags.items()):
+                new_tags[key] = value
+
+        if new_tags:
+            vpc_conn.create_tags(vpc.id, new_tags)
+
     # boto doesn't appear to have a way to determine the existing
     # value of the dns attributes, so we just set them.
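A minimal standalone sketch of the tag-matching rule that patches 255-258 above build on, with illustrative tag values (the dictionaries below are assumptions for the example, not output from a real AWS account): a VPC counts as a match when the play's resource_tags form a subset of the tags already on the VPC, and only the missing (key, value) pairs are then sent to create_tags.

    # resource_tags as supplied in the play
    resource_tags = {'Environment': 'Development'}
    # tags found on a candidate VPC; the extra 'Name' tag does not block the match
    vpc_tags = {'Environment': 'Development', 'Name': 'dev-vpc'}

    if set(resource_tags.items()).issubset(set(vpc_tags.items())):
        print('existing VPC matches; reuse it instead of creating a new one')

    # create_vpc() applies only the pairs that are missing from the VPC
    new_tags = dict(set(resource_tags.items()) - set(vpc_tags.items()))
    assert new_tags == {}  # nothing to tag in this example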
From d48880debce0d3de9c9cacee491f9001a7ef829d Mon Sep 17 00:00:00 2001
From: Alexander Popov
Date: Tue, 11 Mar 2014 13:25:58 -0400
Subject: [PATCH 259/772] Added 'version_added' to the 'resource_tags'
 attribute in DOCUMENTATION section.

---
 library/cloud/ec2_vpc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc
index 35d38517c0d..abeb20e3226 100644
--- a/library/cloud/ec2_vpc
+++ b/library/cloud/ec2_vpc
@@ -62,6 +62,7 @@ options:
     required: false
     default: null
     aliases: []
+    version_added: "1.6"
   internet_gateway:
     description:
       - Toggle whether there should be an Internet gateway attached to the VPC

From b95fe470c4e8203601baee7cb3b34a1eae19fc62 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Tue, 11 Mar 2014 12:26:35 -0500
Subject: [PATCH 260/772] Fixing up some docstrings causing make webdocs to fail

---
 library/cloud/ec2_ami_search | 3 ++-
 library/cloud/s3             | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/library/cloud/ec2_ami_search b/library/cloud/ec2_ami_search
index e3b75257c35..a1f53cd3d67 100644
--- a/library/cloud/ec2_ami_search
+++ b/library/cloud/ec2_ami_search
@@ -26,7 +26,8 @@ description:
   - Returns C(ami), C(aki), C(ari), C(serial), C(tag)
   - If there is no AKI or ARI associated with an image, these will be C(null).
   - Only supports images from cloud-images.ubuntu.com
-  - Example output: C({"ami": "ami-69f5a900", "changed": false, "aki": "aki-88aa75e1", "tag": "release", "ari": null, "serial": "20131024"})
+  - 'Example output: C({"ami": "ami-69f5a900", "changed": false, "aki": "aki-88aa75e1", "tag": "release", "ari": null, "serial": "20131024"})'
+version_added: "1.6"
 options:
   distro:
     description: Linux distribution (e.g., C(ubuntu))
diff --git a/library/cloud/s3 b/library/cloud/s3
index afc3f99d388..fc0824b2354 100644
--- a/library/cloud/s3
+++ b/library/cloud/s3
@@ -68,7 +68,7 @@ options:
     aliases: []
   s3_url:
     description:
-      - S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. Ansible tries to guess if fakes3 (https://github.com/jubos/fake-s3) or Eucalyptus Walrus (https://github.com/eucalyptus/eucalyptus/wiki/Walrus) is used and configure connection accordingly. Current heuristic is: everything with scheme fakes3:// is fakes3, everything else not ending with amazonaws.com is Walrus.
+      - "S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. Ansible tries to guess if fakes3 (https://github.com/jubos/fake-s3) or Eucalyptus Walrus (https://github.com/eucalyptus/eucalyptus/wiki/Walrus) is used and configure connection accordingly. Current heuristic is: everything with scheme fakes3:// is fakes3, everything else not ending with amazonaws.com is Walrus."
     default: null
     aliases: [ S3_URL ]
   aws_secret_key:

From 546d514f87c719cb3b480db4d651cdbf97c83013 Mon Sep 17 00:00:00 2001
From: Vadim Kuznetsov
Date: Tue, 4 Mar 2014 18:46:42 -0500
Subject: [PATCH 261/772] bug 5959: https://github.com/ansible/ansible/issues/5959

---
 library/cloud/nova_keypair | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/cloud/nova_keypair b/library/cloud/nova_keypair
index 19d3fa49b95..18674a1220a 100644
--- a/library/cloud/nova_keypair
+++ b/library/cloud/nova_keypair
@@ -18,7 +18,7 @@
 # along with this software. If not, see .
 try:
-    from novaclient.v1_1 import client
+    from novaclient.v1_1 import client as nova_client
     from novaclient import exceptions
     import time
 except ImportError:

From 5fdf7f9f75801587dad3b60c20a73bfdab4c7a9b Mon Sep 17 00:00:00 2001
From: Richard C Isaacson
Date: Tue, 11 Mar 2014 13:25:16 -0500
Subject: [PATCH 262/772] Bump released-in version and update module snippets.
 Closes GH-5154

---
 library/cloud/nova_fip | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/library/cloud/nova_fip b/library/cloud/nova_fip
index b236e82b908..d10b4d6ab62 100644
--- a/library/cloud/nova_fip
+++ b/library/cloud/nova_fip
@@ -26,7 +26,7 @@ except ImportError:
 DOCUMENTATION = '''
 ---
 module: nova_fip
-version_added: "1.5"
+version_added: "1.6"
 short_description: Associate an OpenStack floating IP with a server.
 description:
   - Manage nova floating IPs using the python-novaclient library.
@@ -228,6 +228,7 @@ def main():
         changed = _disassociate_floating_ip(nova, floating_ip, server)
     module.exit_json(changed=changed)
 
-# this is magic, see lib/ansible/module_common.py
-#<>
-main()
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
\ No newline at end of file

From e6d099852ef677ef337671a04872375f22c3ec57 Mon Sep 17 00:00:00 2001
From: Richard C Isaacson
Date: Tue, 11 Mar 2014 13:33:18 -0500
Subject: [PATCH 263/772] Bump released-in version and update module snippets.
 Closes GH-5069

---
 library/cloud/nova_group | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/library/cloud/nova_group b/library/cloud/nova_group
index c96fb725ed5..21393a79afe 100644
--- a/library/cloud/nova_group
+++ b/library/cloud/nova_group
@@ -33,7 +33,7 @@ except ImportError:
 DOCUMENTATION = '''
 ---
 module: security_group
-version_added: "1.5"
+version_added: "1.6"
 short_description: Maintain nova security groups.
 description:
   - Manage nova security groups using the python-novaclient library.
@@ -336,6 +336,8 @@ def main():
 
     module.exit_json(changed=changed, group_id=group_id)
 
-# this is magic, see lib/ansible/module_common.py
-#<>
-main()
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
\ No newline at end of file

From 85b3c4d907ac89c4340ee4894cfc87e4915e4d84 Mon Sep 17 00:00:00 2001
From: Richard C Isaacson
Date: Tue, 11 Mar 2014 13:45:04 -0500
Subject: [PATCH 264/772] nova_compute: Adding the version_added flag to
 user_data. Per note in GH-5916.

---
 library/cloud/nova_compute | 1 +
 1 file changed, 1 insertion(+)

diff --git a/library/cloud/nova_compute b/library/cloud/nova_compute
index 55f9e5dccb9..049c8116bbc 100644
--- a/library/cloud/nova_compute
+++ b/library/cloud/nova_compute
@@ -112,6 +112,7 @@ options:
       - Opaque blob of data which is made available to the instance
     required: false
     default: None
+    version_added: "1.6"
 requirements: ["novaclient"]
 '''

From b6b9e1c6f4c03e60cdd77df505665cbaa87ba44e Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Tue, 11 Mar 2014 13:48:39 -0500
Subject: [PATCH 265/772] Adding version_added field to metadata field in s3 module

---
 library/cloud/s3 | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/library/cloud/s3 b/library/cloud/s3
index 4c4503a9db6..4fc470678b2 100644
--- a/library/cloud/s3
+++ b/library/cloud/s3
@@ -88,6 +88,8 @@ options:
       - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
required: false default: null + version_added: "1.6" + requirements: [ "boto" ] author: Lester Wade, Ralph Tice ''' From b837e5275c625e3b4b996e5dd3e06a5880f9d992 Mon Sep 17 00:00:00 2001 From: Jake Kreider Date: Fri, 1 Nov 2013 09:23:01 -0500 Subject: [PATCH 266/772] Added metadata support to s3 module --- library/cloud/s3 | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/library/cloud/s3 b/library/cloud/s3 index fc0824b2354..b660fd53c9e 100644 --- a/library/cloud/s3 +++ b/library/cloud/s3 @@ -83,6 +83,11 @@ options: required: false default: null aliases: [ 'ec2_access_key', 'access_key' ] + metadata: + description: + - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. + required: false + default: null requirements: [ "boto" ] author: Lester Wade, Ralph Tice ''' @@ -97,7 +102,9 @@ EXAMPLES = ''' # GET/download and do not overwrite local file (trust remote) - s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get force=false # PUT/upload and overwrite remote file (trust local) -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put +- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put +# PUT/upload with metadata +- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip' # PUT/upload and do not overwrite remote file (trust local) - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false # Download an object as a string to use else where in your playbook @@ -201,10 +208,14 @@ def path_check(path): else: return False -def upload_s3file(module, s3, bucket, obj, src, expiry): +def upload_s3file(module, s3, bucket, obj, src, expiry, metadata): try: bucket = s3.lookup(bucket) - key = bucket.new_key(obj) + key = bucket.new_key(obj) + if metadata: + for meta_key in metadata.keys(): + key.set_metadata(meta_key, metadata[meta_key]) + key.set_contents_from_filename(src) url = key.generate_url(expiry) module.exit_json(msg="PUT operation complete", url=url, changed=True) @@ -266,7 +277,8 @@ def main(): expiry = dict(default=600, aliases=['expiration']), s3_url = dict(aliases=['S3_URL']), overwrite = dict(aliases=['force'], default=True, type='bool'), - ) + metadata = dict(type='dict'), + ), ) module = AnsibleModule(argument_spec=argument_spec) @@ -279,6 +291,7 @@ def main(): expiry = int(module.params['expiry']) s3_url = module.params.get('s3_url') overwrite = module.params.get('overwrite') + metadata = module.params.get('metadata') ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) @@ -385,24 +398,24 @@ def main(): if md5_local == md5_remote: sum_matches = True if overwrite is True: - upload_s3file(module, s3, bucket, obj, src, expiry) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata) else: get_download_url(module, s3, bucket, obj, expiry, changed=False) else: sum_matches = False if overwrite is True: - upload_s3file(module, s3, bucket, obj, src, expiry) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata) else: module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True) # If neither exist (based on bucket existence), we can create both. 
     if bucketrtn is False and pathrtn is True:
         create_bucket(module, s3, bucket)
-        upload_s3file(module, s3, bucket, obj, src, expiry)
+        upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
 
     # If bucket exists but key doesn't, just upload.
     if bucketrtn is True and pathrtn is True and keyrtn is False:
-        upload_s3file(module, s3, bucket, obj, src, expiry)
+        upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
 
     # Support for deleting an object if we have both params.
     if mode == 'delete':

From 777f0a8080c825bd9306f482a04dab3e0e393bc5 Mon Sep 17 00:00:00 2001
From: Jake Kreider
Date: Sun, 2 Mar 2014 20:45:53 -0600
Subject: [PATCH 267/772] Updated S3 metadata examples

---
 library/cloud/s3 | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/library/cloud/s3 b/library/cloud/s3
index b660fd53c9e..4c4503a9db6 100644
--- a/library/cloud/s3
+++ b/library/cloud/s3
@@ -105,6 +105,8 @@ EXAMPLES = '''
 - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
 # PUT/upload with metadata
 - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip'
+# PUT/upload with multiple metadata
+- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
 # PUT/upload and do not overwrite remote file (trust local)
 - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false
 # Download an object as a string to use else where in your playbook

From f36de32863090519f937d98e451b859ff2503253 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Tue, 11 Mar 2014 13:48:39 -0500
Subject: [PATCH 268/772] Adding version_added field to metadata field in s3 module

---
 library/cloud/s3 | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/library/cloud/s3 b/library/cloud/s3
index 4c4503a9db6..4fc470678b2 100644
--- a/library/cloud/s3
+++ b/library/cloud/s3
@@ -88,6 +88,8 @@ options:
       - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
     required: false
     default: null
+    version_added: "1.6"
+
 requirements: [ "boto" ]
 author: Lester Wade, Ralph Tice
 '''

From 064722aa5e199776451de0528350511826aa5899 Mon Sep 17 00:00:00 2001
From: Fabian Freyer
Date: Tue, 11 Mar 2014 20:24:32 +0100
Subject: [PATCH 269/772] Added support for package annotations.

---
 library/packaging/pkgng | 121 +++++++++++++++++++++++++++++++++++++---
 1 file changed, 114 insertions(+), 7 deletions(-)

diff --git a/library/packaging/pkgng b/library/packaging/pkgng
index 47e66328376..5bf8fb650f0 100644
--- a/library/packaging/pkgng
+++ b/library/packaging/pkgng
@@ -46,6 +46,14 @@ options:
         choices: [ 'yes', 'no' ]
         required: false
         default: no
+    annotation:
+        description:
+            - a comma-separated list of keyvalue-pairs of the form
+              <+/-/:><tag>[=<value>]. A '+' denotes adding an annotation, a
+              '-' denotes removing an annotation, and ':' denotes modifying an
+              annotation.
+              If setting or modifying annotations, a value must be provided.
+        required: false
     pkgsite:
         description:
             - for pkgng versions before 1.1.4, specify packagesite to use
@@ -63,6 +71,9 @@ EXAMPLES = '''
 # Install package foo
 - pkgng: name=foo state=present
 
+# Annotate package foo and bar
+- pkgng: name=foo,bar annotation=+test1=baz,-test2,:test3=foobar
+
 # Remove packages foo and bar
 - pkgng: name=foo,bar state=absent
 '''
@@ -119,9 +130,9 @@ def remove_packages(module, pkgng_path, packages):
 
     if remove_c > 0:
-        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+        return (True, "removed %s package(s)" % remove_c)
 
-    module.exit_json(changed=False, msg="package(s) already absent")
+    return (False, "package(s) already absent")
 
 def install_packages(module, pkgng_path, packages, cached, pkgsite):
@@ -159,17 +170,97 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite):
         install_c += 1
 
     if install_c > 0:
-        module.exit_json(changed=True, msg="present %s package(s)" % (install_c))
+        return (True, "added %s package(s)" % (install_c))
 
-    module.exit_json(changed=False, msg="package(s) already present")
+    return (False, "package(s) already present")
 
+def annotation_query(module, pkgng_path, package, tag):
+    rc, out, err = module.run_command("%s info -g -A %s" % (pkgng_path, package))
+    match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
+    if match:
+        return match.group('value')
+    return False
+
+
+def annotation_add(module, pkgng_path, package, tag, value):
+    _value = annotation_query(module, pkgng_path, package, tag)
+    if not _value:
+        # Annotation does not exist, add it.
+        rc, out, err = module.run_command('%s annotate -y -A %s %s "%s"'
+            % (pkgng_path, package, tag, value))
+        if rc != 0:
+            module.fail_json("could not annotate %s: %s"
+                % (package, out), stderr=err)
+        return True
+    elif _value != value:
+        # Annotation exists, but value differs
+        module.fail_json(
+            mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+            % (package, tag, _value, value))
+        return False
+    else:
+        # Annotation exists, nothing to do
+        return False
+
+def annotation_delete(module, pkgng_path, package, tag, value):
+    _value = annotation_query(module, pkgng_path, package, tag)
+    if _value:
+        rc, out, err = module.run_command('%s annotate -y -D %s %s'
+            % (pkgng_path, package, tag))
+        if rc != 0:
+            module.fail_json("could not delete annotation to %s: %s"
+                % (package, out), stderr=err)
+        return True
+    return False
+
+def annotation_modify(module, pkgng_path, package, tag, value):
+    _value = annotation_query(module, pkgng_path, package, tag)
+    if not value:
+        # No such tag
+        module.fail_json("could not change annotation to %s: tag %s does not exist"
+            % (package, tag))
+    elif _value == value:
+        # No change in value
+        return False
+    else:
+        rc,out,err = module.run_command('%s annotate -y -M %s %s "%s"'
+            % (pkgng_path, package, tag, value))
+        if rc != 0:
+            module.fail_json("could not change annotation annotation to %s: %s"
+                % (package, out), stderr=err)
+        return True
+
+
+def annotate_packages(module, pkgng_path, packages, annotation):
+    annotate_c = 0
+    annotations = map(lambda _annotation:
+        re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?',
+            _annotation).groupdict(),
+        re.split(r',', annotation))
+
+    operation = {
+        '+': annotation_add,
+        '-': annotation_delete,
+        ':': annotation_modify
+    }
+
+    for package in packages:
+        for _annotation in annotations:
+            annotate_c += ( 1 if operation[_annotation['operation']](
+                module, pkgng_path, package,
+                _annotation['tag'], _annotation['value']) else 0 )
+
+    if
annotate_c > 0: + return (True, "added %s annotations." % annotate_c) + return (False, "changed no annotations") def main(): module = AnsibleModule( argument_spec = dict( - state = dict(default="present", choices=["present","absent"]), + state = dict(default="present", choices=["present","absent"], required=False), name = dict(aliases=["pkg"], required=True), cached = dict(default=False, type='bool'), + annotation = dict(default="", required=False), pkgsite = dict(default="", required=False)), supports_check_mode = True) @@ -179,11 +270,27 @@ def main(): pkgs = p["name"].split(",") + changed = False + msgs = [] + if p["state"] == "present": - install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"]) + _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"]) + changed = changed or _changed + msgs.append(_msg) elif p["state"] == "absent": - remove_packages(module, pkgng_path, pkgs) + _changed, _msg = remove_packages(module, pkgng_path, pkgs) + changed = changed or _changed + msgs.append(_msg) + + if p["annotation"]: + _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"]) + changed = changed or _changed + msgs.append(_msg) + + module.exit_json(changed=changed, msg=", ".join(msgs)) + + # import module snippets from ansible.module_utils.basic import * From 53777bbbc9249748df433bae06516d721ab7db3d Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 11 Mar 2014 15:26:26 -0400 Subject: [PATCH 270/772] Fix docstring and snippet import style in portage module --- library/packaging/portage | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/library/packaging/portage b/library/packaging/portage index c68dc0ebfa4..2cce4b41d1e 100644 --- a/library/packaging/portage +++ b/library/packaging/portage @@ -25,8 +25,7 @@ module: portage short_description: Package manager for Gentoo description: - Manages Gentoo packages - -version_added: "1.4" +version_added: "1.6" options: package: @@ -382,8 +381,7 @@ def main(): elif p['state'] in portage_absent_states: unmerge_packages(module, packages) - -# this is magic, see lib/ansible/module_common.py -#<> +# import module snippets +from ansible.module_utils.basic import * main() From b3ff88d06acc674a8c68c1cdd72275d334a9838f Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 11 Mar 2014 15:43:59 -0400 Subject: [PATCH 271/772] Fix version_added and snippet imports on new digital ocean modules --- library/cloud/digital_ocean_domain | 6 +++--- library/cloud/digital_ocean_droplet | 6 +++--- library/cloud/digital_ocean_sshkey | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/library/cloud/digital_ocean_domain b/library/cloud/digital_ocean_domain index 21a9132381d..ef9338c1765 100644 --- a/library/cloud/digital_ocean_domain +++ b/library/cloud/digital_ocean_domain @@ -21,7 +21,7 @@ module: digital_ocean_domain short_description: Create/delete a DNS record in DigitalOcean description: - Create/delete a DNS record in DigitalOcean. 
-version_added: "1.4" +version_added: "1.6" options: state: description: @@ -236,7 +236,7 @@ def main(): except (DoError, Exception) as e: module.fail_json(msg=str(e)) -# this is magic, see lib/ansible/module_common.py -#<> +# import module snippets +from ansible.module_utils.basic import * main() diff --git a/library/cloud/digital_ocean_droplet b/library/cloud/digital_ocean_droplet index b59d79ce605..d8ec1929ccc 100644 --- a/library/cloud/digital_ocean_droplet +++ b/library/cloud/digital_ocean_droplet @@ -21,7 +21,7 @@ module: digital_ocean_droplet short_description: Create/delete a droplet in DigitalOcean description: - Create/delete a droplet in DigitalOcean and optionally waits for it to be 'running'. -version_added: "1.4" +version_added: "1.6" options: state: description: @@ -314,7 +314,7 @@ def main(): except (DoError, Exception) as e: module.fail_json(msg=str(e)) -# this is magic, see lib/ansible/module_common.py -#<> +# import module snippets +from ansible.module_utils.basic import * main() diff --git a/library/cloud/digital_ocean_sshkey b/library/cloud/digital_ocean_sshkey index 19305c1e42e..8ae7af47793 100644 --- a/library/cloud/digital_ocean_sshkey +++ b/library/cloud/digital_ocean_sshkey @@ -21,7 +21,7 @@ module: digital_ocean_sshkey short_description: Create/delete an SSH key in DigitalOcean description: - Create/delete an SSH key. -version_added: "1.4" +version_added: "1.6" options: state: description: @@ -172,7 +172,7 @@ def main(): except (DoError, Exception) as e: module.fail_json(msg=str(e)) -# this is magic, see lib/ansible/module_common.py -#<> +# import module snippets +from ansible.module_utils.basic import * main() From da95310d865ecb8e3e80e9196c61c62fac2da6cd Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 11 Mar 2014 15:55:26 -0400 Subject: [PATCH 272/772] Rename locale module to locale_gen --- library/system/{locale => locale_gen} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename library/system/{locale => locale_gen} (100%) diff --git a/library/system/locale b/library/system/locale_gen similarity index 100% rename from library/system/locale rename to library/system/locale_gen From 8ffb964f4547680210d620a2f03bf33ad9c5b109 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 11 Mar 2014 15:56:30 -0400 Subject: [PATCH 273/772] Set version_added for locale_gen --- library/system/locale_gen | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/system/locale_gen b/library/system/locale_gen index 81f33dec9ca..df115fd71ea 100644 --- a/library/system/locale_gen +++ b/library/system/locale_gen @@ -11,7 +11,7 @@ module: locale short_description: Creates of removes locales. description: - Manages locales by editing /etc/locale.gen and invoking locale-gen. -version_added: "1.5" +version_added: "1.6" options: name: description: @@ -148,4 +148,4 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() \ No newline at end of file +main() From 41b7badb137887c6211622e623f75b65962350d6 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 11 Mar 2014 16:07:08 -0400 Subject: [PATCH 274/772] Fix snippet imports and version_added in ufw module --- library/system/ufw | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/library/system/ufw b/library/system/ufw index 5ac20978ec2..caae2ad4672 100644 --- a/library/system/ufw +++ b/library/system/ufw @@ -26,7 +26,7 @@ module: ufw short_description: Manage firewall with UFW description: - Manage firewall with UFW. 
-version_added: 1.5
+version_added: 1.6
 author: Aleksey Ovcharenko, Jarno Keskikangas
 notes:
     - See C(man ufw) for more examples.
@@ -169,7 +169,7 @@ def main():
         insert = dict(default=None),
         rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']),
         interface = dict(default=None, aliases=['if']),
-        log = dict(default=False, choices=BOOLEANS),
+        log = dict(default=False, type='bool'),
         from_ip = dict(default='any', aliases=['src', 'from']),
         from_port = dict(default=None),
         to_ip = dict(default='any', aliases=['dest', 'to']),
@@ -250,7 +250,7 @@ def main():
 
     return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
 
-# include magic from lib/ansible/module_common.py
-#<>
+# import module snippets
+from ansible.module_utils.basic import *
 
 main()

From c73cb776890fdd3ffeb84ecd9e22ff755a4ace6a Mon Sep 17 00:00:00 2001
From: James Tanner
Date: Tue, 11 Mar 2014 16:13:47 -0400
Subject: [PATCH 275/772] Fix name in locale_gen docstring

---
 library/system/locale_gen | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/library/system/locale_gen b/library/system/locale_gen
index df115fd71ea..6225ce236dc 100644
--- a/library/system/locale_gen
+++ b/library/system/locale_gen
@@ -7,7 +7,7 @@ from subprocess import Popen, PIPE, call
 
 DOCUMENTATION = '''
 ---
-module: locale
+module: locale_gen
 short_description: Creates or removes locales.
 description:
      - Manages locales by editing /etc/locale.gen and invoking locale-gen.
@@ -29,7 +29,7 @@ options:
 
 EXAMPLES = '''
 # Ensure a locale exists.
-- locale: name=de_CH.UTF-8 state=present
+- locale_gen: name=de_CH.UTF-8 state=present
 '''
 
 # ===========================================

From e4e64a9699822f21afbc6488a32da80db559e751 Mon Sep 17 00:00:00 2001
From: James Tanner
Date: Tue, 11 Mar 2014 16:16:24 -0400
Subject: [PATCH 276/772] Rename lxc to libvirt_lxc

---
 lib/ansible/runner/connection_plugins/{lxc.py => libvirt_lxc.py} | 0
 plugins/inventory/{lxc.py => libvirt_lxc.py}                     | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename lib/ansible/runner/connection_plugins/{lxc.py => libvirt_lxc.py} (100%)
 rename plugins/inventory/{lxc.py => libvirt_lxc.py} (100%)

diff --git a/lib/ansible/runner/connection_plugins/lxc.py b/lib/ansible/runner/connection_plugins/libvirt_lxc.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/lxc.py
rename to lib/ansible/runner/connection_plugins/libvirt_lxc.py
diff --git a/plugins/inventory/lxc.py b/plugins/inventory/libvirt_lxc.py
similarity index 100%
rename from plugins/inventory/lxc.py
rename to plugins/inventory/libvirt_lxc.py

From 10f70bee49d3e4bf0bed41e29c2918af91c63e28 Mon Sep 17 00:00:00 2001
From: Richard C Isaacson
Date: Tue, 11 Mar 2014 15:42:45 -0500
Subject: [PATCH 277/772] Bulk update of choices=BOOLEANS to type='bool'

---
 library/cloud/digital_ocean | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/library/cloud/digital_ocean b/library/cloud/digital_ocean
index 62b8709e39a..efebf5f1bcf 100644
--- a/library/cloud/digital_ocean
+++ b/library/cloud/digital_ocean
@@ -399,9 +399,9 @@ def main():
         image_id = dict(type='int'),
         region_id = dict(type='int'),
         ssh_key_ids = dict(default=''),
-        virtio = dict(type='bool', choices=BOOLEANS, default='yes'),
-        private_networking = dict(type='bool', choices=BOOLEANS, default='no'),
-        backups_enabled = dict(type='bool', choices=BOOLEANS, default='no'),
+        virtio = dict(type='bool', default='yes'),
+        private_networking = dict(type='bool', default='no'),
+        backups_enabled = dict(type='bool',
default='no'), id = dict(aliases=['droplet_id'], type='int'), unique_name = dict(type='bool', default='no'), wait = dict(type='bool', default=True), From 1b28e3bf41a0318546ffd9d26ffbb06e05d89fbe Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Tue, 11 Mar 2014 15:44:34 -0500 Subject: [PATCH 278/772] Bulk update of choices=BOOLEANS to type='bool' --- library/cloud/ec2_eip | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2_eip b/library/cloud/ec2_eip index e6ecf091a48..5be83387e07 100644 --- a/library/cloud/ec2_eip +++ b/library/cloud/ec2_eip @@ -258,7 +258,7 @@ def main(): public_ip = dict(required=False, aliases= ['ip']), state = dict(required=False, default='present', choices=['present', 'absent']), - in_vpc = dict(required=False, choices=BOOLEANS, default=False), + in_vpc = dict(required=False, type='bool', default=False), reuse_existing_ip_allowed = dict(required=False, type='bool', default=False), ) ) From a1c87e6deb1e644107489a470ed7c138f0936d9a Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Tue, 11 Mar 2014 16:23:18 -0500 Subject: [PATCH 279/772] Fix docstring issue. --- library/system/at | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/system/at b/library/system/at index d1055abfc26..c63527563fd 100644 --- a/library/system/at +++ b/library/system/at @@ -55,8 +55,8 @@ options: unique: description: - If a matching job is present a new job will not be added. - required: false - default: false + required: false + default: false requirements: - at author: Richard Isaacson From a689e42d32cba58808ed5d3a56d35ff9b2962fa1 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 11 Mar 2014 19:14:43 -0400 Subject: [PATCH 280/772] Fix ec2_facts module by removing stray fail statement --- library/cloud/ec2_facts | 1 - 1 file changed, 1 deletion(-) diff --git a/library/cloud/ec2_facts b/library/cloud/ec2_facts index c6a6670a58b..31024b65f8d 100644 --- a/library/cloud/ec2_facts +++ b/library/cloud/ec2_facts @@ -70,7 +70,6 @@ class Ec2Metadata(object): self._prefix = 'ansible_ec2_%s' def _fetch(self, url): - self.module.fail_json(msg="url is %s" % url) (response, info) = fetch_url(self.module, url, force=True) return response.read() From c3667b7b5109b90010bcad7be48feb4040ae6dc5 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 10 Mar 2014 13:11:55 -0500 Subject: [PATCH 281/772] First pass at hipchat example callback plugin --- plugins/callbacks/hipchat.py | 204 +++++++++++++++++++++++++++++++++++ 1 file changed, 204 insertions(+) create mode 100644 plugins/callbacks/hipchat.py diff --git a/plugins/callbacks/hipchat.py b/plugins/callbacks/hipchat.py new file mode 100644 index 00000000000..ee4aca6aa33 --- /dev/null +++ b/plugins/callbacks/hipchat.py @@ -0,0 +1,204 @@ +# (C) 2014, Matt Martz + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +import os +import urllib +import urllib2 + +from ansible import utils + +try: + import prettytable + HAS_PRETTYTABLE = True +except ImportError: + HAS_PRETTYTABLE = False + + +class CallbackModule(object): + """This is an example ansible callback plugin that sends status + updates to a HipChat channel during playbook execution. + + This plugin makes use of the following environment variables: + HIPCHAT_TOKEN (required): HipChat API token + HIPCHAT_ROOM (optional): HipChat room to post in. Default: ansible + HIPCHAT_FROM (optional): Name to post as. Default: ansible + + Requires: + prettytable + + """ + + def __init__(self): + if not HAS_PRETTYTABLE: + self.disabled = True + utils.warning('The `prettytable` python module is not installed. ' + 'Disabling the HipChat callback plugin.') + + self.msg_uri = 'https://api.hipchat.com/v1/rooms/message' + self.token = os.getenv('HIPCHAT_TOKEN') + self.room = os.getenv('HIPCHAT_ROOM', 'ansible') + self.from_name = os.getenv('HIPCHAT_FROM', 'ansible') + + if self.token is None: + self.disabled = True + utils.warning('HipChat token could not be loaded. The HipChat ' + 'token can be provided using the `HIPCHAT_TOKEN` ' + 'environment variable.') + + self.printed_playbook = False + self.playbook_name = None + + def send_msg(self, msg, msg_format='text', color='yellow', notify=False): + """Method for sending a message to HipChat""" + + params = {} + params['room_id'] = self.room + params['from'] = self.from_name[:15] # max length is 15 + params['message'] = msg + params['message_format'] = msg_format + params['color'] = color + params['notify'] = int(notify) + + url = ('%s?auth_token=%s' % (self.msg_uri, self.token)) + try: + response = urllib2.urlopen(url, urllib.urlencode(params)) + return response.read() + except: + utils.warning('Could not submit message to hipchat') + + def on_any(self, *args, **kwargs): + pass + + def runner_on_failed(self, host, res, ignore_errors=False): + pass + + def runner_on_ok(self, host, res): + pass + + def runner_on_error(self, host, msg): + pass + + def runner_on_skipped(self, host, item=None): + pass + + def runner_on_unreachable(self, host, res): + pass + + def runner_on_no_hosts(self): + pass + + def runner_on_async_poll(self, host, res, jid, clock): + pass + + def runner_on_async_ok(self, host, res, jid): + pass + + def runner_on_async_failed(self, host, res, jid): + pass + + def playbook_on_start(self): + pass + + def playbook_on_notify(self, host, handler): + pass + + def playbook_on_no_hosts_matched(self): + pass + + def playbook_on_no_hosts_remaining(self): + pass + + def playbook_on_task_start(self, name, is_conditional): + pass + + def playbook_on_vars_prompt(self, varname, private=True, prompt=None, + encrypt=None, confirm=False, salt_size=None, + salt=None, default=None): + pass + + def playbook_on_setup(self): + pass + + def playbook_on_import_for_host(self, host, imported_file): + pass + + def playbook_on_not_import_for_host(self, host, missing_file): + pass + + def playbook_on_play_start(self, pattern): + """Display Playbook and play start messages""" + + # This block sends information about a playbook when it starts + # The playbook object is not immediately available at + # playbook_on_start so we grab it via the play + # + # Displays info about playbook being started by a person on an + # inventory, as well as Tags, Skip Tags and Limits + if not self.printed_playbook: + self.playbook_name, _ = os.path.splitext( + os.path.basename(self.play.playbook.filename)) + host_list = 
self.play.playbook.inventory.host_list + inventory = os.path.basename(os.path.realpath(host_list)) + self.send_msg("%s: Playbook initiated by %s against %s" % + (self.playbook_name, + self.play.playbook.remote_user, + inventory), notify=True) + self.printed_playbook = True + subset = self.play.playbook.inventory._subset + skip_tags = self.play.playbook.skip_tags + self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" % + (self.playbook_name, + ', '.join(self.play.playbook.only_tags), + ', '.join(skip_tags) if skip_tags else None, + ', '.join(subset) if subset else subset)) + + # This is where we actually say we are starting a play + self.send_msg("%s: Starting play: %s" % + (self.playbook_name, pattern)) + + def playbook_on_stats(self, stats): + """Display info about playbook statistics""" + hosts = sorted(stats.processed.keys()) + + t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable', + 'Failures']) + + failures = False + unreachable = False + + for h in hosts: + s = stats.summarize(h) + + if s['failures'] > 0: + failures = True + if s['unreachable'] > 0: + unreachable = True + + t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable', + 'failures']]) + + self.send_msg("%s: Playbook complete" % self.playbook_name, + notify=True) + + if failures or unreachable: + color = 'red' + self.send_msg("%s: Failures detected" % self.playbook_name, + color=color, notify=True) + else: + color = 'green' + + self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color) From b853778a93cbb280243b90bb8d81953f8a9e7427 Mon Sep 17 00:00:00 2001 From: Devon Crouse Date: Tue, 11 Mar 2014 18:19:03 -0600 Subject: [PATCH 282/772] Fixed references to missing method in ec2 modules Change-Id: I9b89d433b545269d111b3c290b6411aabf58dd24 --- library/cloud/route53 | 2 +- library/cloud/s3 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/library/cloud/route53 b/library/cloud/route53 index f98f68f4bd9..49344ee2061 100644 --- a/library/cloud/route53 +++ b/library/cloud/route53 @@ -157,7 +157,7 @@ def commit(changes): time.sleep(500) def main(): - argument_spec = ec2_argument_keys_spec() + argument_spec = ec2_argument_spec() argument_spec.update(dict( command = dict(choices=['get', 'create', 'delete'], required=True), zone = dict(required=True), diff --git a/library/cloud/s3 b/library/cloud/s3 index 4fc470678b2..6d64a3f43fe 100644 --- a/library/cloud/s3 +++ b/library/cloud/s3 @@ -271,7 +271,7 @@ def is_walrus(s3_url): return False def main(): - argument_spec = ec2_argument_keys_spec() + argument_spec = ec2_argument_spec() argument_spec.update(dict( bucket = dict(required=True), object = dict(), From bd3322824fc0f67a6b9091f25e664e1a7b32bd50 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 11 Mar 2014 20:59:46 -0400 Subject: [PATCH 283/772] Remove duplicate digital_ocean_droplet module --- library/cloud/digital_ocean_droplet | 320 ---------------------------- 1 file changed, 320 deletions(-) delete mode 100644 library/cloud/digital_ocean_droplet diff --git a/library/cloud/digital_ocean_droplet b/library/cloud/digital_ocean_droplet deleted file mode 100644 index d8ec1929ccc..00000000000 --- a/library/cloud/digital_ocean_droplet +++ /dev/null @@ -1,320 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later 
version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -DOCUMENTATION = ''' ---- -module: digital_ocean_droplet -short_description: Create/delete a droplet in DigitalOcean -description: - - Create/delete a droplet in DigitalOcean and optionally waits for it to be 'running'. -version_added: "1.6" -options: - state: - description: - - Indicate desired state of the target. - default: present - choices: ['present', 'absent'] - client_id: - description: - - Digital Ocean manager id. - api_key: - description: - - Digital Ocean api key. - id: - description: - - Numeric, the droplet id you want to operate on. - name: - description: - - String, this is the name of the droplet - must be formatted by hostname rules. - unique_name: - description: - - Bool, require unique hostnames. By default, digital ocean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence. - default: "no" - choices: [ "yes", "no" ] - size_id: - description: - - Numeric, this is the id of the size you would like the droplet created at. - image_id: - description: - - Numeric, this is the id of the image you would like the droplet created with. - region_id: - description: - - "Numeric, this is the id of the region you would like your server" - ssh_key_ids: - description: - - Optional, comma separated list of ssh_key_ids that you would like to be added to the server - wait: - description: - - Wait for the droplet to be in state 'running' before returning. If wait is "no" an ip_address may not be returned. - default: "yes" - choices: [ "yes", "no" ] - wait_timeout: - description: - - How long before wait gives up, in seconds. - default: 300 - -notes: - - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. -''' - -EXAMPLES = ''' -# Create a new Droplet -# Will return the droplet details including the droplet id (used for idempotence) - -- digital_ocean_droplet: > - state=present - name=my_new_droplet - client_id=XXX - api_key=XXX - size_id=1 - region_id=2 - image_id=3 - wait_timeout=500 - register: my_droplet -- debug: msg="ID is {{ my_droplet.droplet.id }}" -- debug: msg="IP is {{ my_droplet.droplet.ip_address }}" - -# Ensure a droplet is present -# If droplet id already exist, will return the droplet details and changed = False -# If no droplet matches the id, a new droplet will be created and the droplet details (including the new id) are returned, changed = True. - -- digital_ocean_droplet: > - state=present - id=123 - name=my_new_droplet - client_id=XXX - api_key=XXX - size_id=1 - region_id=2 - image_id=3 - wait_timeout=500 - -# Create a droplet with ssh key -# The ssh key id can be passed as argument at the creation of a droplet (see ssh_key_ids). -# Several keys can be added to ssh_key_ids as id1,id2,id3 -# The keys are used to connect as root to the droplet. 
- -- digital_ocean_droplet: > - state=present - ssh_key_ids=id1,id2 - name=my_new_droplet - client_id=XXX - api_key=XXX - size_id=1 - region_id=2 - image_id=3 -''' - -import sys -import os -import time - -try: - from dopy.manager import DoError, DoManager -except ImportError as e: - print "failed=True msg='dopy required for this module'" - sys.exit(1) - -class TimeoutError(DoError): - def __init__(self, msg, id): - super(TimeoutError, self).__init__(msg) - self.id = id - -class JsonfyMixIn(object): - def to_json(self): - return self.__dict__ - -class Droplet(JsonfyMixIn): - manager = None - - def __init__(self, droplet_json): - self.status = 'new' - self.__dict__.update(droplet_json) - - def is_powered_on(self): - return self.status == 'active' - - def update_attr(self, attrs=None): - if attrs: - for k, v in attrs.iteritems(): - setattr(self, k, v) - else: - json = self.manager.show_droplet(self.id) - if json['ip_address']: - self.update_attr(json) - - def power_on(self): - assert self.status == 'off', 'Can only power on a closed one.' - json = self.manager.power_on_droplet(self.id) - self.update_attr(json) - - def ensure_powered_on(self, wait=True, wait_timeout=300): - if self.is_powered_on(): - return - if self.status == 'off': # powered off - self.power_on() - - if wait: - end_time = time.time() + wait_timeout - while time.time() < end_time: - time.sleep(min(20, end_time - time.time())) - self.update_attr() - if self.is_powered_on(): - if not self.ip_address: - raise TimeoutError('No ip is found.', self.id) - return - raise TimeoutError('Wait for droplet running timeout', self.id) - - def destroy(self): - return self.manager.destroy_droplet(self.id) - - @classmethod - def setup(cls, client_id, api_key): - cls.manager = DoManager(client_id, api_key) - - @classmethod - def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None): - json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids) - droplet = cls(json) - return droplet - - @classmethod - def find(cls, id=None, name=None): - if not id and not name: - return False - - droplets = cls.list_all() - - # Check first by id. digital ocean requires that it be unique - for droplet in droplets: - if droplet.id == id: - return droplet - - # Failing that, check by hostname. - for droplet in droplets: - if droplet.name == name: - return droplet - - return False - - @classmethod - def list_all(cls): - json = cls.manager.all_active_droplets() - return map(cls, json) - -def core(module): - def getkeyordie(k): - v = module.params[k] - if v is None: - module.fail_json(msg='Unable to load %s' % k) - return v - - try: - # params['client_id'] will be None even if client_id is not passed in - client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID'] - api_key = module.params['api_key'] or os.environ['DO_API_KEY'] - except KeyError, e: - module.fail_json(msg='Unable to load %s' % e.message) - - changed = True - state = module.params['state'] - - Droplet.setup(client_id, api_key) - if state in ('present'): - - # First, try to find a droplet by id. - droplet = Droplet.find(id=module.params['id']) - - # If we couldn't find the droplet and the user is allowing unique - # hostnames, then check to see if a droplet with the specified - # hostname already exists. - if not droplet and module.params['unique_name']: - droplet = Droplet.find(name=getkeyordie('name')) - - # If both of those attempts failed, then create a new droplet. 
-        if not droplet:
-            droplet = Droplet.add(
-                name=getkeyordie('name'),
-                size_id=getkeyordie('size_id'),
-                image_id=getkeyordie('image_id'),
-                region_id=getkeyordie('region_id'),
-                ssh_key_ids=module.params['ssh_key_ids']
-            )
-
-        if droplet.is_powered_on():
-            changed = False
-
-        droplet.ensure_powered_on(
-            wait=getkeyordie('wait'),
-            wait_timeout=getkeyordie('wait_timeout')
-        )
-
-        module.exit_json(changed=changed, droplet=droplet.to_json())
-
-    elif state in ('absent'):
-        # First, try to find a droplet by id.
-        droplet = None
-        if 'id' in module.params:
-            droplet = Droplet.find(id=module.params['id'])
-
-        # If we couldn't find the droplet and the user is allowing unique
-        # hostnames, then check to see if a droplet with the specified
-        # hostname already exists.
-        if not droplet and module.params['unique_name'] and 'name' in module.params:
-            droplet = Droplet.find(name=module.params['name'])
-
-        if not droplet:
-            module.exit_json(changed=False, msg='The droplet is not found.')
-
-        event_json = droplet.destroy()
-        module.exit_json(changed=True, event_id=event_json['event_id'])
-
-def main():
-    module = AnsibleModule(
-        argument_spec = dict(
-            state = dict(choices=['present', 'absent'], default='present'),
-            client_id = dict(aliases=['CLIENT_ID'], no_log=True),
-            api_key = dict(aliases=['API_KEY'], no_log=True),
-            name = dict(type='str'),
-            size_id = dict(type='int'),
-            image_id = dict(type='int'),
-            region_id = dict(type='int'),
-            ssh_key_ids = dict(default=''),
-            id = dict(aliases=['droplet_id'], type='int'),
-            unique_name = dict(type='bool', choices=BOOLEANS, default='no'),
-            wait = dict(type='bool', choices=BOOLEANS, default='yes'),
-            wait_timeout = dict(default=300, type='int'),
-        ),
-        required_together = (
-            ['size_id', 'image_id', 'region_id'],
-        ),
-        required_one_of = (
-            ['id', 'name'],
-        ),
-    )
-
-    try:
-        core(module)
-    except TimeoutError as e:
-        module.fail_json(msg=str(e), id=e.id)
-    except (DoError, Exception) as e:
-        module.fail_json(msg=str(e))
-
-# import module snippets
-from ansible.module_utils.basic import *
-
-main()

From bb047b9e8936afb5025b98b85e4dbf51156541c0 Mon Sep 17 00:00:00 2001
From: Michael DeHaan
Date: Tue, 11 Mar 2014 21:12:07 -0400
Subject: [PATCH 284/772] Changelog updates for things merged already into 1.6

---
 CHANGELOG.md | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4380ac5e8bd..4f8ceb16fe4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,12 +11,24 @@ Major features/changes:
 New Modules:
 
 * packaging: cpanm
+* packaging: portage
 * system: debconf
+* system: ufw
+* system: locale_gen
+* cloud: digital_ocean_domain
+* cloud: digital_ocean_sshkey
+* cloud: nova_group (security groups)
+* cloud: nova_fip (floating IPs)
+* cloud: rax_identity
+* cloud: ec2_asg (configure autoscaling groups)
 
 Other notable changes:
 
-* info pending
-
+* libvirt module now supports destroyed and paused as states
+* s3 module can specify metadata
+* security token additions to ec2 modules
+* misc bugfixes and other parameters
+
 ## 1.5.1 "Love Walks In" - March 10, 2014
 
 - Force command action to not be executed by the shell unless specifically enabled.
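The libvirt state additions noted in the changelog above can be exercised with a short task list like the following sketch. The guest names are placeholders for illustration, not values taken from any patch in this series; the tasks use the same shorthand style as the module EXAMPLES blocks:

    # 'paused' and 'destroyed' are the states the changelog lists as newly
    # supported by the virt module in 1.6
    - virt: name=guest01 state=paused
    - virt: name=guest02 state=destroyed
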
From 7778aca966064a0a97879178b1829047151c05b3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 11 Mar 2014 21:30:58 -0400 Subject: [PATCH 285/772] templates ignore_errors --- lib/ansible/playbook/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 65965526251..918b9341717 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -402,6 +402,10 @@ class PlayBook(object): ansible.callbacks.set_task(self.runner_callbacks, None) return True + # template ignore_errors + cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False) + task.ignore_errors = utils.check_conditional(cond , play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR) + # load up an appropriate ansible runner to run the task in parallel results = self._run_task_internal(task) From 6da2c371128cea88dde2498c2d30a410b449d31e Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 11 Mar 2014 21:38:00 -0400 Subject: [PATCH 286/772] Fixes #6411 Return None for ec2 api endpoints that return 404 --- library/cloud/ec2_facts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/library/cloud/ec2_facts b/library/cloud/ec2_facts index 31024b65f8d..09c9d761ef7 100644 --- a/library/cloud/ec2_facts +++ b/library/cloud/ec2_facts @@ -71,7 +71,11 @@ class Ec2Metadata(object): def _fetch(self, url): (response, info) = fetch_url(self.module, url, force=True) - return response.read() + if response: + data = response.read() + else: + data = None + return data def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']): new_fields = {} From 906e59d8a8eb4b5b7f6e6b5580019ae015a34f8b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 11 Mar 2014 22:13:29 -0400 Subject: [PATCH 287/772] added new role_names variable that exposes current roles list --- lib/ansible/playbook/play.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index af66ee25746..cd38a540781 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -134,6 +134,7 @@ class Play(object): '("su", "su_user") cannot be used together') load_vars = {} + load_vars['role_names'] = ds.get('role_names',[]) load_vars['playbook_dir'] = self.basedir if self.playbook.inventory.basedir() is not None: load_vars['inventory_dir'] = self.playbook.inventory.basedir() @@ -356,6 +357,7 @@ class Play(object): new_tasks.append(dict(meta='flush_handlers')) roles = self._build_role_dependencies(roles, [], self.vars) + role_names = [] for (role,role_path,role_vars,default_vars) in roles: # special vars must be extracted from the dict to the included tasks @@ -388,6 +390,7 @@ class Play(object): else: role_name = role + role_names.append(role_name) if os.path.isfile(task): nt = dict(include=pipes.quote(task), vars=role_vars, default_vars=default_vars, role_name=role_name) for k in special_keys: @@ -434,6 +437,7 @@ class Play(object): ds['tasks'] = new_tasks ds['handlers'] = new_handlers ds['vars_files'] = new_vars_files + ds['role_names'] = role_names self.default_vars = self._load_role_defaults(defaults_files) From 463a1c21ea8e011142513034df9b7a1340c5eb6f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 11 Mar 2014 22:50:05 -0400 Subject: [PATCH 288/772] now stat skips md5 if file isn't readable --- library/files/stat | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/library/files/stat 
b/library/files/stat index 2839ca8e06f..8c717a395c4 100644 --- a/library/files/stat +++ b/library/files/stat @@ -132,8 +132,9 @@ def main(): if S_ISLNK(mode): d['lnk_source'] = os.path.realpath(path) - if S_ISREG(mode) and get_md5: - d['md5'] = module.md5(path) + if S_ISREG(mode) and get_md5 and os.access(path,os.R_OK): + d['md5'] = module.md5(path) + try: pw = pwd.getpwuid(st.st_uid) From e40fd9bb0d21ea00965be7d811313dc347733a30 Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Tue, 11 Mar 2014 22:36:44 -0500 Subject: [PATCH 289/772] Bulk update of choices=BOOLEANS to type='bool' --- library/cloud/ec2_vpc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc index 9b9fb95a0b2..74880bdb2a3 100644 --- a/library/cloud/ec2_vpc +++ b/library/cloud/ec2_vpc @@ -491,13 +491,13 @@ def main(): argument_spec.update(dict( cidr_block = dict(), instance_tenancy = dict(choices=['default', 'dedicated'], default='default'), - wait = dict(choices=BOOLEANS, default=False), + wait = dict(type='bool', default=False), wait_timeout = dict(default=300), - dns_support = dict(choices=BOOLEANS, default=True), - dns_hostnames = dict(choices=BOOLEANS, default=True), + dns_support = dict(type='bool', default=True), + dns_hostnames = dict(type='bool', default=True), subnets = dict(type='list'), vpc_id = dict(), - internet_gateway = dict(choices=BOOLEANS, default=False), + internet_gateway = dict(type='bool', default=False), route_tables = dict(type='list'), state = dict(choices=['present', 'absent'], default='present'), ) From 1ea5b04e05254d388877be0d0c867b2a1dc0af5d Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Tue, 11 Mar 2014 22:39:42 -0500 Subject: [PATCH 290/772] Bulk update of choices=BOOLEANS to type='bool' --- library/cloud/gce | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/gce b/library/cloud/gce index b14ce8996da..2d95c8143bc 100755 --- a/library/cloud/gce +++ b/library/cloud/gce @@ -351,7 +351,7 @@ def main(): metadata = dict(), name = dict(), network = dict(default='default'), - persistent_boot_disk = dict(type='bool', choices=BOOLEANS, default=False), + persistent_boot_disk = dict(type='bool', default=False), state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'), tags = dict(type='list'), From 05a1883cb9f8a0efa4cde4b798218a4ed0c13be0 Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Tue, 11 Mar 2014 22:43:01 -0500 Subject: [PATCH 291/772] Bulk update of choices=BOOLEANS to type='bool' --- library/cloud/quantum_subnet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/quantum_subnet b/library/cloud/quantum_subnet index 489ebb3440c..53cf5d32d15 100644 --- a/library/cloud/quantum_subnet +++ b/library/cloud/quantum_subnet @@ -259,7 +259,7 @@ def main(): tenant_name = dict(default=None), state = dict(default='present', choices=['absent', 'present']), ip_version = dict(default='4', choices=['4', '6']), - enable_dhcp = dict(default='true', choices=BOOLEANS), + enable_dhcp = dict(default='true', type='bool'), gateway_ip = dict(default=None), dns_nameservers = dict(default=None), allocation_pool_start = dict(default=None), From 86adb60a82ad55166428a3bd468bd150fb4b6998 Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Tue, 11 Mar 2014 22:46:53 -0500 Subject: [PATCH 292/772] Bulk update of choices=BOOLEANS to type='bool' --- library/cloud/rax | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/library/cloud/rax 
b/library/cloud/rax index 230f80df5e2..c566206b403 100644 --- a/library/cloud/rax +++ b/library/cloud/rax @@ -642,11 +642,11 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - auto_increment=dict(choices=BOOLEANS, default=True, type='bool'), + auto_increment=dict(default=True, type='bool'), count=dict(default=1, type='int'), count_offset=dict(default=1, type='int'), disk_config=dict(choices=['auto', 'manual']), - exact_count=dict(choices=BOOLEANS, default=False, type='bool'), + exact_count=dict(default=False, type='bool'), files=dict(type='dict', default={}), flavor=dict(), group=dict(), @@ -658,7 +658,7 @@ def main(): networks=dict(type='list', default=['public', 'private']), service=dict(), state=dict(default='present', choices=['present', 'absent']), - wait=dict(choices=BOOLEANS, default=False, type='bool'), + wait=dict(default=False, type='bool'), wait_timeout=dict(default=300), ) ) From 3121a7f3ecda26948c5198099da3347ecfb5d86c Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Tue, 11 Mar 2014 22:50:03 -0500 Subject: [PATCH 293/772] Bulk update of choices=BOOLEANS to type='bool' --- library/cloud/rax_files | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/library/cloud/rax_files b/library/cloud/rax_files index 564cdb578d6..d2958c2054a 100644 --- a/library/cloud/rax_files +++ b/library/cloud/rax_files @@ -337,11 +337,11 @@ def main(): container=dict(), state=dict(choices=['present', 'absent', 'list'], default='present'), meta=dict(type='dict', default=dict()), - clear_meta=dict(choices=BOOLEANS, default=False, type='bool'), + clear_meta=dict(default=False, type='bool'), type=dict(choices=['container', 'meta'], default='container'), ttl=dict(type='int'), - public=dict(choices=BOOLEANS, default=False, type='bool'), - private=dict(choices=BOOLEANS, default=False, type='bool'), + public=dict(default=False, type='bool'), + private=dict(default=False, type='bool'), web_index=dict(), web_error=dict() ) From c3dc195362a2ee18a658fc25f9203dc95baf4771 Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Tue, 11 Mar 2014 22:53:24 -0500 Subject: [PATCH 294/772] Bulk update of choices=BOOLEANS to type='bool' --- library/cloud/rax_files_objects | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/cloud/rax_files_objects b/library/cloud/rax_files_objects index b628ff14027..0b733487714 100644 --- a/library/cloud/rax_files_objects +++ b/library/cloud/rax_files_objects @@ -572,8 +572,8 @@ def main(): method=dict(default='get', choices=['put', 'get', 'delete']), type=dict(default='file', choices=['file', 'meta']), meta=dict(type='dict', default=dict()), - clear_meta=dict(choices=BOOLEANS, default=False, type='bool'), - structure=dict(choices=BOOLEANS, default=True, type='bool'), + clear_meta=dict(default=False, type='bool'), + structure=dict(default=True, type='bool'), expires=dict(type='int'), ) ) From c2bf201bceeca96c7dac6811a9ccaabe31424af4 Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Tue, 11 Mar 2014 22:56:51 -0500 Subject: [PATCH 295/772] Bulk update of choices=BOOLEANS to type='bool' --- library/database/postgresql_user | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/database/postgresql_user b/library/database/postgresql_user index b6383006cb4..1dda1a6dc57 100644 --- a/library/database/postgresql_user +++ b/library/database/postgresql_user @@ -443,9 +443,9 @@ def main(): priv=dict(default=None), db=dict(default=''), port=dict(default='5432'), - fail_on_user=dict(type='bool', 
choices=BOOLEANS, default='yes'), + fail_on_user=dict(type='bool', default='yes'), role_attr_flags=dict(default=''), - encrypted=dict(type='bool', choices=BOOLEANS, default='no'), + encrypted=dict(type='bool', default='no'), expires=dict(default=None) ), supports_check_mode = True From c1af272ba175a02ef3370e828734fae1977ce2ad Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Tue, 11 Mar 2014 23:04:19 -0500 Subject: [PATCH 296/772] Bulk update of choices=BOOLEANS to type='bool' --- library/web_infrastructure/django_manage | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/library/web_infrastructure/django_manage b/library/web_infrastructure/django_manage index b02a9398f52..509bd404493 100644 --- a/library/web_infrastructure/django_manage +++ b/library/web_infrastructure/django_manage @@ -203,13 +203,13 @@ def main(): apps = dict(default=None, required=False), cache_table = dict(default=None, required=False), database = dict(default=None, required=False), - failfast = dict(default='no', required=False, choices=BOOLEANS, aliases=['fail_fast']), + failfast = dict(default='no', required=False, type='bool', aliases=['fail_fast']), fixtures = dict(default=None, required=False), liveserver = dict(default=None, required=False, aliases=['live_server']), testrunner = dict(default=None, required=False, aliases=['test_runner']), - skip = dict(default=None, required=False, choices=BOOLEANS), - merge = dict(default=None, required=False, choices=BOOLEANS), - link = dict(default=None, required=False, choices=BOOLEANS), + skip = dict(default=None, required=False, type='bool'), + merge = dict(default=None, required=False, type='bool'), + link = dict(default=None, required=False, type='bool'), ), ) From e4ad97b9186338c5aa683716d07ca305fd815185 Mon Sep 17 00:00:00 2001 From: Andy Trevorah Date: Wed, 12 Mar 2014 11:15:27 +0000 Subject: [PATCH 297/772] reworked apt-repository auto-install to be like yum --- library/packaging/apt_repository | 49 ++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/library/packaging/apt_repository b/library/packaging/apt_repository index 34cdc6f4c96..a55fa07a247 100644 --- a/library/packaging/apt_repository +++ b/library/packaging/apt_repository @@ -95,6 +95,26 @@ except ImportError: VALID_SOURCE_TYPES = ('deb', 'deb-src') +def install_python_apt(module): + + if not module.check_mode: + apt_get_path = module.get_bin_path('apt-get') + if apt_get_path: + rc, so, se = module.run_command('%s update && %s install python-apt -y -q' % (apt_get_path, apt_get_path)) + if rc == 0: + global apt, apt_pkg + import apt + import apt_pkg + +def install_python_pycurl(module): + + if not module.check_mode: + apt_get_path = module.get_bin_path('apt-get') + if apt_get_path: + rc, so, se = module.run_command('%s update && %s install python-pycurl -y -q' % (apt_get_path, apt_get_path)) + if rc == 0: + global pycurl + import pycurl class CurlCallback: def __init__(self): @@ -361,25 +381,24 @@ def main(): repo=dict(required=True), state=dict(choices=['present', 'absent'], default='present'), update_cache = dict(aliases=['update-cache'], type='bool', default='yes'), + # this should not be needed, but exists as a failsafe + install_python_apt=dict(required=False, default="yes", type='bool'), + # this should not be needed, but exists as a failsafe + install_python_pycurl=dict(required=False, default="yes", type='bool'), ), supports_check_mode=True, ) - if not HAVE_PYTHON_APT: - try: - module.run_command('apt-get update && apt-get 
install python-apt -y -q') - global apt, apt_pkg - import apt - import apt_pkg - except: - module.fail_json(msg='Could not import python modules: apt, apt_pkg. Please install python-apt package.') - - if not HAVE_PYCURL: - module.fail_json(msg='Could not import python modules: pycurl. Please install python-pycurl package.') - - repo = module.params['repo'] - state = module.params['state'] - update_cache = module.params['update_cache'] + params = module.params + if params['install_python_apt'] and not HAVE_PYTHON_APT and not module.check_mode: + install_python_apt(module) + + if params['install_python_pycurl'] and not HAVE_PYCURL and not module.check_mode: + install_python_pycurl(module) + + repo = params['repo'] + state = params['state'] + update_cache = params['update_cache'] sourceslist = None if isinstance(distro, aptsources.distro.UbuntuDistribution): From 551cb1892b32e8461d0d4ce556454528722f473f Mon Sep 17 00:00:00 2001 From: James Laska Date: Thu, 27 Feb 2014 11:44:13 -0500 Subject: [PATCH 298/772] Always run cleanup after running cloud_integration tests --- test/integration/Makefile | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 7cdae607df0..33ffc3a969b 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -15,14 +15,27 @@ test_hash: cloud: amazon rackspace +cloud_cleanup: amazon_cleanup rackspace_cleanup + +amazon_cleanup: + python cleanup_ec2.py -y + +rackspace_cleanup: + @echo "FIXME - cleanup_rax.py not yet implemented" + @#python cleanup_rax.py -y + credentials.yml: @echo "No credentials.yml file found. A file named 'credentials.yml' is needed to provide credentials needed to run cloud tests." @exit 1 amazon: credentials.yml - ansible-playbook amazon.yml -i inventory -e @integration_config.yml -e @credentials.yml -v $(TEST_FLAGS) - @# FIXME - Cleanup won't run if the previous tests fail - python cleanup_ec2.py -y + ansible-playbook amazon.yml -i inventory -e @integration_config.yml -e @credentials.yml -v $(TEST_FLAGS) ; \ + RC=$$? ; \ + make amazon_cleanup ; \ + exit $$RC; rackspace: credentials.yml - ansible-playbook rackspace.yml -i inventory -e @integration_config.yml -e @credentials.yml -v $(TEST_FLAGS) + ansible-playbook rackspace.yml -i inventory -e @integration_config.yml -e @credentials.yml -v $(TEST_FLAGS) ; \ + RC=$$? ; \ + make rackspace_cleanup ; \ + exit $$RC; From 2a723e908eb3eefe05f4319a0fa7ec0d12dd31e2 Mon Sep 17 00:00:00 2001 From: James Laska Date: Wed, 12 Mar 2014 09:04:42 -0400 Subject: [PATCH 299/772] Fix issue#6430 by instantiating Network() with expected arguments --- library/system/setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/setup b/library/system/setup index 5ae5ece8bad..b3d6f5e61f4 100644 --- a/library/system/setup +++ b/library/system/setup @@ -2297,7 +2297,7 @@ def ansible_facts(module): facts = {} facts.update(Facts().populate()) facts.update(Hardware().populate()) - facts.update(Network(module).populate()) + facts.update(Network().populate()) facts.update(Virtual().populate()) return facts From c8db3c2db9a634e3e5811f293b07976ef6cfebc2 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 12 Mar 2014 09:23:13 -0400 Subject: [PATCH 300/772] Revert "Fix issue#6430 by instantiating Network() with expected arguments" This reverts commit 2a723e908eb3eefe05f4319a0fa7ec0d12dd31e2. 
--- library/system/setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/setup b/library/system/setup index b3d6f5e61f4..5ae5ece8bad 100644 --- a/library/system/setup +++ b/library/system/setup @@ -2297,7 +2297,7 @@ def ansible_facts(module): facts = {} facts.update(Facts().populate()) facts.update(Hardware().populate()) - facts.update(Network().populate()) + facts.update(Network(module).populate()) facts.update(Virtual().populate()) return facts From e4805fe411fdbbd15310351f19e8d06dc73bde88 Mon Sep 17 00:00:00 2001 From: Ryan Date: Tue, 11 Mar 2014 16:59:13 -0400 Subject: [PATCH 301/772] add module param to Network & sub-classes add module param to Network and sub-classes of Network, so that ansible 1.5.1 changes to LinuxNetwork apply to all Network classes. --- library/system/setup | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/library/system/setup b/library/system/setup index 5ae5ece8bad..a05699f082e 100644 --- a/library/system/setup +++ b/library/system/setup @@ -1415,7 +1415,8 @@ class Network(Facts): subclass = sc return super(cls, subclass).__new__(subclass, *arguments, **keyword) - def __init__(self): + def __init__(self, module): + self.module = module Facts.__init__(self) def populate(self): @@ -1432,11 +1433,10 @@ class LinuxNetwork(Network): platform = 'Linux' def __init__(self, module): - self.module = module - Network.__init__(self) + Network.__init__(self, module) def populate(self): - ip_path = module.get_bin_path('ip') + ip_path = self.module.get_bin_path('ip') if ip_path is None: return self.facts default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path) @@ -1652,8 +1652,8 @@ class GenericBsdIfconfigNetwork(Network): """ platform = 'Generic_BSD_Ifconfig' - def __init__(self): - Network.__init__(self) + def __init__(self, module): + Network.__init__(self, module) def populate(self): From 0e38f5dfdce83d839e63f40880ffd24aaa4ef1bc Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 12 Mar 2014 09:38:20 -0400 Subject: [PATCH 302/772] Check for hash availability during vault operations --- lib/ansible/utils/vault.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 6a714fcc85d..62b082a9af4 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -182,7 +182,7 @@ class VaultEditor(object): def create_file(self): """ create a new encrypted file """ - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: raise errors.AnsibleError(CRYPTO_UPGRADE) if os.path.isfile(self.filename): @@ -199,7 +199,7 @@ class VaultEditor(object): def decrypt_file(self): - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: raise errors.AnsibleError(CRYPTO_UPGRADE) if not os.path.isfile(self.filename): @@ -215,7 +215,7 @@ class VaultEditor(object): def edit_file(self): - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: raise errors.AnsibleError(CRYPTO_UPGRADE) # decrypt to tmpfile @@ -245,7 +245,7 @@ class VaultEditor(object): def encrypt_file(self): - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: raise errors.AnsibleError(CRYPTO_UPGRADE) if not os.path.isfile(self.filename): @@ -262,7 +262,7 @@ class 
VaultEditor(object): def rekey_file(self, new_password): - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: raise errors.AnsibleError(CRYPTO_UPGRADE) # decrypt @@ -420,6 +420,11 @@ class VaultAES256(object): # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html + def __init__(self): + + if not HAS_PBKDF2 or not HAS_COUNTER or not HAS_HASH: + raise errors.AnsibleError(CRYPTO_UPGRADE) + def gen_key_initctr(self, password, salt): # 16 for AES 128, 32 for AES256 keylength = 32 @@ -432,8 +437,6 @@ class VaultAES256(object): # make two keys and one iv pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest() - if not HAS_PBKDF2: - raise errors.AnsibleError(CRYPTO_UPGRADE) derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength, count=10000, prf=pbkdf2_prf) @@ -460,8 +463,6 @@ class VaultAES256(object): # 1) nbits (integer) - Length of the counter, in bits. # 2) initial_value (integer) - initial value of the counter. "iv" from gen_key_initctr - if not HAS_COUNTER: - raise errors.AnsibleError(CRYPTO_UPGRADE) ctr = Counter.new(128, initial_value=long(iv, 16)) # AES.new PARAMETERS @@ -497,8 +498,6 @@ class VaultAES256(object): return None # SET THE COUNTER AND THE CIPHER - if not HAS_COUNTER: - raise errors.AnsibleError(CRYPTO_UPGRADE) ctr = Counter.new(128, initial_value=long(iv, 16)) cipher = AES.new(key1, AES.MODE_CTR, counter=ctr) From 0b0ca9573185c10849ca9b820637b217303821d8 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 12 Mar 2014 10:10:45 -0400 Subject: [PATCH 303/772] Expand environment variables and tildes passed to commands generically across the board. --- lib/ansible/module_utils/basic.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 6e47dd4560d..8bdfb0b976e 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1018,6 +1018,9 @@ class AnsibleModule(object): msg = "Argument 'args' to run_command must be list or string" self.fail_json(rc=257, cmd=args, msg=msg) + # expand things like $HOME and ~ + args = [ os.path.expandvars(os.path.expanduser(x)) for x in args ] + rc = 0 msg = None st_in = None From 804e4166c8a8eb4f881f6fb0ba07a30b0315715c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 12 Mar 2014 09:19:35 -0500 Subject: [PATCH 304/772] Rewriting ssl validation to try multiple certs found in paths Previously, the function checked only for a single CA root cert, however some distributions may have multiple certs in a directory. This will now try any .crt or .pem file contained within several common paths for each platform. Fixes #6412 --- lib/ansible/module_utils/urls.py | 83 ++++++++++++++++++-------------- 1 file changed, 47 insertions(+), 36 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index f251c6b407f..878499ce682 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -83,53 +83,64 @@ class SSLValidationHandler(urllib2.BaseHandler): self.port = port self.ca_cert = ca_cert - def get_ca_cert(self): + def get_ca_certs(self): # tries to find a valid CA cert in one of the # standard locations for the current distribution - if self.ca_cert and os.path.exists(self.ca_cert): - # the user provided a custom CA cert (ie. 
one they - # uploaded themselves), so use it - return self.ca_cert - - ca_cert = None + ca_certs = [] + paths_checked = [] platform = get_platform() distribution = get_distribution() + + if self.ca_cert: + # the user provided a custom CA cert (ie. one they + # uploaded themselves), so add it to the list first + ca_certs.append(self.ca_cert) + + # build a list of paths to check for .crt/.pem files + # based on the platform type + paths_checked.append('/etc/ssl/certs') if platform == 'Linux': - if distribution in ('Fedora',): - ca_cert = '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem' - elif distribution in ('RHEL','CentOS','ScientificLinux'): - ca_cert = '/etc/pki/tls/certs/ca-bundle.crt' - elif distribution in ('Ubuntu','Debian'): - ca_cert = '/usr/share/ca-certificates/cacert.org/cacert.org.crt' + paths_checked.append('/etc/pki/ca-trust/extracted/pem') + paths_checked.append('/etc/pki/tls/certs') + paths_checked.append('/usr/share/ca-certificates/cacert.org') elif platform == 'FreeBSD': - ca_cert = '/usr/local/share/certs/ca-root.crt' + paths_checked.append('/usr/local/share/certs') elif platform == 'OpenBSD': - ca_cert = '/etc/ssl/cert.pem' + paths_checked.append('/etc/ssl') elif platform == 'NetBSD': - ca_cert = '/etc/openssl/certs/ca-cert.pem' - elif platform == 'SunOS': - # FIXME? - pass - elif platform == 'AIX': - # FIXME? - pass - - if ca_cert and os.path.exists(ca_cert): - return ca_cert - elif os.path.exists('/etc/ansible/ca-cert.pem'): - # fall back to a user-deployed cert in a standard - # location if the OS platform one is not available - return '/etc/ansible/ca-cert.pem' - else: - # CA cert isn't available, no validation - return None + ca_certs.append('/etc/openssl/certs') + + # fall back to a user-deployed cert in a standard + # location if the OS platform one is not available + paths_checked.append('/etc/ansible') + + for path in paths_checked: + if os.path.exists(path) and os.path.isdir(path): + dir_contents = os.listdir(path) + for f in dir_contents: + full_path = os.path.join(path, f) + if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt','.pem'): + ca_certs.append(full_path) + + return (ca_certs, paths_checked) def http_request(self, req): - try: - server_cert = ssl.get_server_certificate((self.hostname, self.port), ca_certs=self.get_ca_cert()) - except ssl.SSLError: - self.module.fail_json(msg='failed to validate the SSL certificate for %s:%s. You can use validate_certs=no, however this is unsafe and not recommended' % (self.hostname, self.port)) + ca_certs, paths_checked = self.get_ca_certs() + if len(ca_certs) > 0: + for ca_cert in ca_certs: + try: + server_cert = ssl.get_server_certificate((self.hostname, self.port), ca_certs=ca_cert) + return req + except ssl.SSLError: + # try the next one + pass + # fail if we tried all of the certs but none worked + self.module.fail_json(msg='Failed to validate the SSL certificate for %s:%s. ' % (self.hostname, self.port) + \ + 'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. 
' + \ + 'Paths checked for this platform: %s' % ", ".join(paths_checked)) + # if no CA certs were found, we just fall through + # to here and return the request with no SSL validation return req https_request = http_request From 7f38cff9897a3b6309969c6f7cad23bbdf1a2043 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 12 Mar 2014 09:33:19 -0500 Subject: [PATCH 305/772] Remove unused code from get_ca_certs() function --- lib/ansible/module_utils/urls.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 878499ce682..2a484a89d60 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -77,11 +77,10 @@ class SSLValidationHandler(urllib2.BaseHandler): http://techknack.net/python-urllib2-handlers/ ''' - def __init__(self, module, hostname, port, ca_cert=None): + def __init__(self, module, hostname, port): self.module = module self.hostname = hostname self.port = port - self.ca_cert = ca_cert def get_ca_certs(self): # tries to find a valid CA cert in one of the @@ -92,11 +91,6 @@ class SSLValidationHandler(urllib2.BaseHandler): platform = get_platform() distribution = get_distribution() - if self.ca_cert: - # the user provided a custom CA cert (ie. one they - # uploaded themselves), so add it to the list first - ca_certs.append(self.ca_cert) - # build a list of paths to check for .crt/.pem files # based on the platform type paths_checked.append('/etc/ssl/certs') From cfabc2e28a485f651d4e334bfdf7961d8e3464c6 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 12 Mar 2014 10:55:54 -0400 Subject: [PATCH 306/772] module.run_command is intended to bypass the shell here, so can't do ">>" --- lib/ansible/module_utils/basic.py | 6 ++++++ lib/ansible/module_utils/known_hosts.py | 4 +++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 8bdfb0b976e..09bdde6cd30 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1089,6 +1089,12 @@ class AnsibleModule(object): self.fail_json(cmd=clean_args, rc=rc, stdout=out, stderr=err, msg=msg) return (rc, out, err) + def append_to_file(self, filename, str): + filename = os.path.expandvars(os.path.expanduser(filename)) + fh = open(filename, 'a') + fh.write(str) + fh.close() + def pretty_bytes(self,size): ranges = ( (1<<70L, 'ZB'), diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 36f5b87fff5..8dc1f3267b7 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -91,8 +91,10 @@ def add_host_key(module, fqdn, key_type="rsa"): if not os.path.exists(os.path.expanduser("~/.ssh/")): module.fail_json(msg="%s does not exist" % os.path.expanduser("~/.ssh/")) - this_cmd = "%s -t %s %s >> ~/.ssh/known_hosts" % (keyscan_cmd, key_type, fqdn) + this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn) + rc, out, err = module.run_command(this_cmd) + module.append_to_file("~/.ssh/known_hosts", out) return rc, out, err From a9017af2bb648930e27bab52de12f9983411778c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 12 Mar 2014 10:19:22 -0500 Subject: [PATCH 307/772] Adding validate_certs to all modules that use fetch_url --- lib/ansible/module_utils/urls.py | 5 ++++- library/cloud/ec2_facts | 10 +++++++++- library/database/riak | 11 ++++++++++- library/monitoring/boundary_meter | 9 +++++++++ library/monitoring/datadog_event | 11 
++++++++++- library/monitoring/newrelic_deployment | 9 +++++++++ library/monitoring/pagerduty | 12 +++++++++++- library/net_infrastructure/dnsmadeeasy | 10 ++++++++++ library/notification/flowdock | 9 +++++++++ library/notification/grove | 9 +++++++++ library/notification/hipchat | 9 +++++++++ 11 files changed, 99 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 2a484a89d60..9f15d17f718 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -155,7 +155,7 @@ def url_argument_spec(): def fetch_url(module, url, data=None, headers=None, method=None, - use_proxy=False, validate_certs=True, force=False, last_mod_time=None, timeout=10): + use_proxy=False, force=False, last_mod_time=None, timeout=10): ''' Fetches a file from an HTTP/FTP server using urllib2 ''' @@ -171,6 +171,9 @@ def fetch_url(module, url, data=None, headers=None, method=None, handlers = [] info = dict(url=url) + # Get validate_certs from the module params + validate_certs = module.params.get('validate_certs', True) + parsed = urlparse.urlparse(url) if parsed[0] == 'https': if not HAS_SSL and validate_certs: diff --git a/library/cloud/ec2_facts b/library/cloud/ec2_facts index 09c9d761ef7..3fade4d1a05 100644 --- a/library/cloud/ec2_facts +++ b/library/cloud/ec2_facts @@ -21,7 +21,15 @@ DOCUMENTATION = ''' module: ec2_facts short_description: Gathers facts about remote hosts within ec2 (aws) version_added: "1.0" -options: {} +options: + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 description: - This module fetches data from the metadata servers in ec2 (aws). Eucalyptus cloud provides a similar service and this module should diff --git a/library/database/riak b/library/database/riak index e0a7552f0ae..2ab51046af4 100644 --- a/library/database/riak +++ b/library/database/riak @@ -73,6 +73,14 @@ options: default: None aliases: [] choices: ['kv'] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 ''' EXAMPLES = ''' @@ -117,7 +125,8 @@ def main(): wait_for_ring=dict(default=False, type='int'), wait_for_service=dict( required=False, default=None, choices=['kv']) - ) + ), + validate_certs = dict(default='yes', type='bool'), ) diff --git a/library/monitoring/boundary_meter b/library/monitoring/boundary_meter index 3c9f90a4ce9..da739d4306f 100644 --- a/library/monitoring/boundary_meter +++ b/library/monitoring/boundary_meter @@ -58,6 +58,14 @@ options: description: - Organizations boundary API KEY required: true + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 notes: - This module does not yet support boundary tags. 
@@ -221,6 +229,7 @@ def main():
             name=dict(required=False),
             apikey=dict(required=True),
             apiid=dict(required=True),
+            validate_certs = dict(default='yes', type='bool'),
         )
     )
 
diff --git a/library/monitoring/datadog_event b/library/monitoring/datadog_event
index 878aee6d343..5d38dd4c31d 100644
--- a/library/monitoring/datadog_event
+++ b/library/monitoring/datadog_event
@@ -54,6 +54,14 @@ options:
         description: ["An arbitrary string to use for aggregation."]
         required: false
         default: null
+    validate_certs:
+        description:
+            - If C(no), SSL certificates will not be validated. This should only be used
+              on personally controlled sites using self-signed certificates.
+        required: false
+        default: 'yes'
+        choices: ['yes', 'no']
+        version_added: 1.5.1
 '''
 
 EXAMPLES = '''
@@ -89,7 +97,8 @@ def main():
             choices=['nagios', 'hudson', 'jenkins', 'user', 'my apps',
                      'feed', 'chef', 'puppet', 'git', 'bitbucket',
                      'fabric', 'capistrano']
-        )
+        ),
+        validate_certs = dict(default='yes', type='bool'),
     )
 )
 
diff --git a/library/monitoring/newrelic_deployment b/library/monitoring/newrelic_deployment
index 08132722e1d..93d55832fd3 100644
--- a/library/monitoring/newrelic_deployment
+++ b/library/monitoring/newrelic_deployment
@@ -63,6 +63,14 @@ options:
     description:
      - The environment for this deployment
    required: false
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    required: false
+    default: 'yes'
+    choices: ['yes', 'no']
+    version_added: 1.5.1
 
 # informational: requirements for nodes
 requirements: [ urllib, urllib2 ]
@@ -92,6 +100,7 @@ def main():
             user=dict(required=False),
             appname=dict(required=False),
             environment=dict(required=False),
+            validate_certs = dict(default='yes', type='bool'),
         ),
         supports_check_mode=True
     )
diff --git a/library/monitoring/pagerduty b/library/monitoring/pagerduty
index 9a7f21d0779..2c89b8f4512 100644
--- a/library/monitoring/pagerduty
+++ b/library/monitoring/pagerduty
@@ -60,6 +60,15 @@ options:
         default: Created by Ansible
         choices: []
         aliases: []
+    validate_certs:
+        description:
+            - If C(no), SSL certificates will not be validated. This should only be used
+              on personally controlled sites using self-signed certificates.
+        required: false
+        default: 'yes'
+        choices: ['yes', 'no']
+        version_added: 1.5.1
+
 notes:
     - This module does not yet have support to end maintenance windows.
 '''
@@ -135,7 +144,8 @@ def main():
             passwd=dict(required=True),
             service=dict(required=False),
             hours=dict(default='1', required=False),
-            desc=dict(default='Created by Ansible', required=False)
+            desc=dict(default='Created by Ansible', required=False),
+            validate_certs = dict(default='yes', type='bool'),
         )
     )
 
diff --git a/library/net_infrastructure/dnsmadeeasy b/library/net_infrastructure/dnsmadeeasy
index 9e2c14480eb..6b4fe1dcdb5 100644
--- a/library/net_infrastructure/dnsmadeeasy
+++ b/library/net_infrastructure/dnsmadeeasy
@@ -73,6 +73,15 @@ options:
     choices: [ 'present', 'absent' ]
     default: null
 
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    required: false
+    default: 'yes'
+    choices: ['yes', 'no']
+    version_added: 1.5.1
+
 notes:
   - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP.
  - This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks.
@@ -239,6 +248,7 @@ def main():
                               'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
         record_value=dict(required=False),
         record_ttl=dict(required=False, default=1800, type='int'),
+        validate_certs = dict(default='yes', type='bool'),
     ),
     required_together=(
         ['record_value', 'record_ttl', 'record_type']
     )
 
diff --git a/library/notification/flowdock b/library/notification/flowdock
index 32817d756dc..009487fb438 100644
--- a/library/notification/flowdock
+++ b/library/notification/flowdock
@@ -76,6 +76,14 @@ options:
     description:
       - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
     required: false
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    required: false
+    default: 'yes'
+    choices: ['yes', 'no']
+    version_added: 1.5.1
 
 # informational: requirements for nodes
 requirements: [ urllib, urllib2 ]
@@ -116,6 +124,7 @@ def main():
             project=dict(required=False),
             tags=dict(required=False),
             link=dict(required=False),
+            validate_certs = dict(default='yes', type='bool'),
         ),
         supports_check_mode=True
     )
diff --git a/library/notification/grove b/library/notification/grove
index 1e2132cfb73..e6bf241bdaa 100644
--- a/library/notification/grove
+++ b/library/notification/grove
@@ -31,6 +31,14 @@ options:
     description:
      - Icon for the service
    required: false
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    required: false
+    default: 'yes'
+    choices: ['yes', 'no']
+    version_added: 1.5.1
 
 author: Jonas Pfenniger
 '''
@@ -71,6 +79,7 @@ def main():
         service = dict(type='str', default='ansible'),
         url = dict(type='str', default=None),
         icon_url = dict(type='str', default=None),
+        validate_certs = dict(default='yes', type='bool'),
     )
 )
 
diff --git a/library/notification/hipchat b/library/notification/hipchat
index c4b36d64ce7..2107ac021b3 100644
--- a/library/notification/hipchat
+++ b/library/notification/hipchat
@@ -46,6 +46,14 @@ options:
     required: false
     default: 'yes'
     choices: [ "yes", "no" ]
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 # informational: requirements for nodes requirements: [ urllib, urllib2 ] @@ -104,6 +112,7 @@ def main(): "purple", "gray", "random"]), msg_format=dict(default="text", choices=["text", "html"]), notify=dict(default=True, type='bool'), + validate_certs = dict(default='yes', type='bool'), ), supports_check_mode=True ) From d8a81c488e022e262fa99f4684dbfa2dda5df91a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 12 Mar 2014 10:31:01 -0500 Subject: [PATCH 308/772] Remove validate_certs parameter from fetch_url calls --- library/monitoring/airbrake_deployment | 2 +- library/net_infrastructure/netscaler | 2 +- library/network/get_url | 7 +++---- library/packaging/apt_key | 2 +- library/packaging/rpm_key | 2 +- library/source_control/github_hooks | 6 +++--- 6 files changed, 10 insertions(+), 11 deletions(-) diff --git a/library/monitoring/airbrake_deployment b/library/monitoring/airbrake_deployment index 55d6017e4ea..89d62deda5e 100644 --- a/library/monitoring/airbrake_deployment +++ b/library/monitoring/airbrake_deployment @@ -115,7 +115,7 @@ def main(): # Send the data to airbrake data = urllib.urlencode(params) - response, info = fetch_url(module, url, data=data, validate_certs=module.params['validate_certs']) + response, info = fetch_url(module, url, data=data) if info['status'] == 200: module.exit_json(changed=True) else: diff --git a/library/net_infrastructure/netscaler b/library/net_infrastructure/netscaler index 4756d90abdc..2a8881cf56f 100644 --- a/library/net_infrastructure/netscaler +++ b/library/net_infrastructure/netscaler @@ -122,7 +122,7 @@ class netscaler(object): 'Content-Type' : 'application/x-www-form-urlencoded', } - response, info = fetch_url(self.module, request_url, data=data_json, validate_certs=self.module.params['validate_certs']) + response, info = fetch_url(self.module, request_url, data=data_json) return json.load(response.read()) diff --git a/library/network/get_url b/library/network/get_url index c249c44049a..8f0ccb1686d 100644 --- a/library/network/get_url +++ b/library/network/get_url @@ -124,14 +124,14 @@ def url_filename(url): return 'index.html' return fn -def url_get(module, url, dest, use_proxy, last_mod_time, force, validate_certs): +def url_get(module, url, dest, use_proxy, last_mod_time, force): """ Download data from the url and store in a temporary file. Return (tempfile, info about the request) """ - rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, validate_certs=validate_certs) + rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time) if info['status'] == 304: module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', '')) @@ -192,7 +192,6 @@ def main(): force = module.params['force'] sha256sum = module.params['sha256sum'] use_proxy = module.params['use_proxy'] - validate_certs = module.params['validate_certs'] dest_is_dir = os.path.isdir(dest) last_mod_time = None @@ -207,7 +206,7 @@ def main(): last_mod_time = datetime.datetime.utcfromtimestamp(mtime) # download to tmpsrc - tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, validate_certs) + tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force) # Now the request has completed, we can finally generate the final # destination file name from the info dict. 
diff --git a/library/packaging/apt_key b/library/packaging/apt_key index ff05bb93d1a..48442349ae1 100644 --- a/library/packaging/apt_key +++ b/library/packaging/apt_key @@ -140,7 +140,7 @@ def download_key(module, url): if url is None: module.fail_json(msg="needed a URL but was not specified") try: - rsp, info = fetch_url(module, url, validate_certs=module.params['validate_certs']) + rsp, info = fetch_url(module, url) return rsp.read() except Exception: module.fail_json(msg="error getting key id from url", traceback=format_exc()) diff --git a/library/packaging/rpm_key b/library/packaging/rpm_key index 9d85f30ac8b..8a695c786ff 100644 --- a/library/packaging/rpm_key +++ b/library/packaging/rpm_key @@ -123,7 +123,7 @@ class RpmKey: def fetch_key(self, url, maxbytes=MAXBYTES): """Downloads a key from url, returns a valid path to a gpg key""" try: - rsp, info = fetch_url(self.module, url, validate_certs=self.module.params['validate_certs']) + rsp, info = fetch_url(self.module, url) key = rsp.read(maxbytes) if not is_pubkey(key): self.module.fail_json(msg="Not a public key: %s" % url) diff --git a/library/source_control/github_hooks b/library/source_control/github_hooks index c5c5b648c7a..6a8d1ced935 100644 --- a/library/source_control/github_hooks +++ b/library/source_control/github_hooks @@ -75,7 +75,7 @@ def list(module, hookurl, oauthkey, repo, user): headers = { 'Authorization': 'Basic %s' % auth, } - response, info = fetch_url(module, url, headers=headers, validate_certs=module.params['validate_certs']) + response, info = fetch_url(module, url, headers=headers) if info['status'] != 200: return False, '' else: @@ -120,7 +120,7 @@ def create(module, hookurl, oauthkey, repo, user): headers = { 'Authorization': 'Basic %s' % auth, } - response, info = fetch_url(module, url, data=data, headers=headers, validate_certs=module.params['validate_certs']) + response, info = fetch_url(module, url, data=data, headers=headers) if info['status'] != 200: return 0, '[]' else: @@ -132,7 +132,7 @@ def delete(module, hookurl, oauthkey, repo, user, hookid): headers = { 'Authorization': 'Basic %s' % auth, } - response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE', validate_certs=module.params['validate_certs']) + response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE') return response.read() def main(): From 86d2ee4b978319242808e0416978f5a5908cce1c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 12 Mar 2014 10:41:18 -0500 Subject: [PATCH 309/772] Don't append tags from roles to the plays tags list Fixes #6393 --- lib/ansible/playbook/play.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index af66ee25746..2289b0a4d3c 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -579,7 +579,6 @@ class Play(object): for x in results: if self.tags is not None: - self.tags = list(set(self.tags).union(set(x.tags))) x.tags.extend(self.tags) return results From 6d841d120eb3d73d539c11859164316d75663546 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 12 Mar 2014 11:57:28 -0400 Subject: [PATCH 310/772] Don't process shell commands as arrays. 
--- lib/ansible/module_utils/basic.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 09bdde6cd30..2d459873146 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1019,7 +1019,8 @@ class AnsibleModule(object): self.fail_json(rc=257, cmd=args, msg=msg) # expand things like $HOME and ~ - args = [ os.path.expandvars(os.path.expanduser(x)) for x in args ] + if not shell: + args = [ os.path.expandvars(os.path.expanduser(x)) for x in args ] rc = 0 msg = None From 154f123b7f8bd34f6d5db1e63537f9021f8822c3 Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Wed, 12 Mar 2014 11:13:20 -0500 Subject: [PATCH 311/772] setup module: Mark unsafe commands as use_unsafe_shell=True. --- library/system/setup | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/library/system/setup b/library/system/setup index a05699f082e..c6583d42029 100644 --- a/library/system/setup +++ b/library/system/setup @@ -260,7 +260,7 @@ class Facts(object): self.facts['distribution_release'] = data[1] elif self.facts['system'] == 'HP-UX': self.facts['distribution'] = 'HP-UX' - rc, out, err = module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'") + rc, out, err = module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True) data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out) if data: self.facts['distribution_version'] = data.groups()[0] @@ -1254,31 +1254,31 @@ class HPUX(Hardware): def get_cpu_facts(self): if self.facts['architecture'] == '9000/800': - rc, out, err = module.run_command("ioscan -FkCprocessor|wc -l") + rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True) self.facts['processor_count'] = int(out.strip()) #Working with machinfo mess elif self.facts['architecture'] == 'ia64': if self.facts['distribution_version'] == "B.11.23": - rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep 'Number of CPUs'") + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True) self.facts['processor_count'] = int(out.strip().split('=')[1]) - rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep 'processor family'") + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True) self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip() - rc, out, err = module.run_command("ioscan -FkCprocessor|wc -l") + rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True) self.facts['processor_cores'] = int(out.strip()) if self.facts['distribution_version'] == "B.11.31": #if machinfo return cores strings release B.11.31 > 1204 - rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep core|wc -l") + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True) if out.strip()== '0': - rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep Intel") + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True) self.facts['processor_count'] = int(out.strip().split(" ")[0]) #If hyperthreading is active divide cores by 2 - rc, out, err = module.run_command("/usr/sbin/psrset |grep LCPU") + rc, out, err = module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True) data = 
re.sub(' +',' ',out).strip().split(' ') if len(data) == 1: hyperthreading = 'OFF' else: hyperthreading = data[1] - rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep logical") + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True) data = out.strip().split(" ") if hyperthreading == 'ON': self.facts['processor_cores'] = int(data[0])/2 @@ -1287,19 +1287,19 @@ class HPUX(Hardware): self.facts['processor_cores'] = self.facts['processor_count'] else: self.facts['processor_cores'] = int(data[0]) - rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep Intel |cut -d' ' -f4-") + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True) self.facts['processor'] = out.strip() else: - rc, out, err = module.run_command("/usr/contrib/bin/machinfo |egrep 'socket[s]?$' | tail -1") + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True) self.facts['processor_count'] = int(out.strip().split(" ")[0]) - rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -e '[0-9] core' |tail -1") + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True) self.facts['processor_cores'] = int(out.strip().split(" ")[0]) - rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep Intel") + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True) self.facts['processor'] = out.strip() def get_memory_facts(self): pagesize = 4096 - rc, out, err = module.run_command("/usr/bin/vmstat|tail -1") + rc, out, err = module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True) data = int(re.sub(' +',' ',out).split(' ')[5].strip()) self.facts['memfree_mb'] = pagesize * data / 1024 / 1024 if self.facts['architecture'] == '9000/800': @@ -1307,12 +1307,12 @@ class HPUX(Hardware): data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip() self.facts['memtotal_mb'] = int(data) / 1024 else: - rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep Memory") + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True) data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip() self.facts['memtotal_mb'] = int(data) rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f -q") self.facts['swaptotal_mb'] = int(out.strip()) - rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f |egrep '^dev|^fs'") + rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True) swap = 0 for line in out.strip().split('\n'): swap += int(re.sub(' +',' ',line).split(' ')[3].strip()) @@ -1322,7 +1322,7 @@ class HPUX(Hardware): rc, out, err = module.run_command("model") self.facts['model'] = out.strip() if self.facts['architecture'] == 'ia64': - rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' |grep -v BMC") + rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True) self.facts['firmware_version'] = out.split(':')[1].strip() From 2bfaacd17063ed52ceca53f55861acb7ff655c66 Mon Sep 17 00:00:00 2001 From: Jakub Jirutka Date: Wed, 12 Mar 2014 18:21:53 +0100 Subject: [PATCH 312/772] Fix #5679 again after recent changes in core --- library/files/lineinfile | 5 ++--- .../roles/test_lineinfile/tasks/main.yml | 18 
++++++++++++++++++ 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/library/files/lineinfile b/library/files/lineinfile index 73c9e88cb8c..bdc7b51ed90 100644 --- a/library/files/lineinfile +++ b/library/files/lineinfile @@ -350,9 +350,8 @@ def main(): if ins_bef is None and ins_aft is None: ins_aft = 'EOF' - # Replace the newline character with an actual newline. Don't replace - # escaped \\n, hence sub and not str.replace. - line = re.sub(r'\n', os.linesep, params['line']) + # Replace the newline character with an actual newline. + line = params['line'].decode('string_escape') present(module, dest, params['regexp'], line, ins_aft, ins_bef, create, backup, backrefs) diff --git a/test/integration/roles/test_lineinfile/tasks/main.yml b/test/integration/roles/test_lineinfile/tasks/main.yml index f59979473b9..b8974b7edca 100644 --- a/test/integration/roles/test_lineinfile/tasks/main.yml +++ b/test/integration/roles/test_lineinfile/tasks/main.yml @@ -209,3 +209,21 @@ that: - "result.stat.md5 == 'fef1d487711facfd7aa2c87d788c19d9'" + +- name: insert a multiple lines at the end of the file + lineinfile: dest={{output_dir}}/test.txt state=present line="This is a line\nwith \\\n character" insertafter="EOF" + register: result + +- name: assert that the multiple lines was inserted + assert: + that: + - "result.changed == true" + - "result.msg == 'line added'" + +- stat: path={{output_dir}}/test.txt + register: result + +- name: assert test md5 matches after insert the multiple lines + assert: + that: + - "result.stat.md5 == 'c2510d5bc8fdef8e752b8f8e74c784c2'" From 4173608b58ef7246b5ecd8788152bfd7635bc18f Mon Sep 17 00:00:00 2001 From: Patrick Smith Date: Wed, 12 Mar 2014 13:28:39 -0400 Subject: [PATCH 313/772] git: Execute git commands in `dest` directory --- library/source_control/git | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/source_control/git b/library/source_control/git index 4f885c94001..65c799a5df4 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -263,7 +263,7 @@ def get_remote_head(git_path, module, dest, version, remote, bare): def is_remote_tag(git_path, module, dest, remote, version): cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version) - (rc, out, err) = module.run_command(cmd, check_rc=True) + (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) if version in out: return True else: @@ -291,7 +291,7 @@ def get_tags(git_path, module, dest): def is_remote_branch(git_path, module, dest, remote, version): cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version) - (rc, out, err) = module.run_command(cmd, check_rc=True) + (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) if version in out: return True else: From d37f0c6d12e6ec9415ee305acc41e2e938cf42e2 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 12 Mar 2014 14:11:13 -0400 Subject: [PATCH 314/772] Use same implementation as ssh.py for "is in host file" checks in module code, prevents git module from adding keys more than once. 
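Hashed known_hosts entries store an HMAC-SHA1 of the hostname rather than the hostname itself, so the check has to recompute the digest with the stored salt. A standalone sketch of the matching step, assuming a well-formed "|1|salt|hash" token taken from the first field of a known_hosts line (the helper name is hypothetical):

    import hmac
    from hashlib import sha1

    HASHED_KEY_MAGIC = "|1|"

    def matches_hashed_entry(host, token):
        # token looks like "|1|<base64 salt>|<base64 digest>"
        salt_b64, digest_b64 = token[len(HASHED_KEY_MAGIC):].split("|", 1)
        digest = hmac.new(salt_b64.decode('base64'), host, sha1).digest()
        return digest == digest_b64.decode('base64')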
--- lib/ansible/module_utils/known_hosts.py | 61 +++++++++++++++++++------ 1 file changed, 46 insertions(+), 15 deletions(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 8dc1f3267b7..7ddaf69cebb 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -26,6 +26,9 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import hmac +HASHED_KEY_MAGIC = "|1|" + def add_git_host_key(module, url, accept_hostkey=True): """ idempotently add a git url hostkey """ @@ -58,28 +61,56 @@ def get_fqdn(repo_url): return result - def check_hostkey(module, fqdn): + return not not_in_host_file(module, fqdn) - """ use ssh-keygen to check if key is known """ +# this is a variant of code found in connection_plugins/paramiko.py and we should modify +# the paramiko code to import and use this. - result = False - keygen_cmd = module.get_bin_path('ssh-keygen', True) - this_cmd = keygen_cmd + " -H -F " + fqdn - rc, out, err = module.run_command(this_cmd) +def not_in_host_file(self, host): - if rc == 0 and out != "": - result = True + + if 'USER' in os.environ: + user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") else: - # Check the main system location - this_cmd = keygen_cmd + " -H -f /etc/ssh/ssh_known_hosts -F " + fqdn - rc, out, err = module.run_command(this_cmd) + user_host_file = "~/.ssh/known_hosts" + user_host_file = os.path.expanduser(user_host_file) + + host_file_list = [] + host_file_list.append(user_host_file) + host_file_list.append("/etc/ssh/ssh_known_hosts") + host_file_list.append("/etc/ssh/ssh_known_hosts2") + + hfiles_not_found = 0 + for hf in host_file_list: + if not os.path.exists(hf): + hfiles_not_found += 1 + continue + host_fh = open(hf) + data = host_fh.read() + host_fh.close() + for line in data.split("\n"): + if line is None or line.find(" ") == -1: + continue + tokens = line.split() + if tokens[0].find(HASHED_KEY_MAGIC) == 0: + # this is a hashed known host entry + try: + (kn_salt,kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2) + hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1) + hash.update(host) + if hash.digest() == kn_host.decode('base64'): + return False + except: + # invalid hashed host key, skip it + continue + else: + # standard host file entry + if host in tokens[0]: + return False - if rc == 0: - if out != "": - result = True + return True - return result def add_host_key(module, fqdn, key_type="rsa"): From 2c7d58abe046e08a7b082e63d5699b7f2206ea6c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 12 Mar 2014 13:44:24 -0500 Subject: [PATCH 315/772] Compile ca certs into a temp file to reduce number of attempts For those who may have a large number of certs found, this can reduce the number of ssl connections attempted. 
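Concatenating the certificates first means ssl.get_server_certificate() runs once against a single bundle instead of once per candidate file. A reduced sketch of the bundling step, with paths_checked standing in for the real platform-specific directory list:

    import os
    import tempfile

    def build_ca_bundle(paths_checked):
        tmp_fd, tmp_path = tempfile.mkstemp()
        for path in paths_checked:
            if not os.path.isdir(path):
                continue
            for f in os.listdir(path):
                full_path = os.path.join(path, f)
                if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt', '.pem'):
                    os.write(tmp_fd, open(full_path).read())
        os.close(tmp_fd)
        return tmp_path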
--- lib/ansible/module_utils/urls.py | 37 +++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 9f15d17f718..053dfc041c8 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -50,6 +50,7 @@ try: except: HAS_SSL=False +import tempfile class RequestWithMethod(urllib2.Request): ''' @@ -109,32 +110,42 @@ class SSLValidationHandler(urllib2.BaseHandler): # location if the OS platform one is not available paths_checked.append('/etc/ansible') + tmp_fd, tmp_path = tempfile.mkstemp() + + # for all of the paths, find any .crt or .pem files + # and compile them into single temp file for use + # in the ssl check to speed up the test for path in paths_checked: if os.path.exists(path) and os.path.isdir(path): dir_contents = os.listdir(path) for f in dir_contents: full_path = os.path.join(path, f) if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt','.pem'): - ca_certs.append(full_path) + try: + cert_file = open(full_path, 'r') + os.write(tmp_fd, cert_file.read()) + cert_file.close() + except: + pass - return (ca_certs, paths_checked) + return (tmp_path, paths_checked) def http_request(self, req): - ca_certs, paths_checked = self.get_ca_certs() - if len(ca_certs) > 0: - for ca_cert in ca_certs: - try: - server_cert = ssl.get_server_certificate((self.hostname, self.port), ca_certs=ca_cert) - return req - except ssl.SSLError: - # try the next one - pass + tmp_ca_cert_path, paths_checked = self.get_ca_certs() + try: + server_cert = ssl.get_server_certificate((self.hostname, self.port), ca_certs=tmp_ca_cert_path) + except ssl.SSLError: # fail if we tried all of the certs but none worked self.module.fail_json(msg='Failed to validate the SSL certificate for %s:%s. ' % (self.hostname, self.port) + \ 'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \ 'Paths checked for this platform: %s' % ", ".join(paths_checked)) - # if no CA certs were found, we just fall through - # to here and return the request with no SSL validation + try: + # cleanup the temp file created, don't worry + # if it fails for some reason + os.remove(tmp_ca_cert_path) + except: + pass + return req https_request = http_request From bbf320fd22161614e425757a264952edf61493a4 Mon Sep 17 00:00:00 2001 From: Dan Koch Date: Wed, 12 Mar 2014 14:59:50 -0400 Subject: [PATCH 316/772] Work around for cwd stat problems with run_command and sudo This fixes issue #6443 --- lib/ansible/module_utils/basic.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 2d459873146..3eace12eeb4 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1074,6 +1074,8 @@ class AnsibleModule(object): try: + if cwd: + os.chdir(cwd) cmd = subprocess.Popen(args, **kwargs) if data: From 29c32890d5ca262eb05f9d1b72d5f8c5f5467d26 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 12 Mar 2014 15:18:55 -0400 Subject: [PATCH 317/772] Add notes about module.run_command to coding guidelines. 
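The rule the new section codifies is easiest to see side by side; user_input below is a hypothetical value taken from module parameters:

    import pipes

    # no shell needed: pass an argv list and nothing is interpolated
    rc, out, err = module.run_command(['grep', user_input, '/etc/passwd'])

    # a pipeline genuinely needs the shell, so quote the user-supplied
    # value and opt in explicitly with use_unsafe_shell=True
    cmd = "grep %s /etc/passwd | wc -l" % pipes.quote(user_input)
    rc, out, err = module.run_command(cmd, use_unsafe_shell=True)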
--- CODING_GUIDELINES.md | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/CODING_GUIDELINES.md b/CODING_GUIDELINES.md index 7860fb24814..1ba5d5035e4 100644 --- a/CODING_GUIDELINES.md +++ b/CODING_GUIDELINES.md @@ -66,8 +66,10 @@ Functions and Methods * In general, functions should not be 'too long' and should describe a meaningful amount of work * When code gets too nested, that's usually the sign the loop body could benefit from being a function + * Parts of our existing code are not the best examples of this at times. * Functions should have names that describe what they do, along with docstrings * Functions should be named with_underscores + * "Don't repeat yourself" is generally a good philosophy Variables ========= @@ -76,6 +78,15 @@ Variables * Ansible python code uses identifiers like 'ClassesLikeThis and variables_like_this * Module parameters should also use_underscores and not runtogether +Module Security +=============== + + * Modules must take steps to avoid passing user input from the shell and always check return codes + * always use module.run_command instead of subprocess or Popen or os.system -- this is mandatory + * if you use need the shell you must pass use_unsafe_shell=True to module.run_command + * if you do not need the shell, avoid using the shell + * any variables that can come from the user input with use_unsafe_shell=True must be wrapped by pipes.quote(x) + Misc Preferences ================ @@ -149,16 +160,19 @@ All contributions to the core repo should preserve original licenses and new con Module Documentation ==================== -All module pull requests must include a DOCUMENTATION docstring (YAML format, see other modules for examples) as well as an EXAMPLES docstring, which -is free form. +All module pull requests must include a DOCUMENTATION docstring (YAML format, +see other modules for examples) as well as an EXAMPLES docstring, which is free form. -When adding new modules, any new parameter must have a "version_added" attribute. When submitting a new module, the module should have a "version_added" -attribute in the pull request as well, set to the current development version. +When adding new modules, any new parameter must have a "version_added" attribute. +When submitting a new module, the module should have a "version_added" attribute in the +pull request as well, set to the current development version. Be sure to check grammar and spelling. -It's frequently the case that modules get submitted with YAML that isn't valid, so you can run "make webdocs" from the checkout to preview your module's documentation. -If it fails to build, take a look at your DOCUMENTATION string or you might have a Python syntax error in there too. +It's frequently the case that modules get submitted with YAML that isn't valid, +so you can run "make webdocs" from the checkout to preview your module's documentation. +If it fails to build, take a look at your DOCUMENTATION string +or you might have a Python syntax error in there too. 
Python Imports ============== From 60a7f573002b38deecc89ae834f7fb245510613f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 12 Mar 2014 14:33:31 -0500 Subject: [PATCH 318/772] Make sure the cwd exists in run_command before trying to use it --- lib/ansible/module_utils/basic.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 3eace12eeb4..1988857610f 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1074,8 +1074,10 @@ class AnsibleModule(object): try: - if cwd: + # make sure we're in the right working directory + if cwd and os.path.isdir(cwd): os.chdir(cwd) + cmd = subprocess.Popen(args, **kwargs) if data: From 5c9dc33e415c7dc28db330099236fa2ffcdf009e Mon Sep 17 00:00:00 2001 From: James Laska Date: Wed, 12 Mar 2014 14:28:48 -0400 Subject: [PATCH 319/772] Additional test_git scenarios Includes coverage for accept_hostkey and additional scm URL formats. --- .../integration/roles/test_git/tasks/main.yml | 85 ++++++++++++++++--- 1 file changed, 71 insertions(+), 14 deletions(-) diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml index a7072d1ab52..d5b92c8366c 100644 --- a/test/integration/roles/test_git/tasks/main.yml +++ b/test/integration/roles/test_git/tasks/main.yml @@ -16,11 +16,15 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -- name: set where to extract the repo - set_fact: checkout_dir={{ output_dir }}/git - -- name: set what repo to use - set_fact: repo=https://github.com/jimi-c/test_role +- name: set role facts + set_fact: + checkout_dir: '{{ output_dir }}/git' + repo_format1: 'https://github.com/jimi-c/test_role' + repo_format2: 'git@github.com:jimi-c/test_role.git' + repo_format3: 'ssh://git@github.com/jimi-c/test_role.git' + known_host_files: + - "{{ lookup('env','HOME') }}/.ssh/known_hosts" + - '/etc/ssh/ssh_known_hosts' - name: clean out the output_dir shell: rm -rf {{ output_dir }}/* @@ -28,28 +32,26 @@ - name: verify that git is installed so this test can continue shell: which git +# +# Test repo=https://github.com/... +# + - name: initial checkout - git: repo={{ repo }} dest={{ checkout_dir }} + git: repo={{ repo_format1 }} dest={{ checkout_dir }} register: git_result -- debug: var=git_result - -- shell: ls ~/ansible_testing/git - - name: verify information about the initial clone assert: that: - "'before' in git_result" - "'after' in git_result" - "not git_result.before" - - "git_result.changed" + - "git_result.changed" - name: repeated checkout - git: repo={{ repo }} dest={{ checkout_dir }} + git: repo={{ repo_format1 }} dest={{ checkout_dir }} register: git_result2 -- debug: var=git_result2 - - name: check for tags stat: path={{ checkout_dir }}/.git/refs/tags register: tags @@ -74,6 +76,61 @@ that: - "not git_result2.changed" +# +# Test repo=git@github.com:/... +# Requires variable: github_ssh_private_key +# + +- name: clear checkout_dir + file: state=absent path={{ checkout_dir }} +- name: remove known_host files + file: state=absent path={{ item }} + with_items: known_host_files +- name: checkout ssh://git@github.com repo without accept_hostkey (expected fail) + git: repo={{ repo_format2 }} dest={{ checkout_dir }} + register: git_result + ignore_errors: true +- assert: + that: + - 'git_result.failed' + - 'git_result.msg == "github.com has an unknown hostkey. 
Set accept_hostkey to True or manually add the hostkey prior to running the git module"' + +- name: checkout git@github.com repo with accept_hostkey (expected pass) + git: + repo: '{{ repo_format2 }}' + dest: '{{ checkout_dir }}' + accept_hostkey: true + key_file: '{{ github_ssh_private_key }}' + register: git_result + when: github_ssh_private_key is defined + +- assert: + that: + - 'git_result.changed' + when: not git_result|skipped + +# +# Test repo=ssh://git@github.com/... +# Requires variable: github_ssh_private_key +# + +- name: clear checkout_dir + file: state=absent path={{ checkout_dir }} + +- name: checkout ssh://git@github.com repo with accept_hostkey (expected pass) + git: + repo: '{{ repo_format3 }}' + dest: '{{ checkout_dir }}' + version: 'master' + accept_hostkey: false # should already have been accepted + key_file: '{{ github_ssh_private_key }}' + register: git_result + when: github_ssh_private_key is defined + +- assert: + that: + - 'git_result.changed' + when: not git_result|skipped From bc678e7d7b04b10e755042ff3df9a1d7920fdaea Mon Sep 17 00:00:00 2001 From: James Laska Date: Wed, 12 Mar 2014 14:37:15 -0400 Subject: [PATCH 320/772] Add credentials.template and support custom INVENTORY Testers may override the inventory and vars-file using the environment variables 'INVENTORY' and 'VARS_FILE'. --- test/integration/Makefile | 19 +++++++++++-------- test/integration/credentials.template | 7 +++++++ 2 files changed, 18 insertions(+), 8 deletions(-) create mode 100644 test/integration/credentials.template diff --git a/test/integration/Makefile b/test/integration/Makefile index 33ffc3a969b..7a4072d44ae 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -1,17 +1,20 @@ +INVENTORY ?= inventory +VARS_FILE ?= integration_config.yml + all: non_destructive destructive check_mode test_hash non_destructive: - ansible-playbook non_destructive.yml -i inventory -e @integration_config.yml -v $(TEST_FLAGS) + ansible-playbook non_destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) -v $(TEST_FLAGS) destructive: - ansible-playbook destructive.yml -i inventory -e @integration_config.yml -v $(TEST_FLAGS) + ansible-playbook destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) -v $(TEST_FLAGS) check_mode: - ansible-playbook check_mode.yml -i inventory -e @integration_config.yml -v --check $(TEST_FLAGS) + ansible-playbook check_mode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v --check $(TEST_FLAGS) test_hash: - ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_hash.yml -i inventory -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' - ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_hash.yml -i inventory -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' + ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_hash.yml -i $(INVENTORY) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' + ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_hash.yml -i $(INVENTORY) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' cloud: amazon rackspace @@ -25,17 +28,17 @@ rackspace_cleanup: @#python cleanup_rax.py -y credentials.yml: - @echo "No credentials.yml file found. A file named 'credentials.yml' is needed to provide credentials needed to run cloud tests." + @echo "No credentials.yml file found. A file named 'credentials.yml' is needed to provide credentials needed to run cloud tests. See sample 'credentials.template' file." 
@exit 1 amazon: credentials.yml - ansible-playbook amazon.yml -i inventory -e @integration_config.yml -e @credentials.yml -v $(TEST_FLAGS) ; \ + ansible-playbook amazon.yml -i $(INVENTORY) -e @$(VARS_FILE) -e @credentials.yml -v $(TEST_FLAGS) ; \ RC=$$? ; \ make amazon_cleanup ; \ exit $$RC; rackspace: credentials.yml - ansible-playbook rackspace.yml -i inventory -e @integration_config.yml -e @credentials.yml -v $(TEST_FLAGS) ; \ + ansible-playbook rackspace.yml -i $(INVENTORY) -e @$(VARS_FILE) -e @credentials.yml -v $(TEST_FLAGS) ; \ RC=$$? ; \ make rackspace_cleanup ; \ exit $$RC; diff --git a/test/integration/credentials.template b/test/integration/credentials.template new file mode 100644 index 00000000000..0ca34aff7c6 --- /dev/null +++ b/test/integration/credentials.template @@ -0,0 +1,7 @@ +--- +# AWS Credentials +ec2_access_key: FIXME +ec2_secret_key: FIXME + +# GITHUB Credentials +github_ssh_private_key: "{{ lookup('env','HOME') }}/.ssh/id_rsa" From 6010e748394d3fc01e2824bda2e6e3881068cb56 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 12 Mar 2014 16:44:03 -0400 Subject: [PATCH 321/772] Various commits to enable no-shell or safe-shell usage, more to come. --- library/packaging/apt_repository | 5 ++++- library/packaging/cpanm | 29 +++++++++++++++-------------- library/packaging/gem | 2 +- library/packaging/macports | 6 ++++-- library/packaging/opkg | 3 ++- library/packaging/pacman | 6 +++--- 6 files changed, 29 insertions(+), 22 deletions(-) diff --git a/library/packaging/apt_repository b/library/packaging/apt_repository index 4587d90ba78..7a19dabc167 100644 --- a/library/packaging/apt_repository +++ b/library/packaging/apt_repository @@ -352,7 +352,10 @@ def get_add_ppa_signing_key_callback(module): def _run_command(command): module.run_command(command, check_rc=True) - return _run_command if not module.check_mode else None + if module.check_mode: + return _run_command + else: + return None def main(): diff --git a/library/packaging/cpanm b/library/packaging/cpanm index 5f5ae98022f..1c73d2727c8 100644 --- a/library/packaging/cpanm +++ b/library/packaging/cpanm @@ -72,14 +72,17 @@ author: Franck Cuny def _is_package_installed(module, name, locallib, cpanm): cmd = "" if locallib: - cmd = "PERL5LIB={locallib}/lib/perl5".format(locallib=locallib) - cmd = "{cmd} perl -M{name} -e '1'".format(cmd=cmd, name=name) + os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib + cmd = "%s perl -M%s -e '1'" % (cmd, name) res, stdout, stderr = module.run_command(cmd, check_rc=False) - installed = True if res == 0 else False - return installed - + if res == 0 + return True + else + return False def _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm): + # this code should use "%s" like everything else and just return early but not fixing all of it now. 
+ # don't copy stuff like this if from_path: cmd = "{cpanm} {path}".format(cpanm=cpanm, path=from_path) else: @@ -111,21 +114,20 @@ def main(): required_one_of=[['name', 'from_path']], ) - cpanm = module.get_bin_path('cpanm', True) - - name = module.params['name'] + cpanm = module.get_bin_path('cpanm', True) + name = module.params['name'] from_path = module.params['from_path'] - notest = module.boolean(module.params.get('notest', False)) - locallib = module.params['locallib'] - mirror = module.params['mirror'] + notest = module.boolean(module.params.get('notest', False)) + locallib = module.params['locallib'] + mirror = module.params['mirror'] - changed = False + changed = False installed = _is_package_installed(module, name, locallib, cpanm) if not installed: out_cpanm = err_cpanm = '' - cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm) + cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm) rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False) @@ -137,7 +139,6 @@ def main(): module.exit_json(changed=changed, binary=cpanm, name=name) - # import module snippets from ansible.module_utils.basic import * diff --git a/library/packaging/gem b/library/packaging/gem index 25fc337e14e..079711fecfe 100644 --- a/library/packaging/gem +++ b/library/packaging/gem @@ -89,7 +89,7 @@ def get_rubygems_path(module): return module.get_bin_path('gem', True) def get_rubygems_version(module): - cmd = [get_rubygems_path(module), '--version'] + cmd = [ get_rubygems_path(module), '--version' ] (rc, out, err) = module.run_command(cmd, check_rc=True) match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out) diff --git a/library/packaging/macports b/library/packaging/macports index b58224b63fe..ae7010b1cbd 100644 --- a/library/packaging/macports +++ b/library/packaging/macports @@ -53,6 +53,7 @@ EXAMPLES = ''' - macports: name=foo state=inactive ''' +import pipes def update_package_db(module, port_path): """ Updates packages list. """ @@ -68,7 +69,7 @@ def query_package(module, port_path, name, state="present"): if state == "present": - rc, out, err = module.run_command("%s installed | grep -q ^.*%s" % (port_path, name)) + rc, out, err = module.run_command("%s installed | grep -q ^.*%s" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True) if rc == 0: return True @@ -76,7 +77,8 @@ def query_package(module, port_path, name, state="present"): elif state == "active": - rc, out, err = module.run_command("%s installed %s | grep -q active" % (port_path, name)) + rc, out, err = module.run_command("%s installed %s | grep -q active" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True) + if rc == 0: return True diff --git a/library/packaging/opkg b/library/packaging/opkg index 4a834cf1a39..0187abe56a8 100644 --- a/library/packaging/opkg +++ b/library/packaging/opkg @@ -51,6 +51,7 @@ EXAMPLES = ''' - opkg: name=foo,bar state=absent ''' +import pipes def update_package_db(module, opkg_path): """ Updates packages list. 
""" @@ -66,7 +67,7 @@ def query_package(module, opkg_path, name, state="present"): if state == "present": - rc, out, err = module.run_command("%s list-installed | grep -q ^%s" % (opkg_path, name)) + rc, out, err = module.run_command("%s list-installed | grep -q ^%s" % (pipes.quote(opkg_path), pipes.quote(name)), use_unsafe_shell=True) if rc == 0: return True diff --git a/library/packaging/pacman b/library/packaging/pacman index a4a24ca5fd1..46b7f4c755f 100644 --- a/library/packaging/pacman +++ b/library/packaging/pacman @@ -100,7 +100,7 @@ def query_package(module, name, state="installed"): def update_package_db(module): - cmd = "pacman -Syy > /dev/null" + cmd = "pacman -Syy" rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: @@ -120,7 +120,7 @@ def remove_packages(module, packages): if not query_package(module, package): continue - cmd = "pacman -%s %s --noconfirm > /dev/null" % (args, package) + cmd = "pacman -%s %s --noconfirm" % (args, package) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: @@ -148,7 +148,7 @@ def install_packages(module, packages, package_files): else: params = '-S %s' % package - cmd = "pacman %s --noconfirm > /dev/null" % (params) + cmd = "pacman %s --noconfirm" % (params) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: From 81b4ebbe1d4ff1a9ab15aa3c7da46a55d51d35c1 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 12 Mar 2014 16:57:18 -0400 Subject: [PATCH 322/772] Fixes for run_command shell usage in remainder of packaging modules, save portinstall. --- library/packaging/pkgin | 4 ++-- library/packaging/pkgutil | 7 +++++-- library/packaging/redhat_subscription | 1 - library/packaging/swdepot | 5 +++-- library/packaging/urpmi | 6 +++--- 5 files changed, 13 insertions(+), 10 deletions(-) diff --git a/library/packaging/pkgin b/library/packaging/pkgin index 0554cf9a216..866c9f76a4c 100755 --- a/library/packaging/pkgin +++ b/library/packaging/pkgin @@ -58,13 +58,13 @@ import json import shlex import os import sys - +import pipes def query_package(module, pkgin_path, name, state="present"): if state == "present": - rc, out, err = module.run_command("%s -y list | grep ^%s" % (pkgin_path, name)) + rc, out, err = module.run_command("%s -y list | grep ^%s" % (pipes.quote(pkgin_path), pipes.quote(name)), use_unsafe_shell=True) if rc == 0: # At least one package with a package name that starts with ``name`` diff --git a/library/packaging/pkgutil b/library/packaging/pkgutil index d6c4f536c5a..e7d1ce7a0d6 100644 --- a/library/packaging/pkgutil +++ b/library/packaging/pkgutil @@ -58,13 +58,14 @@ pkgutil: name=CSWcommon state=present # Install a package from a specific repository pkgutil: name=CSWnrpe site='ftp://myinternal.repo/opencsw/kiel state=latest' ''' + import os +import pipes def package_installed(module, name): cmd = [module.get_bin_path('pkginfo', True)] cmd.append('-q') cmd.append(name) - #rc, out, err = module.run_command(' '.join(cmd), shell=False) rc, out, err = module.run_command(' '.join(cmd)) if rc == 0: return True @@ -73,12 +74,14 @@ def package_installed(module, name): def package_latest(module, name, site): # Only supports one package + name = pipes.quote(name) + site = pipes.quote(site) cmd = [ 'pkgutil', '--single', '-c' ] if site is not None: cmd += [ '-t', site ] cmd.append(name) cmd += [ '| tail -1 | grep -v SAME' ] - rc, out, err = module.run_command(' '.join(cmd)) + rc, out, err = module.run_command(' '.join(cmd), use_unsafe_shell=True) if rc == 1: return True else: diff 
--git a/library/packaging/redhat_subscription b/library/packaging/redhat_subscription index bb5d655a52f..0e5ce0856d2 100644 --- a/library/packaging/redhat_subscription +++ b/library/packaging/redhat_subscription @@ -216,7 +216,6 @@ class Rhsm(RegistrationBase): if password: args.extend(['--password', password]) - # Do the needful... rc, stderr, stdout = self.module.run_command(args, check_rc=True) def unsubscribe(self): diff --git a/library/packaging/swdepot b/library/packaging/swdepot index 6fd89088cc0..b41a860531f 100644 --- a/library/packaging/swdepot +++ b/library/packaging/swdepot @@ -19,6 +19,7 @@ # along with this software. If not, see . import re +import pipes DOCUMENTATION = ''' --- @@ -78,9 +79,9 @@ def query_package(module, name, depot=None): cmd_list = '/usr/sbin/swlist -a revision -l product' if depot: - rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, depot, name, name)) + rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, pipes.quote(depot), pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True) else: - rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, name, name)) + rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True) if rc == 0: version = re.sub("\s\s+|\t" , " ", stdout).strip().split()[1] else: diff --git a/library/packaging/urpmi b/library/packaging/urpmi index 72dfef02011..be49dfd2648 100644 --- a/library/packaging/urpmi +++ b/library/packaging/urpmi @@ -104,7 +104,7 @@ def query_package_provides(module, name): # rpm -q returns 0 if the package is installed, # 1 if it is not installed - cmd = "rpm -q --provides %s >/dev/null" % (name) + cmd = "rpm -q --provides %s" % (name) rc, stdout, stderr = module.run_command(cmd, check_rc=False) return rc == 0 @@ -125,7 +125,7 @@ def remove_packages(module, packages): if not query_package(module, package): continue - cmd = "%s --auto %s > /dev/null" % (URPME_PATH, package) + cmd = "%s --auto %s" % (URPME_PATH, package) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: @@ -158,7 +158,7 @@ def install_packages(module, pkgspec, force=True, no_suggests=True): else: force_yes = '' - cmd = ("%s --auto %s --quiet %s %s > /dev/null" % (URPMI_PATH, force_yes, no_suggests_yes, packages)) + cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes, no_suggests_yes, packages)) rc, out, err = module.run_command(cmd) From 303e085f8b385f8ef42ffb1ee60b1d989dc65711 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 12 Mar 2014 15:59:24 -0500 Subject: [PATCH 323/772] Only use cwd in run_command kwargs if the directory exists --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 1988857610f..f47347482f6 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1069,7 +1069,7 @@ class AnsibleModule(object): if path_prefix: kwargs['env'] = env - if cwd: + if cwd and os.path.isdir(cwd): kwargs['cwd'] = cwd From c193604f602e0cf4c44a7c02003f519fb35ddb15 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 12 Mar 2014 17:04:19 -0400 Subject: [PATCH 324/772] Some more run_command updates. 
--- library/packaging/portinstall | 6 +++--- library/source_control/bzr | 3 +-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/library/packaging/portinstall b/library/packaging/portinstall index 4bef8035be3..711a853e34a 100644 --- a/library/packaging/portinstall +++ b/library/packaging/portinstall @@ -71,7 +71,7 @@ def query_package(module, name): if pkg_info_path: pkgng = False pkg_glob_path = module.get_bin_path('pkg_glob', True) - rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, name)) + rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, pipes.quote(name), use_unsafe_shell=True) else: pkgng = True pkg_info_path = module.get_bin_path('pkg', True) @@ -128,11 +128,11 @@ def remove_packages(module, packages): if not query_package(module, package): continue - rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, package)) + rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(package)), use_unsafe_shell=True) if query_package(module, package): name_without_digits = re.sub('[0-9]', '', package) - rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, name_without_digits)) + rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(lame_without_digits)),use_unsafe_shell=True) if query_package(module, package): module.fail_json(msg="failed to remove %s: %s" % (package, out)) diff --git a/library/source_control/bzr b/library/source_control/bzr index 5217e469900..996150a39af 100644 --- a/library/source_control/bzr +++ b/library/source_control/bzr @@ -76,8 +76,7 @@ class Bzr(object): self.bzr_path = bzr_path def _command(self, args_list, cwd=None, **kwargs): - (rc, out, err) = self.module.run_command( - [self.bzr_path] + args_list, cwd=cwd, **kwargs) + (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs) return (rc, out, err) def get_version(self): From 89024f5d9fbb7c7f2cc4a6378bbb5501081bf050 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 12 Mar 2014 17:12:38 -0400 Subject: [PATCH 325/772] Fix typo in portinstall --- library/packaging/portinstall | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/portinstall b/library/packaging/portinstall index 711a853e34a..057eee6fa74 100644 --- a/library/packaging/portinstall +++ b/library/packaging/portinstall @@ -132,7 +132,7 @@ def remove_packages(module, packages): if query_package(module, package): name_without_digits = re.sub('[0-9]', '', package) - rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(lame_without_digits)),use_unsafe_shell=True) + rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(name_without_digits)),use_unsafe_shell=True) if query_package(module, package): module.fail_json(msg="failed to remove %s: %s" % (package, out)) From e7f74251c883bcf73982c3092d92aa8eed32a065 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 12 Mar 2014 17:25:32 -0400 Subject: [PATCH 326/772] Fix missing colons in cpanm --- library/packaging/cpanm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/packaging/cpanm b/library/packaging/cpanm index 1c73d2727c8..9fa003e1af0 100644 --- a/library/packaging/cpanm +++ b/library/packaging/cpanm @@ -75,9 +75,9 @@ def _is_package_installed(module, name, locallib, cpanm): os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib cmd = "%s perl -M%s -e 
'1'" % (cmd, name) res, stdout, stderr = module.run_command(cmd, check_rc=False) - if res == 0 + if res == 0: return True - else + else: return False def _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm): From 4e8b97ddeb2fee56cd0a39f7ea1b7eac2c1ba519 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 12 Mar 2014 17:22:59 -0400 Subject: [PATCH 327/772] More shell updates --- library/system/cron | 18 ++++++++++-------- library/system/debconf | 6 ++++-- library/system/hostname | 2 +- library/system/lvg | 8 ++++---- 4 files changed, 19 insertions(+), 15 deletions(-) diff --git a/library/system/cron b/library/system/cron index 39727b4c769..15c21fb157d 100644 --- a/library/system/cron +++ b/library/system/cron @@ -145,6 +145,7 @@ import os import re import tempfile import platform +import pipes CRONCMD = "/usr/bin/crontab" @@ -190,7 +191,8 @@ class CronTab(object): except: raise CronTabError("Unexpected error:", sys.exc_info()[0]) else: - (rc, out, err) = self.module.run_command(self._read_user_execute()) + # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME + (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True) if rc != 0 and rc != 1: # 1 can mean that there are no jobs. raise CronTabError("Unable to read crontab") @@ -235,8 +237,8 @@ class CronTab(object): # Add the entire crontab back to the user crontab if not self.cron_file: - # os.system(self._write_execute(path)) - (rc, out, err) = self.module.run_command(self._write_execute(path)) + # quoting shell args for now but really this should be two non-shell calls. FIXME + (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True) os.unlink(path) if rc != 0: @@ -350,9 +352,9 @@ class CronTab(object): user = '' if self.user: if platform.system() == 'SunOS': - return "su '%s' -c '%s -l'" % (self.user, CRONCMD) + return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD)) else: - user = '-u %s' % self.user + user = '-u %s' % pipes.quote(self.user) return "%s %s %s" % (CRONCMD , user, '-l') def _write_execute(self, path): @@ -362,10 +364,10 @@ class CronTab(object): user = '' if self.user: if platform.system() == 'SunOS': - return "chown %s %s ; su '%s' -c '%s %s'" % (self.user, path, self.user, CRONCMD, path) + return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path)) else: - user = '-u %s' % self.user - return "%s %s %s" % (CRONCMD , user, path) + user = '-u %s' % pipes.quote(self.user) + return "%s %s %s" % (CRONCMD , user, pipes.quote(path)) diff --git a/library/system/debconf b/library/system/debconf index 5b47d6b2b18..244561973db 100644 --- a/library/system/debconf +++ b/library/system/debconf @@ -84,6 +84,8 @@ debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license- debconf: name='tzdata' ''' +import pipes + def get_selections(module, pkg): cmd = [module.get_bin_path('debconf-show', True), pkg] rc, out, err = module.run_command(' '.join(cmd)) @@ -105,11 +107,11 @@ def set_selection(module, pkg, question, vtype, value, unseen): data = ' '.join([ question, vtype, value ]) setsel = module.get_bin_path('debconf-set-selections', True) - cmd = ["echo '%s %s' |" % (pkg, data), setsel] + cmd = ["echo '%s %s' |" % (pipes.quote(pkg), pipes.quote(data)), setsel] if unseen: cmd.append('-u') - return module.run_command(' '.join(cmd)) + return module.run_command(' '.join(cmd), use_unsafe_shell=True) 
def main(): diff --git a/library/system/hostname b/library/system/hostname index 781bdcd08aa..cca2364b611 100644 --- a/library/system/hostname +++ b/library/system/hostname @@ -286,7 +286,7 @@ class FedoraStrategy(GenericStrategy): def get_permanent_hostname(self): cmd = 'hostnamectl status | awk \'/^ *Static hostname:/{printf("%s", $3)}\'' - rc, out, err = self.module.run_command(cmd) + rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True) if rc != 0: self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err)) diff --git a/library/system/lvg b/library/system/lvg index 4e24b25a5c9..bc4709e3b12 100644 --- a/library/system/lvg +++ b/library/system/lvg @@ -162,13 +162,13 @@ def main(): ### create PV pvcreate_cmd = module.get_bin_path('pvcreate', True) for current_dev in dev_list: - rc,_,err = module.run_command("%s %s"%(pvcreate_cmd,current_dev)) + rc,_,err = module.run_command("%s %s" % (pvcreate_cmd,current_dev)) if rc == 0: changed = True else: - module.fail_json(msg="Creating physical volume '%s' failed"%current_dev, rc=rc, err=err) + module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) vgcreate_cmd = module.get_bin_path('vgcreate') - rc,_,err = module.run_command("%s -s %s %s %s"%(vgcreate_cmd, pesize, vg, dev_string)) + rc,_,err = module.run_command("%s -s %s %s %s" % (vgcreate_cmd, pesize, vg, dev_string)) if rc == 0: changed = True else: @@ -210,7 +210,7 @@ def main(): module.fail_json(msg="Creating physical volume '%s' failed"%current_dev, rc=rc, err=err) ### add PV to our VG vgextend_cmd = module.get_bin_path('vgextend', True) - rc,_,err = module.run_command("%s %s %s"%(vgextend_cmd, vg, devs_to_add_string)) + rc,_,err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string)) if rc == 0: changed = True else: From 6067d826e4c129a3f6fcfcf57ac115d6c997048d Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 12 Mar 2014 17:36:40 -0400 Subject: [PATCH 328/772] Close parens in portinstall --- library/packaging/portinstall | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/portinstall b/library/packaging/portinstall index 057eee6fa74..88e654b8db4 100644 --- a/library/packaging/portinstall +++ b/library/packaging/portinstall @@ -71,7 +71,7 @@ def query_package(module, name): if pkg_info_path: pkgng = False pkg_glob_path = module.get_bin_path('pkg_glob', True) - rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, pipes.quote(name), use_unsafe_shell=True) + rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, pipes.quote(name)), use_unsafe_shell=True) else: pkgng = True pkg_info_path = module.get_bin_path('pkg', True) From dac90278db8837b1ff6c422af7ae18742d6cc250 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 12 Mar 2014 17:38:23 -0400 Subject: [PATCH 329/772] More shell fixes --- library/database/riak | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/database/riak b/library/database/riak index 2ab51046af4..e1aedf25fb4 100644 --- a/library/database/riak +++ b/library/database/riak @@ -105,7 +105,7 @@ except ImportError: def ring_check(module, riak_admin_bin): - cmd = '%s ringready 2> /dev/null' % riak_admin_bin + cmd = '%s ringready' % riak_admin_bin rc, out, err = module.run_command(cmd) if rc == 0 and 'TRUE All nodes agree on the ring' in out: return True @@ -221,7 +221,7 @@ def main(): if wait_for_handoffs: timeout = time.time() + wait_for_handoffs while True: - cmd = '%s transfers 2> 
/dev/null' % riak_admin_bin + cmd = '%s transfers' % riak_admin_bin rc, out, err = module.run_command(cmd) if 'No transfers active' in out: result['handoffs'] = 'No transfers active.' From 98500b335b4af2ec17d470212ab9293a67e8b94c Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 12 Mar 2014 17:43:18 -0400 Subject: [PATCH 330/772] Fixup shell handling in monit module. --- library/monitoring/monit | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/library/monitoring/monit b/library/monitoring/monit index 32e3e058121..0705b714315 100644 --- a/library/monitoring/monit +++ b/library/monitoring/monit @@ -47,6 +47,7 @@ EXAMPLES = ''' - monit: name=httpd state=started ''' +import pipes def main(): arg_spec = dict( @@ -67,7 +68,7 @@ def main(): rc, out, err = module.run_command('%s reload' % MONIT) module.exit_json(changed=True, name=name, state=state) - rc, out, err = module.run_command('%s summary | grep "Process \'%s\'"' % (MONIT, name)) + rc, out, err = module.run_command('%s summary | grep "Process \'%s\'"' % (MONIT, pipes.quote(name)), use_unsafe_shell=True) present = name in out if not present and not state == 'present': @@ -78,7 +79,7 @@ def main(): if module.check_mode: module.exit_json(changed=True) module.run_command('%s reload' % MONIT, check_rc=True) - rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, name)) + rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, pipes.quote(name)), use_unsafe_shell=True) if name in out: module.exit_json(changed=True, name=name, state=state) else: @@ -86,7 +87,7 @@ def main(): module.exit_json(changed=False, name=name, state=state) - rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, name)) + rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, pipes.quote(name)), use_unsafe_shell=True) running = 'running' in out.lower() if running and (state == 'started' or state == 'monitored'): @@ -99,7 +100,7 @@ def main(): if module.check_mode: module.exit_json(changed=True) module.run_command('%s stop %s' % (MONIT, name)) - rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, name)) + rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, pipes.quote(name)), use_unsafe_shell=True) if 'not monitored' in out.lower() or 'stop pending' in out.lower(): module.exit_json(changed=True, name=name, state=state) module.fail_json(msg=out) @@ -108,7 +109,8 @@ def main(): if module.check_mode: module.exit_json(changed=True) module.run_command('%s unmonitor %s' % (MONIT, name)) - rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, name)) + # FIXME: DRY FOLKS! + rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, pipes.quote(name)), use_unsafe_shell=True) if 'not monitored' in out.lower(): module.exit_json(changed=True, name=name, state=state) module.fail_json(msg=out) From 78ebf5d266718f7ab97c12fbecfb62140254308c Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 12 Mar 2014 17:44:53 -0400 Subject: [PATCH 331/772] Still more shell fixes. 
--- library/packaging/apt_key | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/apt_key b/library/packaging/apt_key index 48442349ae1..e071833c3be 100644 --- a/library/packaging/apt_key +++ b/library/packaging/apt_key @@ -131,7 +131,7 @@ def all_keys(module, keyring): return results def key_present(module, key_id): - (rc, out, err) = module.run_command("apt-key list | 2>&1 grep -i -q %s" % key_id) + (rc, out, err) = module.run_command("apt-key list | 2>&1 grep -i -q %s" % pipes.quote(key_id), use_unsafe_shell=True) return rc == 0 def download_key(module, url): From 9cc4be2683d9a8e5c68a671d8e85793861be716c Mon Sep 17 00:00:00 2001 From: James Laska Date: Wed, 12 Mar 2014 19:28:57 -0400 Subject: [PATCH 332/772] Correct test integration ec2 cleanup script --- test/integration/cleanup_ec2.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/test/integration/cleanup_ec2.py b/test/integration/cleanup_ec2.py index 08d54751aaf..0e974c2089e 100644 --- a/test/integration/cleanup_ec2.py +++ b/test/integration/cleanup_ec2.py @@ -15,12 +15,14 @@ def delete_aws_resources(get_func, attr, opts): for item in get_func(): val = getattr(item, attr) if re.search(opts.match_re, val): - prompt_and_delete("Delete object with %s=%s? [y/n]: " % (attr, val), opts.assumeyes) + prompt_and_delete(item, "Delete object with %s=%s? [y/n]: " % (attr, val), opts.assumeyes) -def prompt_and_delete(prompt, assumeyes): - while not assumeyes: - assumeyes = raw_input(prompt) - obj.delete() +def prompt_and_delete(item, prompt, assumeyes): + if not assumeyes: + assumeyes = raw_input(prompt).lower() == 'y' + assert hasattr(item, 'delete'), "Class <%s> has no delete attribute" % item.__class__ + if assumeyes: + item.delete() def parse_args(): # Load details from credentials.yml From 81f2e43b76e108b57e6916d902d3a19799cfe1c6 Mon Sep 17 00:00:00 2001 From: Daniel Jaouen Date: Wed, 12 Mar 2014 19:52:16 -0400 Subject: [PATCH 333/772] Fix Homebrew._current_package_is_installed --- library/packaging/homebrew | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/library/packaging/homebrew b/library/packaging/homebrew index 33b2ab62497..a74091542e2 100644 --- a/library/packaging/homebrew +++ b/library/packaging/homebrew @@ -360,14 +360,15 @@ class Homebrew(object): self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) - rc, out, err = self.module.run_command( - "{brew_path} list -m1 | grep -q '^{package}$'".format( - brew_path=self.brew_path, - package=self.current_package, - ) - ) + cmd = [ + "{brew_path}".format(brew_path=self.brew_path), + "list", + "-m1", + ] + rc, out, err = self.module.run_command(cmd) + packages = [package for package in out.split('\n') if package] - if rc == 0: + if rc == 0 and self.current_package in packages: return True else: return False From 3d032b9db96b24779b5895f3b13a85584fa9db03 Mon Sep 17 00:00:00 2001 From: James Martin Date: Wed, 12 Mar 2014 21:26:16 -0400 Subject: [PATCH 334/772] Fixes various typos introduced in a9017af2bb648930e27bab52de12f9983411778c. Fixes odd encoding problem with wait_for_service. 
--- library/database/riak | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/library/database/riak b/library/database/riak index e1aedf25fb4..7ba4df3ea3a 100644 --- a/library/database/riak +++ b/library/database/riak @@ -124,9 +124,8 @@ def main(): wait_for_handoffs=dict(default=False, type='int'), wait_for_ring=dict(default=False, type='int'), wait_for_service=dict( - required=False, default=None, choices=['kv']) - ), - validate_certs = dict(default='yes', type='bool'), + required=False, default=None, choices=['kv']), + validate_certs = dict(default='yes', type='bool')) ) @@ -137,6 +136,7 @@ def main(): wait_for_handoffs = module.params.get('wait_for_handoffs') wait_for_ring = module.params.get('wait_for_ring') wait_for_service = module.params.get('wait_for_service') + validate_certs = module.params.get('validate_certs') #make sure riak commands are on the path @@ -231,7 +231,7 @@ def main(): module.fail_json(msg='Timeout waiting for handoffs.') if wait_for_service: - cmd = '%s wait_for_service riak_%s %s' % ( riak_admin_bin, wait_for_service, node_name) + cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name ] rc, out, err = module.run_command(cmd) result['service'] = out @@ -250,5 +250,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * main() From ccc0a8cbb069b002e9468ef302f4481ca075cf80 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 12 Mar 2014 20:45:51 -0500 Subject: [PATCH 335/772] Fix regression in dnsmadeeasy module caused by fetch_url change Also: * changed this module to use https for requests to the API * fixed a bug in which a traceback was caused when the domain wasn't found Fixes #6457 --- library/net_infrastructure/dnsmadeeasy | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/library/net_infrastructure/dnsmadeeasy b/library/net_infrastructure/dnsmadeeasy index 6b4fe1dcdb5..148e25a5011 100644 --- a/library/net_infrastructure/dnsmadeeasy +++ b/library/net_infrastructure/dnsmadeeasy @@ -129,7 +129,7 @@ class DME2: self.api = apikey self.secret = secret - self.baseurl = 'http://api.dnsmadeeasy.com/V2.0/' + self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/' self.domain = str(domain) self.domain_map = None # ["domain_name"] => ID self.record_map = None # ["record_name"] => ID @@ -161,14 +161,14 @@ class DME2: if data and not isinstance(data, basestring): data = urllib.urlencode(data) - response, info = fetch_url(self.module, url, data=data, method=method) + response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) if info['status'] not in (200, 201, 204): self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) try: return json.load(response) except Exception, e: - return False + return {} def getDomain(self, domain_id): if not self.domain_map: @@ -268,7 +268,7 @@ def main(): domain_records = DME.getRecords() if not domain_records: module.fail_json( - msg="The %s domain name is not accessible with this api_key; try using its ID if known." % domain) + msg="The requested domain name is not accessible with this api_key; try using its ID if known.") module.exit_json(changed=False, result=domain_records) # Fetch existing record + Build new one From 427221786c81f5e6c4d4d7e6019f3e5cc80bffb6 Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Wed, 12 Mar 2014 22:15:56 -0500 Subject: [PATCH 336/772] Shell updates. 
--- library/database/mysql_db | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/library/database/mysql_db b/library/database/mysql_db index 622bf59a39f..2fc32914082 100644 --- a/library/database/mysql_db +++ b/library/database/mysql_db @@ -101,6 +101,7 @@ EXAMPLES = ''' import ConfigParser import os +import pipes try: import MySQLdb except ImportError: @@ -123,36 +124,36 @@ def db_delete(cursor, db): def db_dump(module, host, user, password, db_name, target, port, socket=None): cmd = module.get_bin_path('mysqldump', True) - cmd += " --quick --user=%s --password='%s'" %(user, password) + cmd += " --quick --user=%s --password='%s'" % (pipes.quote(user), pipes.quote(password)) if socket is not None: - cmd += " --socket=%s" % socket + cmd += " --socket=%s" % pipes.quote(socket) else: - cmd += " --host=%s --port=%s" % (host, port) - cmd += " %s" % db_name + cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes(port)) + cmd += " %s" % pipes.quote(db_name) if os.path.splitext(target)[-1] == '.gz': - cmd = cmd + ' | gzip > ' + target + cmd = cmd + ' | gzip > ' + pipes.quote(target) elif os.path.splitext(target)[-1] == '.bz2': - cmd = cmd + ' | bzip2 > ' + target + cmd = cmd + ' | bzip2 > ' + pipes.quote(target) else: - cmd += " > %s" % target - rc, stdout, stderr = module.run_command(cmd) + cmd += " > %s" % pipes.quote(target) + rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) return rc, stdout, stderr def db_import(module, host, user, password, db_name, target, port, socket=None): cmd = module.get_bin_path('mysql', True) - cmd += " --user=%s --password='%s'" %(user, password) + cmd += " --user=%s --password='%s'" % (pipes.quote(user), pipes.quote(password)) if socket is not None: - cmd += " --socket=%s" % socket + cmd += " --socket=%s" % pipes.quote(socket) else: - cmd += " --host=%s --port=%s" % (host, port) - cmd += " -D %s" % db_name + cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port)) + cmd += " -D %s" % pipes.quote(db_name) if os.path.splitext(target)[-1] == '.gz': - cmd = 'gunzip < ' + target + ' | ' + cmd + cmd = 'gunzip < ' + pipes.quote(target) + ' | ' + cmd elif os.path.splitext(target)[-1] == '.bz2': - cmd = 'bunzip2 < ' + target + ' | ' + cmd + cmd = 'bunzip2 < ' + pipes.quote(target) + ' | ' + cmd else: - cmd += " < %s" % target - rc, stdout, stderr = module.run_command(cmd) + cmd += " < %s" % pipes.quote(target) + rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) return rc, stdout, stderr def db_create(cursor, db, encoding, collation): From 2b53ece3ab4ce58010d0a5db5c5ae00a0f839db4 Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Wed, 12 Mar 2014 23:25:22 -0500 Subject: [PATCH 337/772] mysql_db module: typo fix Tests clean. 
--- library/database/mysql_db | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/database/mysql_db b/library/database/mysql_db index 2fc32914082..c9fd5b4e087 100644 --- a/library/database/mysql_db +++ b/library/database/mysql_db @@ -128,7 +128,7 @@ def db_dump(module, host, user, password, db_name, target, port, socket=None): if socket is not None: cmd += " --socket=%s" % pipes.quote(socket) else: - cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes(port)) + cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port)) cmd += " %s" % pipes.quote(db_name) if os.path.splitext(target)[-1] == '.gz': cmd = cmd + ' | gzip > ' + pipes.quote(target) From d1753046e001c56e960980da79034a79d5a5bf27 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 13 Mar 2014 07:46:08 -0400 Subject: [PATCH 338/772] Revert "Correctly catch template errors without returning invalid data" This reverts commit 3cd7d47b7ec2a8a560c6da803768eb3d9983c252. --- lib/ansible/utils/template.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index e58543c411f..fc4ff9fd204 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -316,8 +316,6 @@ def template_from_string(basedir, data, vars, fail_on_undefined=False): except Exception, e: if 'recursion' in str(e): raise errors.AnsibleError("recursive loop detected in template string: %s" % data) - elif isinstance(e, TemplateSyntaxError): - raise errors.AnsibleError("there was an error in the template: %s" % data) else: return data From 92e8e59cad67eb3e0ead601fc17eafcb9508c206 Mon Sep 17 00:00:00 2001 From: Philippe Pepiot Date: Thu, 13 Mar 2014 15:28:37 +0100 Subject: [PATCH 339/772] apt_key: Fix traceback when key_id format is invalid Signed-off-by: Philippe Pepiot --- library/packaging/apt_key | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/apt_key b/library/packaging/apt_key index e071833c3be..57cc48c967c 100644 --- a/library/packaging/apt_key +++ b/library/packaging/apt_key @@ -199,7 +199,7 @@ def main(): if key_id.startswith('0x'): key_id = key_id[2:] except ValueError: - module.fail_json("Invalid key_id") + module.fail_json(msg="Invalid key_id", id=key_id) # FIXME: I think we have a common facility for this, if not, want check_missing_binaries(module) From fae36165a27ceb2cb834c69ca4abc4f5f40fd1fa Mon Sep 17 00:00:00 2001 From: patrickheeney Date: Thu, 13 Mar 2014 07:58:12 -0700 Subject: [PATCH 340/772] Fix documentation for accept_hostkey --- library/source_control/git | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/library/source_control/git b/library/source_control/git index 65c799a5df4..f4ee4d522b7 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -45,12 +45,13 @@ options: branch name, or a tag name. accept_hostkey: required: false - default: false + default: "no" + choices: [ "yes", "no" ] version_added: "1.5" description: - - Add the hostkey for the repo url if not already added. - If ssh_args contains "-o StrictHostKeyChecking=no", this - parameter is ignored. + - if C(yes), adds the hostkey for the repo url if not already + added. If ssh_args contains "-o StrictHostKeyChecking=no", + this parameter is ignored. 
ssh_opts: required: false default: None From 07dd02c25a024e00d27f3152a4bb3c401cb9e3e4 Mon Sep 17 00:00:00 2001 From: James Laska Date: Thu, 13 Mar 2014 09:52:36 -0400 Subject: [PATCH 341/772] [test_ec2*] cloud integration test updates To support parallel cloud test execution, create and provide a random string to cloud integration tests. The variable 'resource_prefix' can be used in cloud roles and during resource cleanup to safely create/destroy cloud-based resources. Additional changes include: * The roles test_ec2_key and test_ec2_group were updated to use to {{resource_prefix}}. * Additionally, the Makefile was updated to set resource_prefix to a random string. The Makefile will also use 'resource_prefix' during cloud_cleanup. * All test_ec2* roles were updated to add 'setup_ec2' as a role dependency. --- test/integration/Makefile | 17 +++++++++++------ test/integration/cleanup_ec2.py | 14 +++++++++----- .../roles/setup_ec2/defaults/main.yml | 2 ++ test/integration/roles/test_ec2/meta/main.yml | 4 ++-- .../roles/test_ec2_ami/meta/main.yml | 4 ++-- .../roles/test_ec2_eip/meta/main.yml | 4 ++-- .../roles/test_ec2_elb/meta/main.yml | 4 ++-- .../roles/test_ec2_elb_lb/meta/main.yml | 4 ++-- .../roles/test_ec2_facts/meta/main.yml | 4 ++-- .../roles/test_ec2_group/defaults/main.yml | 2 +- .../roles/test_ec2_key/defaults/main.yml | 2 +- .../roles/test_ec2_tag/meta/main.yml | 4 ++-- .../roles/test_ec2_vol/meta/main.yml | 4 ++-- .../roles/test_ec2_vpc/meta/main.yml | 4 ++-- 14 files changed, 42 insertions(+), 31 deletions(-) create mode 100644 test/integration/roles/setup_ec2/defaults/main.yml diff --git a/test/integration/Makefile b/test/integration/Makefile index 7a4072d44ae..94d97b46a40 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -1,6 +1,11 @@ INVENTORY ?= inventory VARS_FILE ?= integration_config.yml +# Create a semi-random string for use when testing cloud-based resources +ifndef CLOUD_RESOURCE_PREFIX +CLOUD_RESOURCE_PREFIX := $(shell python -c "import string,random; print 'ansible-testing-' + ''.join(random.choice(string.ascii_letters + string.digits) for _ in xrange(8));") +endif + all: non_destructive destructive check_mode test_hash non_destructive: @@ -21,24 +26,24 @@ cloud: amazon rackspace cloud_cleanup: amazon_cleanup rackspace_cleanup amazon_cleanup: - python cleanup_ec2.py -y + python cleanup_ec2.py -y --match="^$(CLOUD_RESOURCE_PREFIX)" rackspace_cleanup: @echo "FIXME - cleanup_rax.py not yet implemented" - @#python cleanup_rax.py -y + @# python cleanup_rax.py -y --match="^$(CLOUD_RESOURCE_PREFIX)" credentials.yml: @echo "No credentials.yml file found. A file named 'credentials.yml' is needed to provide credentials needed to run cloud tests. See sample 'credentials.template' file." @exit 1 amazon: credentials.yml - ansible-playbook amazon.yml -i $(INVENTORY) -e @$(VARS_FILE) -e @credentials.yml -v $(TEST_FLAGS) ; \ + ansible-playbook amazon.yml -i $(INVENTORY) -e @$(VARS_FILE) -e @credentials.yml -e "resource_prefix=$(CLOUD_RESOURCE_PREFIX)" -v $(TEST_FLAGS) ; \ RC=$$? ; \ - make amazon_cleanup ; \ + CLOUD_RESOURCE_PREFIX="$(CLOUD_RESOURCE_PREFIX)" make amazon_cleanup ; \ exit $$RC; rackspace: credentials.yml - ansible-playbook rackspace.yml -i $(INVENTORY) -e @$(VARS_FILE) -e @credentials.yml -v $(TEST_FLAGS) ; \ + ansible-playbook rackspace.yml -i $(INVENTORY) -e @$(VARS_FILE) -e @credentials.yml -e "resource_prefix=$(CLOUD_RESOURCE_PREFIX)" -v $(TEST_FLAGS) ; \ RC=$$? 
; \ - make rackspace_cleanup ; \ + CLOUD_RESOURCE_PREFIX="$(CLOUD_RESOURCE_PREFIX)" make rackspace_cleanup ; \ exit $$RC; diff --git a/test/integration/cleanup_ec2.py b/test/integration/cleanup_ec2.py index 0e974c2089e..d82dc4f340b 100644 --- a/test/integration/cleanup_ec2.py +++ b/test/integration/cleanup_ec2.py @@ -15,7 +15,7 @@ def delete_aws_resources(get_func, attr, opts): for item in get_func(): val = getattr(item, attr) if re.search(opts.match_re, val): - prompt_and_delete(item, "Delete object with %s=%s? [y/n]: " % (attr, val), opts.assumeyes) + prompt_and_delete(item, "Delete matching %s? [y/n]: " % (item,), opts.assumeyes) def prompt_and_delete(item, prompt, assumeyes): if not assumeyes: @@ -23,6 +23,7 @@ def prompt_and_delete(item, prompt, assumeyes): assert hasattr(item, 'delete'), "Class <%s> has no delete attribute" % item.__class__ if assumeyes: item.delete() + print ("Deleted %s" % item) def parse_args(): # Load details from credentials.yml @@ -74,8 +75,11 @@ if __name__ == '__main__': aws = boto.connect_ec2(aws_access_key_id=opts.ec2_access_key, aws_secret_access_key=opts.ec2_secret_key) - # Delete matching keys - delete_aws_resources(aws.get_all_key_pairs, 'name', opts) + try: + # Delete matching keys + delete_aws_resources(aws.get_all_key_pairs, 'name', opts) - # Delete matching groups - delete_aws_resources(aws.get_all_security_groups, 'name', opts) + # Delete matching groups + delete_aws_resources(aws.get_all_security_groups, 'name', opts) + except KeyboardInterrupt, e: + print "\nExiting on user command." diff --git a/test/integration/roles/setup_ec2/defaults/main.yml b/test/integration/roles/setup_ec2/defaults/main.yml new file mode 100644 index 00000000000..fb1f88b1ecb --- /dev/null +++ b/test/integration/roles/setup_ec2/defaults/main.yml @@ -0,0 +1,2 @@ +--- +resource_prefix: 'ansible-testing-' diff --git a/test/integration/roles/test_ec2/meta/main.yml b/test/integration/roles/test_ec2/meta/main.yml index 1050c23ce30..1f64f1169a9 100644 --- a/test/integration/roles/test_ec2/meta/main.yml +++ b/test/integration/roles/test_ec2/meta/main.yml @@ -1,3 +1,3 @@ -dependencies: +dependencies: - prepare_tests - + - setup_ec2 diff --git a/test/integration/roles/test_ec2_ami/meta/main.yml b/test/integration/roles/test_ec2_ami/meta/main.yml index 1050c23ce30..1f64f1169a9 100644 --- a/test/integration/roles/test_ec2_ami/meta/main.yml +++ b/test/integration/roles/test_ec2_ami/meta/main.yml @@ -1,3 +1,3 @@ -dependencies: +dependencies: - prepare_tests - + - setup_ec2 diff --git a/test/integration/roles/test_ec2_eip/meta/main.yml b/test/integration/roles/test_ec2_eip/meta/main.yml index 1050c23ce30..1f64f1169a9 100644 --- a/test/integration/roles/test_ec2_eip/meta/main.yml +++ b/test/integration/roles/test_ec2_eip/meta/main.yml @@ -1,3 +1,3 @@ -dependencies: +dependencies: - prepare_tests - + - setup_ec2 diff --git a/test/integration/roles/test_ec2_elb/meta/main.yml b/test/integration/roles/test_ec2_elb/meta/main.yml index 1050c23ce30..1f64f1169a9 100644 --- a/test/integration/roles/test_ec2_elb/meta/main.yml +++ b/test/integration/roles/test_ec2_elb/meta/main.yml @@ -1,3 +1,3 @@ -dependencies: +dependencies: - prepare_tests - + - setup_ec2 diff --git a/test/integration/roles/test_ec2_elb_lb/meta/main.yml b/test/integration/roles/test_ec2_elb_lb/meta/main.yml index 1050c23ce30..1f64f1169a9 100644 --- a/test/integration/roles/test_ec2_elb_lb/meta/main.yml +++ b/test/integration/roles/test_ec2_elb_lb/meta/main.yml @@ -1,3 +1,3 @@ -dependencies: +dependencies: - prepare_tests - + - 
setup_ec2 diff --git a/test/integration/roles/test_ec2_facts/meta/main.yml b/test/integration/roles/test_ec2_facts/meta/main.yml index 1050c23ce30..1f64f1169a9 100644 --- a/test/integration/roles/test_ec2_facts/meta/main.yml +++ b/test/integration/roles/test_ec2_facts/meta/main.yml @@ -1,3 +1,3 @@ -dependencies: +dependencies: - prepare_tests - + - setup_ec2 diff --git a/test/integration/roles/test_ec2_group/defaults/main.yml b/test/integration/roles/test_ec2_group/defaults/main.yml index e10da44d847..4063791af4b 100644 --- a/test/integration/roles/test_ec2_group/defaults/main.yml +++ b/test/integration/roles/test_ec2_group/defaults/main.yml @@ -1,5 +1,5 @@ --- # defaults file for test_ec2_group -ec2_group_name: 'ansible-testing-{{ random_string }}' +ec2_group_name: '{{resource_prefix}}' ec2_group_description: 'Created by ansible integration tests' diff --git a/test/integration/roles/test_ec2_key/defaults/main.yml b/test/integration/roles/test_ec2_key/defaults/main.yml index 2242ea07093..df0082d999b 100644 --- a/test/integration/roles/test_ec2_key/defaults/main.yml +++ b/test/integration/roles/test_ec2_key/defaults/main.yml @@ -1,3 +1,3 @@ --- # defaults file for test_ec2_key -ec2_key_name: 'ansible-testing-{{ random_string }}' +ec2_key_name: '{{resource_prefix}}' diff --git a/test/integration/roles/test_ec2_tag/meta/main.yml b/test/integration/roles/test_ec2_tag/meta/main.yml index 1050c23ce30..1f64f1169a9 100644 --- a/test/integration/roles/test_ec2_tag/meta/main.yml +++ b/test/integration/roles/test_ec2_tag/meta/main.yml @@ -1,3 +1,3 @@ -dependencies: +dependencies: - prepare_tests - + - setup_ec2 diff --git a/test/integration/roles/test_ec2_vol/meta/main.yml b/test/integration/roles/test_ec2_vol/meta/main.yml index 1050c23ce30..1f64f1169a9 100644 --- a/test/integration/roles/test_ec2_vol/meta/main.yml +++ b/test/integration/roles/test_ec2_vol/meta/main.yml @@ -1,3 +1,3 @@ -dependencies: +dependencies: - prepare_tests - + - setup_ec2 diff --git a/test/integration/roles/test_ec2_vpc/meta/main.yml b/test/integration/roles/test_ec2_vpc/meta/main.yml index 1050c23ce30..1f64f1169a9 100644 --- a/test/integration/roles/test_ec2_vpc/meta/main.yml +++ b/test/integration/roles/test_ec2_vpc/meta/main.yml @@ -1,3 +1,3 @@ -dependencies: +dependencies: - prepare_tests - + - setup_ec2 From b0d22b76bdb4f695a3d1c3e8306523df1c538b67 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 13 Mar 2014 12:01:55 -0400 Subject: [PATCH 342/772] Add an ast.parse unit test for modules to simulate ansible-doc --- test/units/TestModules.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 test/units/TestModules.py diff --git a/test/units/TestModules.py b/test/units/TestModules.py new file mode 100644 index 00000000000..54e3ec3213f --- /dev/null +++ b/test/units/TestModules.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- + +import os +import ast +import unittest +from ansible import utils + + +class TestModules(unittest.TestCase): + + def list_all_modules(self): + paths = utils.plugins.module_finder._get_paths() + paths = [x for x in paths if os.path.isdir(x)] + module_list = [] + for path in paths: + for (dirpath, dirnames, filenames) in os.walk(path): + for filename in filenames: + module_list.append(os.path.join(dirpath, filename)) + return module_list + + def test_ast_parse(self): + module_list = self.list_all_modules() + ERRORS = [] + # attempt to parse each module with ast + for m in module_list: + try: + ast.parse(''.join(open(m))) + except Exception, e: + ERRORS.append((m, 
e)) + assert len(ERRORS) == 0, "get_docstring errors: %s" % ERRORS From b93b99871b3aa7caaa8738fc80c16cffbd77009f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 13 Mar 2014 12:58:20 -0500 Subject: [PATCH 343/772] Fix small typo resulting in a traceback for the apt_key module --- library/packaging/apt_key | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/apt_key b/library/packaging/apt_key index e071833c3be..d5473558801 100644 --- a/library/packaging/apt_key +++ b/library/packaging/apt_key @@ -112,7 +112,7 @@ REQUIRED_EXECUTABLES=['gpg', 'grep', 'apt-key'] def check_missing_binaries(module): missing = [e for e in REQUIRED_EXECUTABLES if not find_executable(e)] if len(missing): - module.fail_json(msg="binaries are missing", names=all) + module.fail_json(msg="binaries are missing", names=missing) def all_keys(module, keyring): if keyring: From 4273cb2d8ead8f95fa0032da4082ffd7da338472 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 13 Mar 2014 13:51:10 -0500 Subject: [PATCH 344/772] Rejoin args list into a string for run_command when using an unsafe shell This allows the use of an args list with leading environment variables, which otherwise would fail due to the way Popen works. --- lib/ansible/module_utils/basic.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index f47347482f6..00dd8011c7e 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1009,7 +1009,9 @@ class AnsibleModule(object): shell = False if isinstance(args, list): - pass + if use_unsafe_shell: + args = " ".join([pipes.quote(x) for x in args]) + shell = True elif isinstance(args, basestring) and use_unsafe_shell: shell = True elif isinstance(args, basestring): From b1a37dcc086489bea23df64d2f47dc90cf69e620 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 13 Mar 2014 14:04:51 -0500 Subject: [PATCH 345/772] Revert "Rejoin args list into a string for run_command when using an unsafe shell" This reverts commit 4273cb2d8ead8f95fa0032da4082ffd7da338472. 
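The rejoin/revert/rejoin sequence here (patches 344, 345, and 347) comes down to how subprocess.Popen treats its first argument. A hedged standalone sketch of the failure mode the rejoin works around:

    import pipes
    import subprocess

    args = ["FOO=bar", "/bin/echo", "ok"]

    # As a list with shell=False, Popen tries to exec 'FOO=bar' as the
    # program itself, which fails: no such executable exists.
    try:
        subprocess.Popen(args).wait()
    except OSError as e:
        print("list form failed: %s" % e)

    # Joined into one safely quoted string and run through a shell, the
    # leading assignment is interpreted as an environment variable.
    joined = " ".join(pipes.quote(x) for x in args)
    print(subprocess.check_output(joined, shell=True))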
--- lib/ansible/module_utils/basic.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 00dd8011c7e..f47347482f6 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1009,9 +1009,7 @@ class AnsibleModule(object): shell = False if isinstance(args, list): - if use_unsafe_shell: - args = " ".join([pipes.quote(x) for x in args]) - shell = True + pass elif isinstance(args, basestring) and use_unsafe_shell: shell = True elif isinstance(args, basestring): From 5233d4bc31f4759c59d2d19c580148f35c6e0a4b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 13 Mar 2014 14:28:51 -0500 Subject: [PATCH 346/772] Fix typo in run_command when sending data that is not binary --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index f47347482f6..069bee6224f 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1082,7 +1082,7 @@ class AnsibleModule(object): if data: if not binary_data: - data += '\\n' + data += '\n' out, err = cmd.communicate(input=data) rc = cmd.returncode except (OSError, IOError), e: From 677008bef7f179ab411dd6e17c44522a4d898195 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 13 Mar 2014 13:51:10 -0500 Subject: [PATCH 347/772] Rejoin args list into a string for run_command when using an unsafe shell This allows the use of an args list with leading environment variables, which otherwise would fail due to the way Popen works. --- lib/ansible/module_utils/basic.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 069bee6224f..7da09908eb7 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1009,7 +1009,9 @@ class AnsibleModule(object): shell = False if isinstance(args, list): - pass + if use_unsafe_shell: + args = " ".join([pipes.quote(x) for x in args]) + shell = True elif isinstance(args, basestring) and use_unsafe_shell: shell = True elif isinstance(args, basestring): From 69ff35572686172faeb064cf8f2809bba5c899d4 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 13 Mar 2014 14:56:44 -0500 Subject: [PATCH 348/772] Adding new unit tests for module_utils/basic (run_command only now) --- test/units/TestModuleUtilsBasic.py | 156 +++++++++++++++++++++++++++++ 1 file changed, 156 insertions(+) create mode 100644 test/units/TestModuleUtilsBasic.py diff --git a/test/units/TestModuleUtilsBasic.py b/test/units/TestModuleUtilsBasic.py new file mode 100644 index 00000000000..167ee370fd0 --- /dev/null +++ b/test/units/TestModuleUtilsBasic.py @@ -0,0 +1,156 @@ +import os +import tempfile + +import unittest +from nose.tools import raises + +from ansible import errors +from ansible.module_common import ModuleReplacer +from ansible.utils import md5 as utils_md5 + +TEST_MODULE_DATA = """ +from ansible.module_utils.basic import * + +def get_module(): + return AnsibleModule( + argument_spec = dict(), + supports_check_mode = True, + no_log = True, + ) + +get_module() + +""" + +class TestModuleUtilsBasic(unittest.TestCase): + + def cleanup_temp_file(self, fd, path): + try: + os.close(fd) + os.remove(path) + except: + pass + + def cleanup_temp_dir(self, path): + try: + os.rmdir(path) + except: + pass + + def setUp(self): + # create a temporary file for the test module + # 
we're about to generate + self.tmp_fd, self.tmp_path = tempfile.mkstemp() + os.write(self.tmp_fd, TEST_MODULE_DATA) + + # template the module code and eval it + module_data, module_style, shebang = ModuleReplacer().modify_module(self.tmp_path, {}, "", {}) + + d = {} + exec(module_data, d, d) + self.module = d['get_module']() + + def tearDown(self): + self.cleanup_temp_file(self.tmp_fd, self.tmp_path) + + ################################################################################# + # run_command() tests + + # test run_command with a string command + def test_run_command_string(self): + (rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'") + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar') + (rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'", use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar') + + # test run_command with an array of args (with both use_unsafe_shell=True|False) + def test_run_command_args(self): + (rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"]) + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar') + (rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"], use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar') + + # test run_command with leading environment variables + @raises(SystemExit) + def test_run_command_string_with_env_variables(self): + self.module.run_command('FOO=bar /bin/echo -n "foo bar"') + + @raises(SystemExit) + def test_run_command_args_with_env_variables(self): + self.module.run_command(['FOO=bar', '/bin/echo', '-n', 'foo bar']) + + def test_run_command_string_unsafe_with_env_variables(self): + (rc, out, err) = self.module.run_command('FOO=bar /bin/echo -n "foo bar"', use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar') + + # test run_command with a command pipe (with both use_unsafe_shell=True|False) + def test_run_command_string_unsafe_with_pipe(self): + (rc, out, err) = self.module.run_command('echo -n "foo bar" | cat', use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar') + + # test run_command with a shell redirect in (with both use_unsafe_shell=True|False) + def test_run_command_string_unsafe_with_redirect_in(self): + (rc, out, err) = self.module.run_command('cat << EOF\nfoo bar\nEOF', use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar\n') + + # test run_command with a shell redirect out (with both use_unsafe_shell=True|False) + def test_run_command_string_unsafe_with_redirect_out(self): + tmp_fd, tmp_path = tempfile.mkstemp() + try: + (rc, out, err) = self.module.run_command('echo "foo bar" > %s' % tmp_path, use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertTrue(os.path.exists(tmp_path)) + md5sum = utils_md5(tmp_path) + self.assertEqual(md5sum, '5ceaa7ed396ccb8e959c02753cb4bd18') + except: + self.cleanup_temp_file(tmp_fd, tmp_path) + raise + self.cleanup_temp_file(tmp_fd, tmp_path) + + # test run_command with a double shell redirect out (append) (with both use_unsafe_shell=True|False) + def test_run_command_string_unsafe_with_double_redirect_out(self): + tmp_fd, tmp_path = tempfile.mkstemp() + try: + (rc, out, err) = self.module.run_command('echo "foo bar" >> %s' % tmp_path, use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertTrue(os.path.exists(tmp_path)) + md5sum = utils_md5(tmp_path) + self.assertEqual(md5sum, '5ceaa7ed396ccb8e959c02753cb4bd18') + except: + self.cleanup_temp_file(tmp_fd, 
tmp_path) + raise + self.cleanup_temp_file(tmp_fd, tmp_path) + + # test run_command with data + def test_run_command_string_with_data(self): + (rc, out, err) = self.module.run_command('cat', data='foo bar') + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar\n') + + # test run_command with binary data + def test_run_command_string_with_binary_data(self): + (rc, out, err) = self.module.run_command('cat', data='\x41\x42\x43\x44', binary_data=True) + self.assertEqual(rc, 0) + self.assertEqual(out, 'ABCD') + + # test run_command with a cwd set + def test_run_command_string_with_cwd(self): + tmp_path = tempfile.mkdtemp() + try: + (rc, out, err) = self.module.run_command('pwd', cwd=tmp_path) + self.assertEqual(rc, 0) + self.assertTrue(os.path.exists(tmp_path)) + self.assertEqual(out.strip(), tmp_path) + except: + self.cleanup_temp_dir(tmp_path) + raise + self.cleanup_temp_dir(tmp_path) + + From a348f672389e6f1883d6afbe90ef76831e200bc0 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 13 Mar 2014 17:15:23 -0400 Subject: [PATCH 349/772] Reset the current directory after running subprocess.Popen --- lib/ansible/module_utils/basic.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 7da09908eb7..d0bfde69179 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1074,12 +1074,17 @@ class AnsibleModule(object): if cwd and os.path.isdir(cwd): kwargs['cwd'] = cwd + # store the pwd + prev_dir = os.getcwd() - try: - # make sure we're in the right working directory - if cwd and os.path.isdir(cwd): + # make sure we're in the right working directory + if cwd and os.path.isdir(cwd): + try: os.chdir(cwd) + except (OSError, IOError), e: + self.fail_json(rc=e.errno, msg="Could not open %s , %s" % (cwd, str(e))) + try: cmd = subprocess.Popen(args, **kwargs) if data: @@ -1094,6 +1099,10 @@ class AnsibleModule(object): if rc != 0 and check_rc: msg = err.rstrip() self.fail_json(cmd=clean_args, rc=rc, stdout=out, stderr=err, msg=msg) + + # reset the pwd + os.chdir(prev_dir) + return (rc, out, err) def append_to_file(self, filename, str): From 32996bf121373bb742ff0e144d45b4ff3415cefd Mon Sep 17 00:00:00 2001 From: Paul Durivage Date: Thu, 13 Mar 2014 16:18:50 -0500 Subject: [PATCH 350/772] Documentation updates --- library/cloud/rax_files | 5 +++++ library/cloud/rax_files_objects | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/library/cloud/rax_files b/library/cloud/rax_files index d2958c2054a..720ea53e191 100644 --- a/library/cloud/rax_files +++ b/library/cloud/rax_files @@ -59,6 +59,11 @@ options: description: - Region to create an instance in default: DFW + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present ttl: description: - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes. 
diff --git a/library/cloud/rax_files_objects b/library/cloud/rax_files_objects index 0b733487714..1c200b65c88 100644 --- a/library/cloud/rax_files_objects +++ b/library/cloud/rax_files_objects @@ -83,6 +83,11 @@ options: flat directory choices: ["yes", "no"] default: "yes" + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present type: description: - Type of object to do work on From a0cb974575b3828489bd9557a28cf28715cd94bb Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 13 Mar 2014 15:06:59 -0500 Subject: [PATCH 351/772] Be sure to return to the old directory if cwd is set in run_command --- lib/ansible/module_utils/basic.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index d0bfde69179..42b9d3d669b 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1096,6 +1096,7 @@ class AnsibleModule(object): self.fail_json(rc=e.errno, msg=str(e), cmd=clean_args) except: self.fail_json(rc=257, msg=traceback.format_exc(), cmd=clean_args) + if rc != 0 and check_rc: msg = err.rstrip() self.fail_json(cmd=clean_args, rc=rc, stdout=out, stderr=err, msg=msg) From 8df29e7c8b996c6b4771701338975fe578d19ea2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 13 Mar 2014 15:07:27 -0500 Subject: [PATCH 352/772] Cleaning up some exception blocks in TestModuleUtilsBasic --- test/units/TestModuleUtilsBasic.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/units/TestModuleUtilsBasic.py b/test/units/TestModuleUtilsBasic.py index 167ee370fd0..547de0f792a 100644 --- a/test/units/TestModuleUtilsBasic.py +++ b/test/units/TestModuleUtilsBasic.py @@ -110,9 +110,9 @@ class TestModuleUtilsBasic(unittest.TestCase): md5sum = utils_md5(tmp_path) self.assertEqual(md5sum, '5ceaa7ed396ccb8e959c02753cb4bd18') except: - self.cleanup_temp_file(tmp_fd, tmp_path) raise - self.cleanup_temp_file(tmp_fd, tmp_path) + finally: + self.cleanup_temp_file(tmp_fd, tmp_path) # test run_command with a double shell redirect out (append) (with both use_unsafe_shell=True|False) def test_run_command_string_unsafe_with_double_redirect_out(self): @@ -124,9 +124,9 @@ class TestModuleUtilsBasic(unittest.TestCase): md5sum = utils_md5(tmp_path) self.assertEqual(md5sum, '5ceaa7ed396ccb8e959c02753cb4bd18') except: - self.cleanup_temp_file(tmp_fd, tmp_path) raise - self.cleanup_temp_file(tmp_fd, tmp_path) + finally: + self.cleanup_temp_file(tmp_fd, tmp_path) # test run_command with data def test_run_command_string_with_data(self): @@ -149,8 +149,8 @@ class TestModuleUtilsBasic(unittest.TestCase): self.assertTrue(os.path.exists(tmp_path)) self.assertEqual(out.strip(), tmp_path) except: - self.cleanup_temp_dir(tmp_path) raise - self.cleanup_temp_dir(tmp_path) + finally: + self.cleanup_temp_dir(tmp_path) From 316d1ca977db072139ac37c76dc7c55d6f7fb2f8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 13 Mar 2014 16:31:19 -0500 Subject: [PATCH 353/772] Updating CHANGELOG/RELEASES in devel for 1.5.3 and older releases --- CHANGELOG.md | 9 +++++++++ RELEASES.txt | 2 ++ packaging/debian/changelog | 12 ++++++++++++ packaging/rpm/ansible.spec | 8 +++++++- 4 files changed, 30 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f8ceb16fe4..578e6b80a54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,15 @@ Other notable changes: * security token additions to ec2 modules * misc bugfixes and other parameters +## 
1.5.3 "Love Walks In" - March 13, 2014 + +- Fix validate_certs and run_command errors from previous release +- Fixes to the git module related to host key checking + +## 1.5.2 "Love Walks In" - March 11, 2014 + +- Fix module errors in airbrake and apt from previous release + ## 1.5.1 "Love Walks In" - March 10, 2014 - Force command action to not be executed by the shell unless specifically enabled. diff --git a/RELEASES.txt b/RELEASES.txt index 3537d1e8389..03f71e37efa 100644 --- a/RELEASES.txt +++ b/RELEASES.txt @@ -16,6 +16,8 @@ Previous ======= 1.6 "The Cradle Will Rock" - NEXT +1.5.3 "Love Walks In" -------- 03-13-2014 +1.5.2 "Love Walks In" -------- 03-11-2014 1.5.1 "Love Walks In" -------- 03-10-2014 1.5 "Love Walks In" -------- 02-28-2014 1.4.5 "Could This Be Magic?" - 02-12-2014 diff --git a/packaging/debian/changelog b/packaging/debian/changelog index 00c863f3056..446287cd52b 100644 --- a/packaging/debian/changelog +++ b/packaging/debian/changelog @@ -4,6 +4,18 @@ ansible (1.6) unstable; urgency=low -- Michael DeHaan Fri, 28 February 2014 15:00:03 -0500 +ansible (1.5.3) unstable; urgency=low + + * 1.5.3 release + + -- Michael DeHaan Thu, 13 March 2014 08:46:00 -0500 + +ansible (1.5.2) unstable; urgency=low + + * 1.5.2 release + + -- Michael DeHaan Tue, 11 March 2014 08:46:00 -0500 + ansible (1.5.1) unstable; urgency=low * 1.5.1 release diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec index ad34053f3a9..298450d9647 100644 --- a/packaging/rpm/ansible.spec +++ b/packaging/rpm/ansible.spec @@ -102,9 +102,15 @@ rm -rf %{buildroot} %changelog -* Thu Mar 11 2014 Michael DeHaan - 1.6-0 +* Thu Mar 14 2014 Michael DeHaan - 1.6-0 * (PENDING) +* Thu Mar 13 2014 Michael DeHaan - 1.5.3 +- Release 1.5.3 + +* Tue Mar 11 2014 Michael DeHaan - 1.5.2 +- Release 1.5.2 + * Mon Mar 10 2014 Michael DeHaan - 1.5.1 - Release 1.5.1 From 47bfa8b9eb8dc5866199c449d9068570ca5bb2ce Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 13 Mar 2014 19:30:01 -0500 Subject: [PATCH 354/772] Update index.rst --- docsite/rst/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst index f7e00bfcb68..14f0e326f4b 100644 --- a/docsite/rst/index.rst +++ b/docsite/rst/index.rst @@ -16,7 +16,7 @@ We believe simplicity is relevant to all sizes of environments and design for bu Ansible manages machines in an agentless manner. There is never a question of how to upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. As OpenSSH is one of the most peer reviewed open source components, the security exposure of using the tool is greatly reduced. Ansible is decentralized -- it relies on your existing OS credentials to control access to remote machines; if needed it can easily connect with Kerberos, LDAP, and other centralized authentication management systems. -This documentation covers the current released version of Ansible (1.5.1) and also some development version features (1.6). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. 
+This documentation covers the current released version of Ansible (1.5.3) and also some development version features (1.6). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release.
 
 .. _an_introduction:

From 38d6956c1f2e2b59b0fdf9b9c4748c96e1b0c258 Mon Sep 17 00:00:00 2001
From: Patrick Gerken
Date: Fri, 14 Mar 2014 02:13:20 +0100
Subject: [PATCH 355/772] Properly catch import errors in apt

When one accidentally tries to run this module as an unprivileged user,
the error message always claims that python-apt must be installed, no
matter what, because importing apt triggers an exception for a regular
user. Explicitly catching only ImportError lets the real exception
bubble up. That exception clearly says Permission denied, which gives
the user a much better idea of what to fix.
---
 library/packaging/apt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/packaging/apt b/library/packaging/apt
index 311bfa1199b..5e041c769bd 100644
--- a/library/packaging/apt
+++ b/library/packaging/apt
@@ -153,7 +153,7 @@ HAS_PYTHON_APT = True
 try:
     import apt
     import apt_pkg
-except:
+except ImportError:
     HAS_PYTHON_APT = False
 
 def package_split(pkgspec):

From b384db53f890b0621db1d8aba35f529cdd823795 Mon Sep 17 00:00:00 2001
From: Sam Hanes
Date: Fri, 14 Mar 2014 00:22:32 -0700
Subject: [PATCH 356/772] Add support for YAML lists to apt module.

---
 library/packaging/apt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/library/packaging/apt b/library/packaging/apt
index 311bfa1199b..58625304461 100644
--- a/library/packaging/apt
+++ b/library/packaging/apt
@@ -363,7 +363,7 @@ def main():
         update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
         cache_valid_time = dict(type='int'),
         purge = dict(default=False, type='bool'),
-        package = dict(default=None, aliases=['pkg', 'name']),
+        package = dict(default=None, aliases=['pkg', 'name'], type='list'),
         default_release = dict(default=None, aliases=['default-release']),
         install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'),
         force = dict(default='no', type='bool'),
@@ -444,7 +444,7 @@ def main():
     if p['upgrade']:
         upgrade(module, p['upgrade'], force_yes, dpkg_options)
 
-    packages = p['package'].split(',')
+    packages = p['package']
     latest = p['state'] == 'latest'
     for package in packages:
         if package.count('=') > 1:

From 32fa21c95fbd7324ead570938139a4fd5268977c Mon Sep 17 00:00:00 2001
From: Johannes 'fish' Ziemke
Date: Fri, 14 Mar 2014 11:11:56 +0100
Subject: [PATCH 357/772] Rename present to running, add new present state

The new present state just makes sure that a container exists, not that
it is running, although it does get started on creation. This is very
useful for data volumes. This also changes the old present state (now
running, the default) to only create the container if it is not found;
otherwise it just gets started.
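A rough sketch of the state semantics described above, with real containers reduced to two booleans (this is an illustration, not the module's actual code):

    def planned_actions(state, exists, running):
        # present: the container only has to exist (handy for data
        # volumes), though a freshly created one does get started;
        # running: the container must actually be up.
        actions = []
        if state in ("present", "running") and not exists:
            actions += ["create", "start"]
        elif state == "running" and not running:
            actions.append("start")
        return actions

    print(planned_actions("present", exists=True, running=False))   # []
    print(planned_actions("running", exists=True, running=False))   # ['start']
    print(planned_actions("running", exists=False, running=False))  # ['create', 'start']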
See also the discussion on the mailing list:
https://groups.google.com/forum/#!topic/ansible-devel/jB84gdhPzLQ

This closes #6395
---
 library/cloud/docker | 50 ++++++++++++++++++++++++++------------------
 1 file changed, 30 insertions(+), 20 deletions(-)

diff --git a/library/cloud/docker b/library/cloud/docker
index a1e9a5074c8..46ef30d4fe0 100644
--- a/library/cloud/docker
+++ b/library/cloud/docker
@@ -148,7 +148,7 @@ options:
     - Set the state of the container
     required: false
     default: present
-    choices: [ "present", "stopped", "absent", "killed", "restarted" ]
+    choices: [ "present", "running", "stopped", "absent", "killed", "restarted" ]
     aliases: []
   privileged:
     description:
@@ -632,7 +632,7 @@ def main():
         env = dict(),
         dns = dict(),
         detach = dict(default=True, type='bool'),
-        state = dict(default='present', choices=['absent', 'present', 'stopped', 'killed', 'restarted']),
+        state = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']),
         debug = dict(default=False, type='bool'),
         privileged = dict(default=False, type='bool'),
         lxc_conf = dict(default=None),
@@ -662,25 +662,35 @@ def main():
     changed = False
 
     # start/stop containers
-    if state == "present":
-
-        # make sure a container with `name` is running
-        if name and "/" + name not in map(lambda x: x.get('Name'), running_containers):
+    if state in [ "running", "present" ]:
+
+        # make sure a container with `name` exists, if not create and start it
+        if name and "/" + name not in map(lambda x: x.get('Name'), deployed_containers):
             containers = manager.create_containers(1)
-            manager.start_containers(containers)
-
-        # start more containers if we don't have enough
-        elif delta > 0:
-            containers = manager.create_containers(delta)
-            manager.start_containers(containers)
-
-        # stop containers if we have too many
-        elif delta < 0:
-            containers_to_stop = running_containers[0:abs(delta)]
-            containers = manager.stop_containers(containers_to_stop)
-            manager.remove_containers(containers_to_stop)
-
-        facts = manager.get_running_containers()
+            if state == "present": #otherwise it get (re)started later anyways..
+                manager.start_containers(containers)
+            running_containers = manager.get_running_containers()
+            deployed_containers = manager.get_deployed_containers()
+
+        if state == "running":
+            # make sure a container with `name` is running
+            if name and "/" + name not in map(lambda x: x.get('Name'), running_containers):
+                manager.start_containers(deployed_containers)
+
+            # start more containers if we don't have enough
+            elif delta > 0:
+                containers = manager.create_containers(delta)
+                manager.start_containers(containers)
+
+            # stop containers if we have too many
+            elif delta < 0:
+                containers_to_stop = running_containers[0:abs(delta)]
+                containers = manager.stop_containers(containers_to_stop)
+                manager.remove_containers(containers_to_stop)
+
+            facts = manager.get_running_containers()
+        else:
+            facts = manager.get_deployed_containers()
 
     # stop and remove containers
     elif state == "absent":

From 5cfefb326a52025cb8d365eea3a0c3586605d97c Mon Sep 17 00:00:00 2001
From: James Laska
Date: Fri, 14 Mar 2014 10:24:52 -0400
Subject: [PATCH 358/772] Correct check_mode conditional logic

The desired behavior is to _not_ add the ppa signing key when check_mode
is enabled. This fix corrects the conditional logic to comply with the
stated behavior.
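The two-line swap below withholds the key-adding callback during a dry run instead of returning it. A hedged sketch of the corrected pattern, with the module-specific pieces stubbed out:

    def get_add_key_callback(check_mode):
        def _add_key(command):
            # stand-in for module.run_command(command, check_rc=True)
            print("running: %s" % command)

        # check mode must not modify the system: return no callback and
        # let the caller treat None as "skip the key import".
        if check_mode:
            return None
        return _add_key

    callback = get_add_key_callback(check_mode=False)
    if callback is not None:
        callback("apt-key adv --recv-keys <keyid>")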
--- library/packaging/apt_repository | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/packaging/apt_repository b/library/packaging/apt_repository index 7a19dabc167..8e902d5a448 100644 --- a/library/packaging/apt_repository +++ b/library/packaging/apt_repository @@ -353,9 +353,9 @@ def get_add_ppa_signing_key_callback(module): module.run_command(command, check_rc=True) if module.check_mode: - return _run_command - else: return None + else: + return _run_command def main(): From 9d6518de5a9e76aed8b7ec3b79c4ae693c1caa6c Mon Sep 17 00:00:00 2001 From: James Tanner Date: Fri, 14 Mar 2014 11:16:45 -0400 Subject: [PATCH 359/772] Fixes #6482 Check sysctl file path and catch read exceptions --- library/system/sysctl | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/library/system/sysctl b/library/system/sysctl index 97e5bc5e6c1..1b29fed8f1e 100644 --- a/library/system/sysctl +++ b/library/system/sysctl @@ -235,7 +235,16 @@ class SysctlModule(object): # Get the token value from the sysctl file def read_sysctl_file(self): - lines = open(self.sysctl_file, "r").readlines() + + lines = [] + if os.path.isfile(self.sysctl_file): + try: + f = open(self.sysctl_file, "r") + lines = f.readlines() + f.close() + except IOError, e: + self.module.fail_json(msg="Failed to open %s: %s" % (self.sysctl_file, str(e))) + for line in lines: line = line.strip() self.file_lines.append(line) From 5062f4962f8e7939c4656a059fc86e0fb4a3bf55 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 7 Mar 2014 21:51:12 -0600 Subject: [PATCH 360/772] Unit tests for ansible.utils --- test/units/TestModuleUtilsBasic.py | 7 +- test/units/TestUtils.py | 658 +++++++++++++++++-- test/units/TestUtilsStringFunctions.py | 33 + test/units/inventory_test_data/broken.yml | 2 + test/units/inventory_test_data/encrypted.yml | 6 + 5 files changed, 640 insertions(+), 66 deletions(-) create mode 100644 test/units/TestUtilsStringFunctions.py create mode 100644 test/units/inventory_test_data/broken.yml create mode 100644 test/units/inventory_test_data/encrypted.yml diff --git a/test/units/TestModuleUtilsBasic.py b/test/units/TestModuleUtilsBasic.py index 547de0f792a..3d85b613525 100644 --- a/test/units/TestModuleUtilsBasic.py +++ b/test/units/TestModuleUtilsBasic.py @@ -50,8 +50,13 @@ class TestModuleUtilsBasic(unittest.TestCase): exec(module_data, d, d) self.module = d['get_module']() + # module_utils/basic.py screws with CWD, let's save it and reset + self.cwd = os.getcwd() + def tearDown(self): self.cleanup_temp_file(self.tmp_fd, self.tmp_path) + # Reset CWD back to what it was before basic.py changed it + os.chdir(self.cwd) ################################################################################# # run_command() tests @@ -147,7 +152,7 @@ class TestModuleUtilsBasic(unittest.TestCase): (rc, out, err) = self.module.run_command('pwd', cwd=tmp_path) self.assertEqual(rc, 0) self.assertTrue(os.path.exists(tmp_path)) - self.assertEqual(out.strip(), tmp_path) + self.assertEqual(out.strip(), os.path.realpath(tmp_path)) except: raise finally: diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index 28e0dfc0cd2..a56a79e4ef2 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -4,12 +4,21 @@ import unittest import os import os.path import tempfile +import yaml +import passlib.hash +import string +import StringIO +import copy from nose.plugins.skip import SkipTest import ansible.utils +import ansible.errors +import ansible.constants as C import ansible.utils.template as 
template2 +from ansible import __version__ + import sys reload(sys) sys.setdefaultencoding("utf8") @@ -22,22 +31,22 @@ class TestUtils(unittest.TestCase): input = "before # comment" expected = "before " actual = ansible.utils.before_comment(input) - assert expected == actual + self.assertEqual(expected, actual) input = "before \# not a comment" expected = "before # not a comment" actual = ansible.utils.before_comment(input) - assert expected == actual + self.assertEqual(expected, actual) input = "" expected = "" actual = ansible.utils.before_comment(input) - assert expected == actual + self.assertEqual(expected, actual) input = "#" expected = "" actual = ansible.utils.before_comment(input) - assert expected == actual + self.assertEqual(expected, actual) ##################################### ### check_conditional tests @@ -45,97 +54,616 @@ class TestUtils(unittest.TestCase): def test_check_conditional_jinja2_literals(self): # see http://jinja.pocoo.org/docs/templates/#literals + # none + self.assertEqual(ansible.utils.check_conditional( + None, '/', {}), True) + self.assertEqual(ansible.utils.check_conditional( + '', '/', {}), True) + + # list + self.assertEqual(ansible.utils.check_conditional( + ['true'], '/', {}), True) + self.assertEqual(ansible.utils.check_conditional( + ['false'], '/', {}), False) + + # non basestring or list + self.assertEqual(ansible.utils.check_conditional( + {}, '/', {}), {}) + # boolean - assert(ansible.utils.check_conditional( - 'true', '/', {}) == True) - assert(ansible.utils.check_conditional( - 'false', '/', {}) == False) - assert(ansible.utils.check_conditional( - 'True', '/', {}) == True) - assert(ansible.utils.check_conditional( - 'False', '/', {}) == False) + self.assertEqual(ansible.utils.check_conditional( + 'true', '/', {}), True) + self.assertEqual(ansible.utils.check_conditional( + 'false', '/', {}), False) + self.assertEqual(ansible.utils.check_conditional( + 'True', '/', {}), True) + self.assertEqual(ansible.utils.check_conditional( + 'False', '/', {}), False) # integer - assert(ansible.utils.check_conditional( - '1', '/', {}) == True) - assert(ansible.utils.check_conditional( - '0', '/', {}) == False) + self.assertEqual(ansible.utils.check_conditional( + '1', '/', {}), True) + self.assertEqual(ansible.utils.check_conditional( + '0', '/', {}), False) # string, beware, a string is truthy unless empty - assert(ansible.utils.check_conditional( - '"yes"', '/', {}) == True) - assert(ansible.utils.check_conditional( - '"no"', '/', {}) == True) - assert(ansible.utils.check_conditional( - '""', '/', {}) == False) + self.assertEqual(ansible.utils.check_conditional( + '"yes"', '/', {}), True) + self.assertEqual(ansible.utils.check_conditional( + '"no"', '/', {}), True) + self.assertEqual(ansible.utils.check_conditional( + '""', '/', {}), False) def test_check_conditional_jinja2_variable_literals(self): # see http://jinja.pocoo.org/docs/templates/#literals # boolean - assert(ansible.utils.check_conditional( - 'var', '/', {'var': 'True'}) == True) - assert(ansible.utils.check_conditional( - 'var', '/', {'var': 'true'}) == True) - assert(ansible.utils.check_conditional( - 'var', '/', {'var': 'False'}) == False) - assert(ansible.utils.check_conditional( - 'var', '/', {'var': 'false'}) == False) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': 'True'}), True) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': 'true'}), True) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': 'False'}), False) + 
self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': 'false'}), False) # integer - assert(ansible.utils.check_conditional( - 'var', '/', {'var': '1'}) == True) - assert(ansible.utils.check_conditional( - 'var', '/', {'var': 1}) == True) - assert(ansible.utils.check_conditional( - 'var', '/', {'var': '0'}) == False) - assert(ansible.utils.check_conditional( - 'var', '/', {'var': 0}) == False) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': '1'}), True) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': 1}), True) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': '0'}), False) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': 0}), False) # string, beware, a string is truthy unless empty - assert(ansible.utils.check_conditional( - 'var', '/', {'var': '"yes"'}) == True) - assert(ansible.utils.check_conditional( - 'var', '/', {'var': '"no"'}) == True) - assert(ansible.utils.check_conditional( - 'var', '/', {'var': '""'}) == False) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': '"yes"'}), True) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': '"no"'}), True) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': '""'}), False) # Python boolean in Jinja2 expression - assert(ansible.utils.check_conditional( - 'var', '/', {'var': True}) == True) - assert(ansible.utils.check_conditional( - 'var', '/', {'var': False}) == False) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': True}), True) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': False}), False) def test_check_conditional_jinja2_expression(self): - assert(ansible.utils.check_conditional( - '1 == 1', '/', {}) == True) - assert(ansible.utils.check_conditional( - 'bar == 42', '/', {'bar': 42}) == True) - assert(ansible.utils.check_conditional( - 'bar != 42', '/', {'bar': 42}) == False) + self.assertEqual(ansible.utils.check_conditional( + '1 == 1', '/', {}), True) + self.assertEqual(ansible.utils.check_conditional( + 'bar == 42', '/', {'bar': 42}), True) + self.assertEqual(ansible.utils.check_conditional( + 'bar != 42', '/', {'bar': 42}), False) def test_check_conditional_jinja2_expression_in_variable(self): - assert(ansible.utils.check_conditional( - 'var', '/', {'var': '1 == 1'}) == True) - assert(ansible.utils.check_conditional( - 'var', '/', {'var': 'bar == 42', 'bar': 42}) == True) - assert(ansible.utils.check_conditional( - 'var', '/', {'var': 'bar != 42', 'bar': 42}) == False) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': '1 == 1'}), True) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': 'bar == 42', 'bar': 42}), True) + self.assertEqual(ansible.utils.check_conditional( + 'var', '/', {'var': 'bar != 42', 'bar': 42}), False) def test_check_conditional_jinja2_unicode(self): - assert(ansible.utils.check_conditional( - u'"\u00df"', '/', {}) == True) - assert(ansible.utils.check_conditional( - u'var == "\u00df"', '/', {'var': u'\u00df'}) == True) + self.assertEqual(ansible.utils.check_conditional( + u'"\u00df"', '/', {}), True) + self.assertEqual(ansible.utils.check_conditional( + u'var == "\u00df"', '/', {'var': u'\u00df'}), True) ##################################### ### key-value parsing def test_parse_kv_basic(self): - assert (ansible.utils.parse_kv('a=simple b="with space" c="this=that"') == + self.assertEqual(ansible.utils.parse_kv('a=simple b="with 
space" c="this=that"'), {'a': 'simple', 'b': 'with space', 'c': 'this=that'}) + + def test_jsonify(self): + self.assertEqual(ansible.utils.jsonify(None), '{}') + self.assertEqual(ansible.utils.jsonify(dict(foo='bar', baz=['qux'])), + '{"baz": ["qux"], "foo": "bar"}') + expected = '''{ + "baz": [ + "qux" + ], + "foo": "bar" +}''' + self.assertEqual(ansible.utils.jsonify(dict(foo='bar', baz=['qux']), format=True), expected) + + def test_is_failed(self): + self.assertEqual(ansible.utils.is_failed(dict(rc=0)), False) + self.assertEqual(ansible.utils.is_failed(dict(rc=1)), True) + self.assertEqual(ansible.utils.is_failed(dict()), False) + self.assertEqual(ansible.utils.is_failed(dict(failed=False)), False) + self.assertEqual(ansible.utils.is_failed(dict(failed=True)), True) + self.assertEqual(ansible.utils.is_failed(dict(failed='True')), True) + self.assertEqual(ansible.utils.is_failed(dict(failed='true')), True) + + def test_is_changed(self): + self.assertEqual(ansible.utils.is_changed(dict()), False) + self.assertEqual(ansible.utils.is_changed(dict(changed=False)), False) + self.assertEqual(ansible.utils.is_changed(dict(changed=True)), True) + self.assertEqual(ansible.utils.is_changed(dict(changed='True')), True) + self.assertEqual(ansible.utils.is_changed(dict(changed='true')), True) + + def test_path_dwim(self): + self.assertEqual(ansible.utils.path_dwim(None, __file__), + __file__) + self.assertEqual(ansible.utils.path_dwim(None, '~'), + os.path.expanduser('~')) + self.assertEqual(ansible.utils.path_dwim(None, 'TestUtils.py'), + __file__.rstrip('c')) + + def test_path_dwim_relative(self): + self.assertEqual(ansible.utils.path_dwim_relative(__file__, 'units', 'TestUtils.py', + os.path.dirname(os.path.dirname(__file__))), + __file__.rstrip('c')) + + def test_json_loads(self): + self.assertEqual(ansible.utils.json_loads('{"foo": "bar"}'), dict(foo='bar')) + + def test_parse_json(self): + # leading junk + self.assertEqual(ansible.utils.parse_json('ansible\n{"foo": "bar"}'), dict(foo="bar")) + + # "baby" json + self.assertEqual(ansible.utils.parse_json('foo=bar baz=qux'), dict(foo='bar', baz='qux')) + + # No closing quotation + try: + ansible.utils.parse_json('foo=bar "') + except ValueError: + pass + else: + raise AssertionError('Incorrect exception, expected ValueError') + + # Failed to parse + try: + ansible.utils.parse_json('{') + except ansible.errors.AnsibleError: + pass + else: + raise AssertionError('Incorrect exception, expected ansible.errors.AnsibleError') + + # boolean changed/failed + self.assertEqual(ansible.utils.parse_json('changed=true'), dict(changed=True)) + self.assertEqual(ansible.utils.parse_json('changed=false'), dict(changed=False)) + self.assertEqual(ansible.utils.parse_json('failed=true'), dict(failed=True)) + self.assertEqual(ansible.utils.parse_json('failed=false'), dict(failed=False)) + + # rc + self.assertEqual(ansible.utils.parse_json('rc=0'), dict(rc=0)) + + # Just a string + self.assertEqual(ansible.utils.parse_json('foo'), dict(failed=True, parsed=False, msg='foo')) + + def test_smush_braces(self): + self.assertEqual(ansible.utils.smush_braces('{{ foo}}'), '{{foo}}') + self.assertEqual(ansible.utils.smush_braces('{{foo }}'), '{{foo}}') + self.assertEqual(ansible.utils.smush_braces('{{ foo }}'), '{{foo}}') + + def test_smush_ds(self): + # list + self.assertEqual(ansible.utils.smush_ds(['foo={{ foo }}']), ['foo={{foo}}']) + + # dict + self.assertEqual(ansible.utils.smush_ds(dict(foo='{{ foo }}')), dict(foo='{{foo}}')) + + # string + 
self.assertEqual(ansible.utils.smush_ds('foo={{ foo }}'), 'foo={{foo}}') + + # int + self.assertEqual(ansible.utils.smush_ds(0), 0) + + def test_parse_yaml(self): + #json + self.assertEqual(ansible.utils.parse_yaml('{"foo": "bar"}'), dict(foo='bar')) + + # broken json + try: + ansible.utils.parse_yaml('{') + except ansible.errors.AnsibleError: + pass + else: + raise AssertionError + + # broken json with path_hint + try: + ansible.utils.parse_yaml('{', path_hint='foo') + except ansible.errors.AnsibleError: + pass + else: + raise AssertionError + + # yaml with front-matter + self.assertEqual(ansible.utils.parse_yaml("---\nfoo: bar"), dict(foo='bar')) + # yaml no front-matter + self.assertEqual(ansible.utils.parse_yaml('foo: bar'), dict(foo='bar')) + # yaml indented first line (See #6348) + self.assertEqual(ansible.utils.parse_yaml(' - foo: bar\n baz: qux'), [dict(foo='bar', baz='qux')]) + + def test_process_common_errors(self): + # no quote + self.assertTrue('YAML thought it' in ansible.utils.process_common_errors('', 'foo: {{bar}}', 6)) + + # extra colon + self.assertTrue('an extra unquoted colon' in ansible.utils.process_common_errors('', 'foo: bar:', 8)) + + # match + self.assertTrue('same kind of quote' in ansible.utils.process_common_errors('', 'foo: "{{bar}}"baz', 6)) + self.assertTrue('same kind of quote' in ansible.utils.process_common_errors('', "foo: '{{bar}}'baz", 6)) + + # unbalanced + # The first test fails and is commented out for now, logic is wrong and the test fails + #self.assertTrue('We could be wrong' in ansible.utils.process_common_errors('', 'foo: "bad" "wolf"', 6)) + self.assertTrue('We could be wrong' in ansible.utils.process_common_errors('', "foo: 'bad' 'wolf'", 6)) + + + def test_process_yaml_error(self): + data = 'foo: bar\n baz: qux' + try: + ansible.utils.parse_yaml(data) + except yaml.YAMLError, exc: + try: + ansible.utils.process_yaml_error(exc, data, __file__) + except ansible.errors.AnsibleYAMLValidationFailed, e: + self.assertTrue('Syntax Error while loading' in e.msg) + else: + raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed') + + data = 'foo: bar\n baz: {{qux}}' + try: + ansible.utils.parse_yaml(data) + except yaml.YAMLError, exc: + try: + ansible.utils.process_yaml_error(exc, data, __file__) + except ansible.errors.AnsibleYAMLValidationFailed, e: + self.assertTrue('Syntax Error while loading' in e.msg) + else: + raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed') + + data = '\xFF' + try: + ansible.utils.parse_yaml(data) + except yaml.YAMLError, exc: + try: + ansible.utils.process_yaml_error(exc, data, __file__) + except ansible.errors.AnsibleYAMLValidationFailed, e: + self.assertTrue('Check over' in e.msg) + else: + raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed') + + data = '\xFF' + try: + ansible.utils.parse_yaml(data) + except yaml.YAMLError, exc: + try: + ansible.utils.process_yaml_error(exc, data, None) + except ansible.errors.AnsibleYAMLValidationFailed, e: + self.assertTrue('Could not parse YAML.' 
in e.msg) + else: + raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed') + + def test_parse_yaml_from_file(self): + test = os.path.join(os.path.dirname(__file__), 'inventory_test_data', + 'common_vars.yml') + encrypted = os.path.join(os.path.dirname(__file__), 'inventory_test_data', + 'encrypted.yml') + broken = os.path.join(os.path.dirname(__file__), 'inventory_test_data', + 'broken.yml') + + try: + ansible.utils.parse_yaml_from_file(os.path.dirname(__file__)) + except ansible.errors.AnsibleError: + pass + else: + raise AssertionError('Incorrect exception, expected AnsibleError') + + self.assertEqual(ansible.utils.parse_yaml_from_file(test), yaml.safe_load(open(test))) + + self.assertEqual(ansible.utils.parse_yaml_from_file(encrypted, 'ansible'), dict(foo='bar')) + + try: + ansible.utils.parse_yaml_from_file(broken) + except ansible.errors.AnsibleYAMLValidationFailed, e: + self.assertTrue('Syntax Error while loading' in e.msg) + else: + raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed') + + def test_merge_hash(self): + self.assertEqual(ansible.utils.merge_hash(dict(foo='bar', baz='qux'), dict(foo='baz')), + dict(foo='baz', baz='qux')) + self.assertEqual(ansible.utils.merge_hash(dict(foo=dict(bar='baz')), dict(foo=dict(bar='qux'))), + dict(foo=dict(bar='qux'))) + + def test_md5s(self): + self.assertEqual(ansible.utils.md5s('ansible'), '640c8a5376aa12fa15cf02130ce239a6') + # Need a test that causes UnicodeEncodeError See 4221 + + def test_md5(self): + self.assertEqual(ansible.utils.md5(os.path.join(os.path.dirname(__file__), 'ansible.cfg')), + 'fb7b5b90ea63f04bde33e804b6fad42c') + self.assertEqual(ansible.utils.md5(os.path.join(os.path.dirname(__file__), 'ansible.cf')), + None) + + def test_default(self): + self.assertEqual(ansible.utils.default(None, lambda: {}), {}) + self.assertEqual(ansible.utils.default(dict(foo='bar'), lambda: {}), dict(foo='bar')) + + def test__gitinfo(self): + # this fails if not run from git clone + # self.assertEqual('last updated' in ansible.utils._gitinfo()) + # missing test for git submodule + # missing test outside of git clone + pass + + def test_version(self): + version = ansible.utils.version('ansible') + self.assertTrue(version.startswith('ansible %s' % __version__)) + # this fails if not run from git clone + # self.assertEqual('last updated' in version) + + def test_getch(self): + # figure out how to test this + pass + + def test_sanitize_output(self): + self.assertEqual(ansible.utils.sanitize_output('password=foo'), 'password=VALUE_HIDDEN') + self.assertEqual(ansible.utils.sanitize_output('foo=user:pass@foo/whatever'), + 'foo=user:********@foo/whatever') + self.assertEqual(ansible.utils.sanitize_output('foo=http://username:pass@wherever/foo'), + 'foo=http://username:********@wherever/foo') + self.assertEqual(ansible.utils.sanitize_output('foo=http://wherever/foo'), + 'foo=http://wherever/foo') + + def test_increment_debug(self): + ansible.utils.VERBOSITY = 0 + ansible.utils.increment_debug(None, None, None, None) + self.assertEqual(ansible.utils.VERBOSITY, 1) + + def test_base_parser(self): + output = ansible.utils.base_parser(output_opts=True) + self.assertTrue(output.has_option('--one-line') and output.has_option('--tree')) + + runas = ansible.utils.base_parser(runas_opts=True) + for opt in ['--sudo', '--sudo-user', '--user', '--su', '--su-user']: + self.assertTrue(runas.has_option(opt)) + + async = ansible.utils.base_parser(async_opts=True) + self.assertTrue(async.has_option('--poll') and 
async.has_option('--background')) + + connect = ansible.utils.base_parser(connect_opts=True) + self.assertTrue(connect.has_option('--connection')) + + subset = ansible.utils.base_parser(subset_opts=True) + self.assertTrue(subset.has_option('--limit')) + + check = ansible.utils.base_parser(check_opts=True) + self.assertTrue(check.has_option('--check')) + + diff = ansible.utils.base_parser(diff_opts=True) + self.assertTrue(diff.has_option('--diff')) + + def test_do_encrypt(self): + salt_chars = string.ascii_letters + string.digits + './' + salt = ansible.utils.random_password(length=8, chars=salt_chars) + hash = ansible.utils.do_encrypt('ansible', 'sha256_crypt', salt=salt) + self.assertTrue(passlib.hash.sha256_crypt.verify('ansible', hash)) + + hash = ansible.utils.do_encrypt('ansible', 'sha256_crypt') + self.assertTrue(passlib.hash.sha256_crypt.verify('ansible', hash)) + + hash = ansible.utils.do_encrypt('ansible', 'md5_crypt', salt_size=4) + self.assertTrue(passlib.hash.md5_crypt.verify('ansible', hash)) + + + try: + ansible.utils.do_encrypt('ansible', 'ansible') + except ansible.errors.AnsibleError: + pass + else: + raise AssertionError('Incorrect exception, expected AnsibleError') + + def test_last_non_blank_line(self): + self.assertEqual(ansible.utils.last_non_blank_line('a\n\nb\n\nc'), 'c') + self.assertEqual(ansible.utils.last_non_blank_line(''), '') + + def test_filter_leading_non_json_lines(self): + self.assertEqual(ansible.utils.filter_leading_non_json_lines('a\nb\nansible!\n{"foo": "bar"}'), + '{"foo": "bar"}\n') + self.assertEqual(ansible.utils.filter_leading_non_json_lines('a\nb\nansible!\n["foo", "bar"]'), + '["foo", "bar"]\n') + self.assertEqual(ansible.utils.filter_leading_non_json_lines('a\nb\nansible!\nfoo=bar'), + 'foo=bar\n') + + def test_boolean(self): + self.assertEqual(ansible.utils.boolean("true"), True) + self.assertEqual(ansible.utils.boolean("True"), True) + self.assertEqual(ansible.utils.boolean("TRUE"), True) + self.assertEqual(ansible.utils.boolean("t"), True) + self.assertEqual(ansible.utils.boolean("T"), True) + self.assertEqual(ansible.utils.boolean("Y"), True) + self.assertEqual(ansible.utils.boolean("y"), True) + self.assertEqual(ansible.utils.boolean("1"), True) + self.assertEqual(ansible.utils.boolean(1), True) + self.assertEqual(ansible.utils.boolean("false"), False) + self.assertEqual(ansible.utils.boolean("False"), False) + self.assertEqual(ansible.utils.boolean("0"), False) + self.assertEqual(ansible.utils.boolean(0), False) + self.assertEqual(ansible.utils.boolean("foo"), False) + + def test_make_sudo_cmd(self): + cmd = ansible.utils.make_sudo_cmd('root', '/bin/sh', '/bin/ls') + self.assertTrue(isinstance(cmd, tuple)) + self.assertEqual(len(cmd), 3) + self.assertTrue('-u root' in cmd[0]) + self.assertTrue('-p "[sudo via ansible, key=' in cmd[0] and cmd[1].startswith('[sudo via ansible, key')) + self.assertTrue('echo SUDO-SUCCESS-' in cmd[0] and cmd[2].startswith('SUDO-SUCCESS-')) + self.assertTrue('sudo -k' in cmd[0]) + + def test_make_su_cmd(self): + cmd = ansible.utils.make_su_cmd('root', '/bin/sh', '/bin/ls') + self.assertTrue(isinstance(cmd, tuple)) + self.assertEqual(len(cmd), 3) + self.assertTrue(' root /bin/sh' in cmd[0]) + self.assertTrue(cmd[1] == 'assword: ') + self.assertTrue('echo SUDO-SUCCESS-' in cmd[0] and cmd[2].startswith('SUDO-SUCCESS-')) + + def test_to_unicode(self): + uni = ansible.utils.to_unicode(u'ansible') + self.assertTrue(isinstance(uni, unicode)) + self.assertEqual(uni, u'ansible') + + none = 
ansible.utils.to_unicode(None) + self.assertTrue(isinstance(none, type(None))) + self.assertTrue(none is None) + + utf8 = ansible.utils.to_unicode('ansible') + self.assertTrue(isinstance(utf8, unicode)) + self.assertEqual(utf8, u'ansible') + + def test_is_list_of_strings(self): + self.assertEqual(ansible.utils.is_list_of_strings(['foo', 'bar', u'baz']), True) + self.assertEqual(ansible.utils.is_list_of_strings(['foo', 'bar', True]), False) + self.assertEqual(ansible.utils.is_list_of_strings(['one', 2, 'three']), False) + + def test_safe_eval(self): + # Not basestring + self.assertEqual(ansible.utils.safe_eval(len), len) + self.assertEqual(ansible.utils.safe_eval(1), 1) + self.assertEqual(ansible.utils.safe_eval(len, include_exceptions=True), (len, None)) + self.assertEqual(ansible.utils.safe_eval(1, include_exceptions=True), (1, None)) + + # module + self.assertEqual(ansible.utils.safe_eval('foo.bar('), 'foo.bar(') + self.assertEqual(ansible.utils.safe_eval('foo.bar(', include_exceptions=True), ('foo.bar(', None)) + + # import + self.assertEqual(ansible.utils.safe_eval('import foo'), 'import foo') + self.assertEqual(ansible.utils.safe_eval('import foo', include_exceptions=True), ('import foo', None)) + + # valid simple eval + self.assertEqual(ansible.utils.safe_eval('True'), True) + self.assertEqual(ansible.utils.safe_eval('True', include_exceptions=True), (True, None)) + + # valid eval with lookup + self.assertEqual(ansible.utils.safe_eval('foo + bar', dict(foo=1, bar=2)), 3) + self.assertEqual(ansible.utils.safe_eval('foo + bar', dict(foo=1, bar=2), include_exceptions=True), (3, None)) + + # invalid eval + self.assertEqual(ansible.utils.safe_eval('foo'), 'foo') + nameerror = ansible.utils.safe_eval('foo', include_exceptions=True) + self.assertTrue(isinstance(nameerror, tuple)) + self.assertEqual(nameerror[0], 'foo') + self.assertTrue(isinstance(nameerror[1], NameError)) + + def test_listify_lookup_plugin_terms(self): + basedir = os.path.dirname(__file__) + self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict()), + ['things']) + self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['one', 'two'])), + ['one', 'two']) + + def test_deprecated(self): + sys_stderr = sys.stderr + sys.stderr = StringIO.StringIO() + ansible.utils.deprecated('Ack!', '0.0') + out = sys.stderr.getvalue() + self.assertTrue('0.0' in out) + self.assertTrue('[DEPRECATION WARNING]' in out) + + sys.stderr = StringIO.StringIO() + ansible.utils.deprecated('Ack!', None) + out = sys.stderr.getvalue() + self.assertTrue('0.0' not in out) + self.assertTrue('[DEPRECATION WARNING]' in out) + + sys.stderr = StringIO.StringIO() + warnings = C.DEPRECATION_WARNINGS + C.DEPRECATION_WARNINGS = False + ansible.utils.deprecated('Ack!', None) + out = sys.stderr.getvalue() + self.assertTrue(not out) + C.DEPRECATION_WARNINGS = warnings + + sys.stderr = sys_stderr + + try: + ansible.utils.deprecated('Ack!', '0.0', True) + except ansible.errors.AnsibleError, e: + self.assertTrue('0.0' not in e.msg) + self.assertTrue('[DEPRECATED]' in e.msg) + else: + raise AssertionError("Incorrect exception, expected AnsibleError") + + def test_warning(self): + sys_stderr = sys.stderr + sys.stderr = StringIO.StringIO() + ansible.utils.warning('ANSIBLE') + out = sys.stderr.getvalue() + sys.stderr = sys_stderr + self.assertTrue('[WARNING]: ANSIBLE' in out) + + def test_combine_vars(self): + one = {'foo': {'bar': True}, 'baz': {'one': 'qux'}} + two = {'baz': {'two': 'qux'}} + replace = {'baz': 
{'two': 'qux'}, 'foo': {'bar': True}} + merge = {'baz': {'two': 'qux', 'one': 'qux'}, 'foo': {'bar': True}} + + C.DEFAULT_HASH_BEHAVIOUR = 'replace' + self.assertEqual(ansible.utils.combine_vars(one, two), replace) + + C.DEFAULT_HASH_BEHAVIOUR = 'merge' + self.assertEqual(ansible.utils.combine_vars(one, two), merge) + + def test_err(self): + sys_stderr = sys.stderr + sys.stderr = StringIO.StringIO() + ansible.utils.err('ANSIBLE') + out = sys.stderr.getvalue() + sys.stderr = sys_stderr + self.assertEqual(out, 'ANSIBLE\n') + + def test_exit(self): + sys_stderr = sys.stderr + sys.stderr = StringIO.StringIO() + try: + ansible.utils.exit('ansible') + except SystemExit, e: + self.assertEqual(e.code, 1) + self.assertEqual(sys.stderr.getvalue(), 'ansible\n') + else: + raise AssertionError('Incorrect exception, expected SystemExit') + finally: + sys.stderr = sys_stderr + + def test_unfrackpath(self): + os.environ['TEST_ROOT'] = os.path.dirname(os.path.dirname(__file__)) + self.assertEqual(ansible.utils.unfrackpath('$TEST_ROOT/units/../units/TestUtils.py'), __file__.rstrip('c')) + + def test_is_executable(self): + self.assertEqual(ansible.utils.is_executable(__file__), 0) + + bin_ansible = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), + 'bin', 'ansible') + self.assertNotEqual(ansible.utils.is_executable(bin_ansible), 0) + + def test_get_diff(self): + standard = dict( + before_header='foo', + after_header='bar', + before='fooo', + after='foo' + ) + standard_expected = """--- before: foo ++++ after: bar +@@ -1 +1 @@ +-fooo+foo""" + self.assertEqual(ansible.utils.get_diff(standard), standard_expected) diff --git a/test/units/TestUtilsStringFunctions.py b/test/units/TestUtilsStringFunctions.py new file mode 100644 index 00000000000..cccedf280d3 --- /dev/null +++ b/test/units/TestUtilsStringFunctions.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + +import unittest +import os +import os.path +import tempfile +import yaml +import passlib.hash +import string +import StringIO +import copy + +from nose.plugins.skip import SkipTest + +from ansible.utils import string_functions +import ansible.errors +import ansible.constants as C +import ansible.utils.template as template2 + +from ansible import __version__ + +import sys +reload(sys) +sys.setdefaultencoding("utf8") + +class TestUtilsStringFunctions(unittest.TestCase): + def test_isprintable(self): + self.assertFalse(string_functions.isprintable(chr(7))) + self.assertTrue(string_functions.isprintable('hello')) + + def test_count_newlines_from_end(self): + self.assertEqual(string_functions.count_newlines_from_end('foo\n\n\n\n'), 4) + self.assertEqual(string_functions.count_newlines_from_end('\nfoo'), 0) diff --git a/test/units/inventory_test_data/broken.yml b/test/units/inventory_test_data/broken.yml new file mode 100644 index 00000000000..0eccc1ba78c --- /dev/null +++ b/test/units/inventory_test_data/broken.yml @@ -0,0 +1,2 @@ +foo: bar + baz: qux diff --git a/test/units/inventory_test_data/encrypted.yml b/test/units/inventory_test_data/encrypted.yml new file mode 100644 index 00000000000..ca33ab25cbb --- /dev/null +++ b/test/units/inventory_test_data/encrypted.yml @@ -0,0 +1,6 @@ +$ANSIBLE_VAULT;1.1;AES256 +33343734386261666161626433386662623039356366656637303939306563376130623138626165 +6436333766346533353463636566313332623130383662340a393835656134633665333861393331 +37666233346464636263636530626332623035633135363732623332313534306438393366323966 +3135306561356164310a343937653834643433343734653137383339323330626437313562306630 +3035 
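A quick aside on the test_combine_vars cases above: they encode Ansible's two hash_behaviour modes, which are easy to confuse. A minimal sketch of what each mode does to inventory variables, reusing the exact values from the test fixtures (the group_vars file names are hypothetical, added here only for illustration):

    # group_vars/all.yml -- first source (hypothetical file)
    foo:
      bar: true
    baz:
      one: qux

    # group_vars/webservers.yml -- second, higher-precedence source (hypothetical file)
    baz:
      two: qux

    # With hash_behaviour=replace (the default), the second 'baz' wins wholesale:
    #   {'baz': {'two': 'qux'}, 'foo': {'bar': True}}
    # With hash_behaviour=merge, nested keys are combined recursively:
    #   {'baz': {'one': 'qux', 'two': 'qux'}, 'foo': {'bar': True}}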
From 30cdac606182738097147c4e93d6395d138b07de Mon Sep 17 00:00:00 2001
From: Michael DeHaan 
Date: Fri, 14 Mar 2014 12:01:23 -0400
Subject: [PATCH 361/772] Fix docs for ec2_lc module.

---
 library/cloud/ec2_lc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/cloud/ec2_lc b/library/cloud/ec2_lc
index a84449d7d91..0f2dc26a234 100644
--- a/library/cloud/ec2_lc
+++ b/library/cloud/ec2_lc
@@ -66,7 +66,7 @@ options:
 """

 EXAMPLES = '''
-- ec2_lc: >
+- ec2_lc:
     name: special
     image_id: ami-XXX
     key_name: default
From e2bfa864844847f302111e46bdb9cc1c26415dfb Mon Sep 17 00:00:00 2001
From: Matt Martz 
Date: Thu, 6 Mar 2014 16:10:34 -0600
Subject: [PATCH 362/772] New module: nexmo

---
 library/notification/nexmo | 140 +++++++++++++++++++++++++++++++++++++
 1 file changed, 140 insertions(+)
 create mode 100644 library/notification/nexmo

diff --git a/library/notification/nexmo b/library/notification/nexmo
new file mode 100644
index 00000000000..d4898c40cdb
--- /dev/null
+++ b/library/notification/nexmo
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Matt Martz 
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .

+DOCUMENTATION = """
+module: nexmo
+short_description: Send an SMS via nexmo
+description:
+    - Send an SMS message via nexmo
+version_added: 1.6
+author: Matt Martz 
+options:
+  api_key:
+    description:
+      - Nexmo API Key
+    required: true
+  api_secret:
+    description:
+      - Nexmo API Secret
+    required: true
+  src:
+    description:
+      - Nexmo Number to send from
+    required: true
+  dest:
+    description:
+      - Phone number(s) to send SMS message to
+    required: true
+  msg:
+    description:
+      - Text message to send. Messages longer than 160 characters will be
+        split into multiple messages
+    required: true
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled sites using self-signed certificates.
+    required: false
+    default: 'yes'
+    choices:
+      - 'yes'
+      - 'no'
+"""
+
+EXAMPLES = """
+- name: Send notification message via Nexmo
+  local_action:
+    module: nexmo
+    api_key: 640c8a53
+    api_secret: 0ce239a6
+    src: 12345678901
+    dest:
+      - 10987654321
+      - 16789012345
+    msg: "{{ inventory_hostname }} completed"
+"""
+
+
+NEXMO_API = 'https://rest.nexmo.com/sms/json'
+
+
+def send_msg(module):
+    failed = list()
+    responses = dict()
+    msg = {
+        'api_key': module.params.get('api_key'),
+        'api_secret': module.params.get('api_secret'),
+        'from': module.params.get('src'),
+        'text': module.params.get('msg')
+    }
+    for number in module.params.get('dest'):
+        msg['to'] = number
+        url = "%s?%s" % (NEXMO_API, urllib.urlencode(msg))
+
+        headers = dict(Accept='application/json')
+        response, info = fetch_url(module, url, headers=headers)
+        if info['status'] != 200:
+            failed.append(number)
+            responses[number] = dict(failed=True)
+
+        try:
+            responses[number] = json.load(response)
+        except:
+            failed.append(number)
+            responses[number] = dict(failed=True)
+        else:
+            for message in responses[number]['messages']:
+                if int(message['status']) != 0:
+                    failed.append(number)
+                    responses[number] = dict(failed=True, **responses[number])
+
+    if failed:
+        msg = 'One or more messages failed to send'
+    else:
+        msg = ''
+
+    module.exit_json(failed=bool(failed), msg=msg, changed=False,
+                     responses=responses)
+
+
+def main():
+    argument_spec = url_argument_spec()
+    argument_spec.update(
+        dict(
+            api_key=dict(required=True, no_log=True),
+            api_secret=dict(required=True, no_log=True),
+            src=dict(required=True, type='int'),
+            dest=dict(required=True, type='list'),
+            msg=dict(required=True),
+        ),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec
+    )
+
+    send_msg(module)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
From 0cea4d8a32ba815152b0e8974e3c101b3a840099 Mon Sep 17 00:00:00 2001
From: Michael DeHaan 
Date: Fri, 14 Mar 2014 11:23:34 -0500
Subject: [PATCH 363/772] Update CODING_GUIDELINES.md

---
 CODING_GUIDELINES.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CODING_GUIDELINES.md b/CODING_GUIDELINES.md
index 1ba5d5035e4..2da07681cee 100644
--- a/CODING_GUIDELINES.md
+++ b/CODING_GUIDELINES.md
@@ -86,6 +86,7 @@ Module Security
    * if you need the shell you must pass use_unsafe_shell=True to module.run_command
    * if you do not need the shell, avoid using the shell
    * any variables that can come from the user input with use_unsafe_shell=True must be wrapped by pipes.quote(x)
+   * downloads of https:// resource urls must import module_utils.urls and use the fetch_url method

 Misc Preferences
 ================
From aacccd441b11706eef5dd736a8371253b3771f17 Mon Sep 17 00:00:00 2001
From: James Laska 
Date: Fri, 14 Mar 2014 13:04:25 -0400
Subject: [PATCH 364/772] Add integration test for apt_repository

Tests several ways to specify the repository.
For every repo added, the test asserts that:
 * the apt-cache was updated as expected (depends on `update_cache` parameter)
 * the PPA key was installed (depends on `repo` format)
---
 test/integration/destructive.yml              |   4 +-
 .../roles/test_apt_repository/meta/main.yml   |   2 +
 .../roles/test_apt_repository/tasks/apt.yml   | 137 ++++++++++++++++++
 .../test_apt_repository/tasks/cleanup.yml     |  18 +++
 .../roles/test_apt_repository/tasks/main.yml  |  21 +++
 5 files changed, 180 insertions(+), 2 deletions(-)
 create mode 100644 test/integration/roles/test_apt_repository/meta/main.yml
 create mode 100644 test/integration/roles/test_apt_repository/tasks/apt.yml
 create mode 100644 test/integration/roles/test_apt_repository/tasks/cleanup.yml
 create mode 100644 test/integration/roles/test_apt_repository/tasks/main.yml

diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml
index 8d0b11c6acc..406db63906b 100644
--- a/test/integration/destructive.yml
+++ b/test/integration/destructive.yml
@@ -1,9 +1,9 @@
 - hosts: testhost
   gather_facts: True
-  roles: 
+  roles:
     - { role: test_service, tags: test_service }
     - { role: test_pip, tags: test_pip }
    - { role: test_gem, tags: test_gem }
    - { role: test_yum, tags: test_yum }
    - { role: test_apt, tags: test_apt }
-
+    - { role: test_apt_repository, tags: test_apt_repository }
diff --git a/test/integration/roles/test_apt_repository/meta/main.yml b/test/integration/roles/test_apt_repository/meta/main.yml
new file mode 100644
index 00000000000..07faa217762
--- /dev/null
+++ b/test/integration/roles/test_apt_repository/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+  - prepare_tests
diff --git a/test/integration/roles/test_apt_repository/tasks/apt.yml b/test/integration/roles/test_apt_repository/tasks/apt.yml
new file mode 100644
index 00000000000..7cbc9d2128a
--- /dev/null
+++ b/test/integration/roles/test_apt_repository/tasks/apt.yml
@@ -0,0 +1,137 @@
+---
+
+- set_fact:
+    test_ppa_name: 'ppa:menulibre-dev/devel'
+    test_ppa_spec: 'deb http://ppa.launchpad.net/menulibre-dev/devel/ubuntu {{ansible_distribution_release}} main'
+    test_ppa_key: 'A7AD98A1' # http://keyserver.ubuntu.com:11371/pks/lookup?search=0xD06AAF4C11DAB86DF421421EFE6B20ECA7AD98A1&op=index
+
+#
+# TEST: apt_repository: repo=
+#
+- include: 'cleanup.yml'
+
+- name: 'record apt cache mtime'
+  stat: path='/var/cache/apt/pkgcache.bin'
+  register: cache_before
+
+- name: 'name= (expect: pass)'
+  apt_repository: repo='{{test_ppa_name}}' state=present
+  register: result
+
+- name: 'assert the repository was added'
+  assert:
+    that:
+      - 'result.changed'
+      - 'result.state == "present"'
+      - 'result.repo == "{{test_ppa_name}}"'
+
+- name: 'examine apt cache mtime'
+  stat: path='/var/cache/apt/pkgcache.bin'
+  register: cache_after
+
+- name: 'assert the apt cache did change'
+  assert:
+    that:
+      - 'cache_before.stat.mtime != cache_after.stat.mtime'
+
+- name: 'ensure ppa key is installed (expect: pass)'
+  apt_key: id='{{test_ppa_key}}' state=present
+
+#
+# TEST: apt_repository: repo= update_cache=no
+#
+- include: 'cleanup.yml'
+
+- name: 'record apt cache mtime'
+  stat: path='/var/cache/apt/pkgcache.bin'
+  register: cache_before
+
+- name: 'name= update_cache=no (expect: pass)'
+  apt_repository: repo='{{test_ppa_name}}' state=present update_cache=no
+  register: result
+
+- assert:
+    that:
+      - 'result.changed'
+      - 'result.state == "present"'
+      - 'result.repo == "{{test_ppa_name}}"'
+
+- name: 'examine apt cache mtime'
+  stat: path='/var/cache/apt/pkgcache.bin'
+  register: cache_after
+
+- name: 'assert 
the apt cache did *NOT* change' + assert: + that: + - 'cache_before.stat.mtime == cache_after.stat.mtime' + +- name: 'ensure ppa key is installed (expect: pass)' + apt_key: id='{{test_ppa_key}}' state=present + +# +# TEST: apt_repository: repo= update_cache=yes +# +- include: 'cleanup.yml' + +- name: 'record apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_before + +- name: 'name= update_cache=yes (expect: pass)' + apt_repository: repo='{{test_ppa_name}}' state=present update_cache=yes + register: result + +- assert: + that: + - 'result.changed' + - 'result.state == "present"' + - 'result.repo == "{{test_ppa_name}}"' + +- name: 'examine apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_after + +- name: 'assert the apt cache did change' + assert: + that: + - 'cache_before.stat.mtime != cache_after.stat.mtime' + +- name: 'ensure ppa key is installed (expect: pass)' + apt_key: id='{{test_ppa_key}}' state=present + +# +# TEST: apt_repository: repo= +# +- include: 'cleanup.yml' + +- name: 'record apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_before + +- name: 'name= (expect: pass)' + apt_repository: repo='{{test_ppa_spec}}' state=present + register: result + +- assert: + that: + - 'result.changed' + - 'result.state == "present"' + - 'result.repo == "{{test_ppa_spec}}"' + +- name: 'examine apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_after + +- name: 'assert the apt cache did change' + assert: + that: + - 'cache_before.stat.mtime != cache_after.stat.mtime' + +# When installing a repo with the spec, the key is *NOT* added +- name: 'ensure ppa key is absent (expect: pass)' + apt_key: id='{{test_ppa_key}}' state=absent + +# +# TEARDOWN +# +- include: 'cleanup.yml' diff --git a/test/integration/roles/test_apt_repository/tasks/cleanup.yml b/test/integration/roles/test_apt_repository/tasks/cleanup.yml new file mode 100644 index 00000000000..86a09dd5aec --- /dev/null +++ b/test/integration/roles/test_apt_repository/tasks/cleanup.yml @@ -0,0 +1,18 @@ +--- +# tasks to cleanup a repo and assert it is gone + +- name: remove existing ppa + apt_repository: repo={{test_ppa_name}} state=absent + ignore_errors: true + +- name: test that ppa does not exist (expect pass) + shell: cat /etc/apt/sources.list /etc/apt/sources.list.d/* | grep "{{test_ppa_spec}}" + register: command + failed_when: command.rc == 0 + changed_when: false + +# Should this use apt-key, maybe? +- name: remove ppa key + apt_key: id={{test_ppa_key}} state=absent + ignore_errors: true + diff --git a/test/integration/roles/test_apt_repository/tasks/main.yml b/test/integration/roles/test_apt_repository/tasks/main.yml new file mode 100644 index 00000000000..8a16a061bd9 --- /dev/null +++ b/test/integration/roles/test_apt_repository/tasks/main.yml @@ -0,0 +1,21 @@ +# test code for the apt_repository module +# (c) 2014, James Laska + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- include: 'apt.yml' + when: ansible_distribution in ('Ubuntu', 'Debian') + From cfe04659341151a9e21b2950c63a88c6b4ae034c Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Wed, 5 Mar 2014 07:53:38 -0800 Subject: [PATCH 365/772] Add guide for Google Cloud Engine. --- docsite/rst/guide_gce.rst | 186 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 docsite/rst/guide_gce.rst diff --git a/docsite/rst/guide_gce.rst b/docsite/rst/guide_gce.rst new file mode 100644 index 00000000000..29c2b3f81f3 --- /dev/null +++ b/docsite/rst/guide_gce.rst @@ -0,0 +1,186 @@ + Google Cloud Platform Guide +============================ + +.. _gce_intro: + +Introduction +------------ + +.. note:: This section of the documentation is under construction. We are in the process of adding more examples about all of the GCE modules and how they work together. + +The GCE modules require the apache-libcloud module, which you can install from pip: + +.. code-block:: bash + + $ pip install apache-libcloud + +.. note:: If you're using Ansible on Mac OS X, libcloud needs to access a CA cert chain. You'll need to download one (you can get one for `here `_.) + +Credentials +----------- + +To work with the GCE modules, you'll first need to get some credentials. You can create new one from the `console `_ by going to the "APIs and Auth" section. Once you've created a new client ID and downloaded the generated private key (in the `pkcs12 format `_), you'll need to convert the key by running the following command: + +.. code-block:: bash + + $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret -nodes -nocerts | openssl rsa -out pkey.pem + +There's three different ways to provide credentials to Ansible when you want to talk to Google Cloud: + +* by providing to the modules directly +* by populating a ``secrets.py`` file +* by populating the ``gce.ini`` file (for the inventory script only) + +Module +`````` + +For the GCE modules you can specify the credentials as argument: + +* ``service_account_email``: email associated with the project +* ``pem_file``: path to the pem file +* ``project_id``: id of the project + +For example, to create a new instance using the cloud module, you can use the following configuration: + +.. code-block:: yaml + - name: Create instance(s) + hosts: localhost + gather_facts: no + vars: + service_account_email: unique-id@developer.gserviceaccount.com + pem_file: /path/to/project.pem + project_id: project-id + machine_type: n1-standard-1 + image: debian-7 + tasks: + - name: Launch instances + local_action: gce instance_names=dev machine_type={{ machine_type }} image={{ image }} service_account_email={{ service_account_email }} pem_file={{ pem_file }} project_id={{ project_id }} + +secrets.py +`````````` + +Create a file ``secrets.py`` looking like following, and put it in some folder which is in your ``$PYTHONPATH``: + +.. code-block:: python + + GCE_PARAMS = ('i...@project.googleusercontent.com', '/path/to/project.pem') + GCE_KEYWORD_PARAMS = {'project': 'project-name'} + +gce.ini +``````` + +When using the inventory script ``gce.py``, you need to populate the ``gce.ini`` file that you can find in the inventory directory. + +Host Inventory +-------------- + +The best way to interact with your hosts is to use the gce inventory plugin, which dynamically queries GCE and tells Ansible what nodes can be managed. 
+
+gce.py
+++++++
+
+To use the GCE dynamic inventory script, copy ``gce.py`` from ``plugins/inventory`` into your inventory directory and make it executable. You can specify credentials for ``gce.py`` using the ``GCE_INI_PATH`` environment variable.
+
+Let's test our inventory script to see if it can talk to Google Cloud.
+
+.. code-block:: bash
+
+    $ GCE_INI_PATH=~/.gce.ini ansible all -i gce.py -m setup
+    hostname | success >> {
+        "ansible_facts": {
+            "ansible_all_ipv4_addresses": [
+                "x.x.x.x"
+            ],
+
+The recommended way to use the inventory is to create an ``inventory`` directory, and place both the ``gce.py`` script and a file containing ``localhost`` in it.
+
+Executing ``ansible`` or ``ansible-playbook`` and specifying the ``inventory`` directory instead of an individual file will cause ansible to evaluate each file in that directory for inventory.
+
+Let's test our inventory script to see if it can talk to Google Cloud:
+
+.. code-block:: bash
+
+    $ ansible all -i inventory/ -m setup
+    hostname | success >> {
+        "ansible_facts": {
+            "ansible_all_ipv4_addresses": [
+                "x.x.x.x"
+            ],
+
+The output should be similar to the previous command.
+
+Use Cases
+---------
+
+For the following use case, I'm using a small shell script as a wrapper.
+
+.. code-block:: bash
+
+    #!/bin/bash
+    PLAYBOOK="$1"
+
+    if [ -z $PLAYBOOK ]; then
+        echo "You need to pass a playbook as argument to this script."
+        exit 1
+    fi
+
+    export SSL_CERT_FILE=$(pwd)/cacert.cer
+    export ANSIBLE_HOST_KEY_CHECKING=False
+
+    if [ ! -f "$SSL_CERT_FILE" ]; then
+        curl -O http://curl.haxx.se/ca/cacert.pem
+    fi
+
+    ansible-playbook -v -i inventory/ "$PLAYBOOK"
+
+
+Create an instance
+``````````````````
+
+The GCE module provides the ability to provision instances within Google Compute Engine. The provisioning task is typically performed from your Ansible control server against Google Cloud's API.
+
+A playbook would look like this:
+
+.. code-block:: yaml
+
+    - name: Create instance(s)
+      hosts: localhost
+      gather_facts: no
+      vars:
+        machine_type: n1-standard-1 # default
+        image: debian-7
+        service_account_email: unique-id@developer.gserviceaccount.com
+        pem_file: /path/to/project.pem
+        project_id: project-id
+      tasks:
+        - name: Launch instances
+          local_action: gce instance_names=dev machine_type={{ machine_type }} image={{ image }} service_account_email={{ service_account_email }} pem_file={{ pem_file }} project_id={{ project_id }}
+          register: gce
+        - name: Wait for SSH to come up
+          local_action: wait_for host={{ item.public_ip }} port=22 delay=10 timeout=60 state=started
+          with_items: gce.instance_data
+
+Create a web server
+```````````````````
+
+With this example we will install a web server (lighttpd) on our new instance and ensure that the port 80 is open for incoming connections.
+
+.. code-block:: yaml
+
+    - name: Create a firewall rule to allow HTTP
+      hosts: dev
+      gather_facts: no
+      vars:
+        machine_type: n1-standard-1 # default
+        image: debian-7
+        service_account_email: unique-id@developer.gserviceaccount.com
+        pem_file: /path/to/project.pem
+        project_id: project-id
+      tasks:
+        - name: Install lighttpd
+          apt: pkg=lighttpd state=installed
+          sudo: True
+        - name: Allow HTTP
+          local_action: gce_net fwname=all-http name=default allowed=tcp:80 state=present service_account_email={{ service_account_email }} pem_file={{ pem_file }} project_id={{ project_id }}
+
+By pointing your browser to the IP of the server, you should see a page welcoming you.
From 8082f747988b9f98ed19c91a89db85af90d0efba Mon Sep 17 00:00:00 2001
From: Michael DeHaan 
Date: Fri, 14 Mar 2014 15:01:07 -0400
Subject: [PATCH 366/772] Add GCE guide and retool a bit to show the add_host
 interactions, improvements/upgrades are welcome.

Had to shoot the recently merged nova_group module in the head temporarily as
it contained a dict comprehension, which means it can't work on all the
platforms and was also breaking docs builds on CentOS. Will engage with list
about that shortly.
---
 CHANGELOG.md              |   1 -
 docsite/rst/guide_gce.rst | 169 +++++++++++++------
 docsite/rst/guides.rst    |   1 +
 library/cloud/nova_group  | 343 --------------------------------------
 4 files changed, 115 insertions(+), 399 deletions(-)
 delete mode 100644 library/cloud/nova_group

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 578e6b80a54..044b3e1aa28 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,7 +17,6 @@ New Modules:
 * system: locale_gen
 * cloud: digital_ocean_domain
 * cloud: digital_ocean_sshkey
-* cloud: nova_group (security groups)
 * cloud: nova_fip (floating IPs)
 * cloud: rax_identity
 * cloud: ec2_asg (configure autoscaling groups)
diff --git a/docsite/rst/guide_gce.rst b/docsite/rst/guide_gce.rst
index 29c2b3f81f3..b6313652560 100644
--- a/docsite/rst/guide_gce.rst
+++ b/docsite/rst/guide_gce.rst
@@ -1,20 +1,23 @@
- Google Cloud Platform Guide
-============================
+Google Cloud Platform Guide
+===========================

-.. _gce_intro:
+.. _gce_intro:

 Introduction
 ------------

-.. note:: This section of the documentation is under construction. We are in the process of adding more examples about all of the GCE modules and how they work together.
+.. note:: This section of the documentation is under construction. We are in the process of adding more examples about all of the GCE modules and how they work together. Upgrades via github pull requests are welcomed!

-The GCE modules require the apache-libcloud module, which you can install from pip:
+Ansible contains modules for managing Google Compute Engine resources, including creating instances, controlling network access, working with persistent disks, and managing
+load balancers. Additionally, there is an inventory plugin that can automatically suck down all of your GCE instances into Ansible dynamic inventory, and create groups by tag and other properties.
+
+The GCE modules all require the apache-libcloud module, which you can install from pip:

 .. code-block:: bash

    $ pip install apache-libcloud

-.. note:: If you're using Ansible on Mac OS X, libcloud needs to access a CA cert chain. You'll need to download one (you can get one for `here `_.)
+.. note:: If you're using Ansible on Mac OS X, libcloud also needs to access a CA cert chain. You'll need to download one (you can get one `here `_.)

 Credentials
 -----------

 To work with the GCE modules, you'll first need to get some credentials. You can
You can $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret -nodes -nocerts | openssl rsa -out pkey.pem -There's three different ways to provide credentials to Ansible when you want to talk to Google Cloud: +There are two different ways to provide credentials to Ansible so that it can talk with Google Cloud for provisioning and configuration actions: * by providing to the modules directly * by populating a ``secrets.py`` file -* by populating the ``gce.ini`` file (for the inventory script only) -Module -`````` +Calling Modules By Passing Credentials +`````````````````````````````````````` -For the GCE modules you can specify the credentials as argument: +For the GCE modules you can specify the credentials as arguments: * ``service_account_email``: email associated with the project * ``pem_file``: path to the pem file @@ -43,21 +45,32 @@ For the GCE modules you can specify the credentials as argument: For example, to create a new instance using the cloud module, you can use the following configuration: .. code-block:: yaml + - name: Create instance(s) hosts: localhost + connection: local gather_facts: no + vars: service_account_email: unique-id@developer.gserviceaccount.com pem_file: /path/to/project.pem project_id: project-id machine_type: n1-standard-1 image: debian-7 + tasks: + - name: Launch instances - local_action: gce instance_names=dev machine_type={{ machine_type }} image={{ image }} service_account_email={{ service_account_email }} pem_file={{ pem_file }} project_id={{ project_id }} + gce: + instance_names: dev + machine_type: "{{ machine_type }}" + image: "{{ image }}" + service_account_email: "{{ service_account_email }}" + pem_file: "{{ pem_file }}" + project_id: "{{ project_id }}" -secrets.py -`````````` +Calling Modules with secrets.py +``````````````````````````````` Create a file ``secrets.py`` looking like following, and put it in some folder which is in your ``$PYTHONPATH``: @@ -66,22 +79,26 @@ Create a file ``secrets.py`` looking like following, and put it in some folder w GCE_PARAMS = ('i...@project.googleusercontent.com', '/path/to/project.pem') GCE_KEYWORD_PARAMS = {'project': 'project-name'} -gce.ini -``````` +Now the modules can be used as above, but the account information can be omitted. -When using the inventory script ``gce.py``, you need to populate the ``gce.ini`` file that you can find in the inventory directory. - -Host Inventory --------------- +GCE Dynamic Inventory +--------------------- The best way to interact with your hosts is to use the gce inventory plugin, which dynamically queries GCE and tells Ansible what nodes can be managed. -gce.py -++++++ +Note that when using the inventory script ``gce.py``, you also need to populate the ``gce.ini`` file that you can find in the plugins/inventory directory of the ansible checkout. + +To use the GCE dynamic inventory script, copy ``gce.py`` from ``plugings/inventory`` into your inventory directory and make it executable. You can specify credentials for ``gce.py`` using the ``GCE_INI_PATH`` environment variable -- the default is to look for gce.ini in the same directory as the inventory script. + +Let's see if inventory is working: + +.. code-block: bash -To use the GCE dynamic inventory script, copy ``gce.py`` from ``plugings/inventory`` into your inventory directory and make it executable. You can specify credentials for ``gce.py`` using the ``GCE_INI_PATH`` environment variable. + $ ./gce.py --list -Let's test our inventory script to see if it can talk to Google Cloud. 
+You should see output describing the hosts you have, if any, running in Google Compute Engine.
+
+Now let's see if we can use the inventory script to talk to Google.

 .. code-block:: bash

     $ GCE_INI_PATH=~/.gce.ini ansible all -i gce.py -m setup
     hostname | success >> {
         "ansible_facts": {
             "ansible_all_ipv4_addresses": [
                 "x.x.x.x"
             ],

-The recommended way to use the inventory is to create an ``inventory`` directory, and place both the ``gce.py`` script and a file containing ``localhost`` in it.
+As with all dynamic inventory plugins in Ansible, you can configure the inventory path in ansible.cfg.  The recommended way to use the inventory is to create an ``inventory`` directory, and place both the ``gce.py`` script and a file containing ``localhost`` in it.  This can allow for cloud inventory to be used alongside local inventory (such as a physical datacenter) or machines running in different providers.

 Executing ``ansible`` or ``ansible-playbook`` and specifying the ``inventory`` directory instead of an individual file will cause ansible to evaluate each file in that directory for inventory.

-Let's test our inventory script to see if it can talk to Google Cloud:
+Let's once again use our inventory script to see if it can talk to Google Cloud:

 .. code-block:: bash

     $ ansible all -i inventory/ -m setup
     hostname | success >> {
         "ansible_facts": {
             "ansible_all_ipv4_addresses": [
                 "x.x.x.x"
             ],

-The output should be similar to the previous command.
+The output should be similar to the previous command.  If you're wanting less output and just want to check for SSH connectivity, use "-m ping" instead.

 Use Cases
 ---------

-For the following use case, I'm using a small shell script as a wrapper.
+For the following use case, let's use this small shell script as a wrapper.

 .. code-block:: bash

     #!/bin/bash
     PLAYBOOK="$1"
@@ -146,41 +163,83 @@ A playbook would look like this:
     - name: Create instance(s)
       hosts: localhost
       gather_facts: no
+      connection: local
+
       vars:
        machine_type: n1-standard-1 # default
        image: debian-7
        service_account_email: unique-id@developer.gserviceaccount.com
        pem_file: /path/to/project.pem
        project_id: project-id
+
       tasks:
        - name: Launch instances
-         local_action: gce instance_names=dev machine_type={{ machine_type }} image={{ image }} service_account_email={{ service_account_email }} pem_file={{ pem_file }} project_id={{ project_id }}
-         register: gce
-       - name: Wait for SSH to come up
-         local_action: wait_for host={{ item.public_ip }} port=22 delay=10 timeout=60 state=started
-         with_items: gce.instance_data
-
-Create a web server
-```````````````````
+         gce:
+             instance_names: dev
+             machine_type: "{{ machine_type }}"
+             image: "{{ image }}"
+             service_account_email: "{{ service_account_email }}"
+             pem_file: "{{ pem_file }}"
+             project_id: "{{ project_id }}"
+             tags: webserver
+         register: gce
+
+       - name: Wait for SSH to come up
+         wait_for: host={{ item.public_ip }} port=22 delay=10 timeout=60
+         with_items: gce.instance_data
+
+       - add_host: hostname={{ item.public_ip }} groupname=new_instances
+
+    - name: Manage new instances
+      hosts: new_instances
+      connection: ssh
+      roles:
+        - base_configuration
+        - production_server
+
+Note that use of the "add_host" module above creates a temporary, in-memory group.  This means that a play in the same playbook can then manage machines
+in the 'new_instances' group, if so desired.  Any sort of arbitrary configuration is possible at this point.
+
+Configuring instances in a group
+````````````````````````````````
+
+All of the created instances in GCE are grouped by tag. 
Since this is a cloud, it's probably best to ignore hostnames and just focus on group management.
+
+Normally we'd also use roles here, but the following example is a simple one. Here we will also use the "gce_net" module to open up access to port 80 on
+these nodes.
+
+The variables in the 'vars' section could also be kept in a 'vars_files' file or something encrypted with Ansible-vault, if you so choose.  This is just
+a basic example of what is possible::

+  - name: Setup web servers
+    hosts: tag_webserver
+    gather_facts: no
+
+    vars:
+      machine_type: n1-standard-1 # default
+      image: debian-7
+      service_account_email: unique-id@developer.gserviceaccount.com
+      pem_file: /path/to/project.pem
+      project_id: project-id
+
+    tasks:
+
+    - name: Install lighttpd
+      apt: pkg=lighttpd state=installed
+      sudo: True
+
+    - name: Allow HTTP
+      local_action: gce_net
+      args:
+        fwname: "all-http"
+        name: "default"
+        allowed: "tcp:80"
+        state: "present"
+        service_account_email: "{{ service_account_email }}"
+        pem_file: "{{ pem_file }}"
+        project_id: "{{ project_id }}"

-With this example we will install a web server (lighttpd) on our new instance and ensure that the port 80 is open for incoming connections.
-
-.. code-block:: yaml
+By pointing your browser to the IP of the server, you should see a page welcoming you.

-    - name: Create a firewall rule to allow HTTP
-      hosts: dev
-      gather_facts: no
-      vars:
-        machine_type: n1-standard-1 # default
-        image: debian-7
-        service_account_email: unique-id@developer.gserviceaccount.com
-        pem_file: /path/to/project.pem
-        project_id: project-id
-      tasks:
-        - name: Install lighttpd
-          apt: pkg=lighttpd state=installed
-          sudo: True
-        - name: Allow HTTP
-          local_action: gce_net fwname=all-http name=default allowed=tcp:80 state=present service_account_email={{ service_account_email }} pem_file={{ pem_file }} project_id={{ project_id }}
+Upgrades to this documentation are welcome, hit the github link at the top right of this page if you would like to make additions!

-By pointing your browser to the IP of the server, you should see a page welcoming you.
diff --git a/docsite/rst/guides.rst b/docsite/rst/guides.rst
index 05af9b023d7..0585d966097 100644
--- a/docsite/rst/guides.rst
+++ b/docsite/rst/guides.rst
@@ -8,6 +8,7 @@ This section is new and evolving. The idea here is explore particular use cases

    guide_aws
    guide_rax
+   guide_gce
    guide_vagrant
    guide_rolling_upgrade

diff --git a/library/cloud/nova_group b/library/cloud/nova_group
deleted file mode 100644
index 21393a79afe..00000000000
--- a/library/cloud/nova_group
+++ /dev/null
@@ -1,343 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2013, John Dewey 
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see .
- -import locale -import os -import six - -try: - from novaclient.openstack.common import uuidutils - from novaclient.openstack.common import strutils - from novaclient.v1_1 import client - from novaclient.v1_1 import security_groups - from novaclient.v1_1 import security_group_rules - from novaclient import exceptions -except ImportError: - print("failed=True msg='novaclient is required for this module to work'") - -DOCUMENTATION = ''' ---- -module: security_group -version_added: "1.6" -short_description: Maintain nova security groups. -description: - - Manage nova security groups using the python-novaclient library. -options: - - login_username: - description: - - Login username to authenticate to keystone. If not set then the value of the OS_USERNAME environment variable is used. - required: false - default: None - login_password: - description: - - Password of login user. If not set then the value of the OS_PASSWORD environment variable is used. - required: false - default: None - login_tenant_name: - description: - - The tenant name of the login user. If not set then the value of the OS_TENANT_NAME environment variable is used. - required: false - default: None - auth_url: - description: - - The keystone url for authentication. If not set then the value of the OS_AUTH_URL environment variable is used. - required: false - default: None - region_name: - description: - - Name of the region. - required: false - default: None - name: - description: - - Name of the security group. - required: true - description: - description: - - Description of the security group. - required: true - rules: - description: - - List of firewall rules to enforce in this group (see example). - Must specify either an IPv4 'cidr' address or 'group' UUID. - required: true - state: - description: - - Indicate desired state of the resource. - choices: ['present', 'absent'] - required: false - default: 'present' - -requirements: ["novaclient"] -''' - -EXAMPLES = ''' -- name: create example group and rules - local_action: - module: security_group - name: example - description: an example nova group - rules: - - ip_protocol: tcp - from_port: 80 - to_port: 80 - cidr: 0.0.0.0/0 - - ip_protocol: tcp - from_port: 3306 - to_port: 3306 - group: "{{ group_uuid }}" - - ip_protocol: icmp - from_port: -1 - to_port: -1 - cidr: 0.0.0.0/0 - -- name: delete rule from example group - local_action: - module: security_group - name: example - description: an example nova group - rules: - - ip_protocol: tcp - from_port: 80 - to_port: 80 - cidr: 0.0.0.0/0 - - ip_protocol: icmp - from_port: -1 - to_port: -1 - cidr: 0.0.0.0/0 - state: absent -''' - -class NovaGroup(object): - def __init__(self, client): - self._sg = security_groups.SecurityGroupManager(client) - - # Taken from novaclient/v1_1/shell.py. 
- def _get_secgroup(self, secgroup): - # Check secgroup is an UUID - if uuidutils.is_uuid_like(strutils.safe_encode(secgroup)): - try: - sg = self._sg.get(secgroup) - return sg - except exceptions.NotFound: - return False - - # Check secgroup as a name - for s in self._sg.list(): - encoding = (locale.getpreferredencoding() or - sys.stdin.encoding or - 'UTF-8') - if not six.PY3: - s.name = s.name.encode(encoding) - if secgroup == s.name: - return s - return False - - -class SecurityGroup(NovaGroup): - def __init__(self, client, module): - super(SecurityGroup, self).__init__(client) - self._module = module - self._name = module.params.get('name') - self._description = module.params.get('description') - - def get(self): - return self._get_secgroup(self._name) - - def create(self): - return self._sg.create(self._name, self._description) - - def delete(self): - return self._sg.delete(self._name) - - -class SecurityGroupRule(NovaGroup): - def __init__(self, client, module): - super(SecurityGroupRule, self).__init__(client) - self._module = module - self._name = module.params.get('name') - self._rules = module.params.get('rules') - self._validate_rules() - self._sgr = security_group_rules.SecurityGroupRuleManager(client) - self._secgroup = self._get_secgroup(self._name) - self._current_rules = self._lookup_dict(self._secgroup.rules) - - def _concat_security_group_rule(self, rule): - """ - Normalize the given rule into a string in the format of: - protocol-from_port-to_port-group - The `group` needs a bit of massaging. - 1. If an empty dict -- return None. - 2. If a dict -- lookup group UUID (novaclient only returns the name). - 3. Return `group` from rules dict. - - :param rule: A novaclient SecurityGroupRule object. - """ - group = rule.get('group') - # Oddly novaclient occasionaly returns None as {}. - if group is not None and not any(group): - group = None - elif type(group) == dict: - g = group.get('name') - group = self._get_secgroup(g) - r = "%s-%s-%s-%s" % (rule.get('ip_protocol'), - rule.get('from_port'), - rule.get('to_port'), - group) - return r - - def _lookup_dict(self, rules): - """ - Populate a dict with current rules. - - :param rule: A novaclient SecurityGroupRule object. - """ - return {self._concat_security_group_rule(rule): rule for rule in rules} - - def _get_rule(self, rule): - """ - Return rule when found and False when not. - - :param rule: A novaclient SecurityGroupRule object. 
- """ - r = self._concat_security_group_rule(rule) - if r in self._current_rules: - return self._current_rules[r] - - def _validate_rules(self): - for rule in self._rules: - if 'group' in rule and 'cidr' in rule: - self._module.fail_json(msg="Specify group OR cidr") - - def create(self): - changed = False - filtered = [rule for rule in self._rules - if rule.get('state') != 'absent'] - for rule in filtered: - if not self._get_rule(rule): - if 'cidr' in rule: - self._sgr.create(self._secgroup.id, - rule.get('ip_protocol'), - rule.get('from_port'), - rule.get('to_port'), - cidr=rule.get('cidr')) - changed = True - if 'group' in rule: - self._sgr.create(self._secgroup.id, - rule.get('ip_protocol'), - rule.get('from_port'), - rule.get('to_port'), - group_id=rule.get('group')) - changed = True - return changed - - def delete(self): - changed = False - filtered = [rule for rule in self._rules - if rule.get('state') == 'absent'] - for rule in filtered: - r = self._get_rule(rule) - if r: - self._sgr.delete(r.get('id')) - changed = True - return changed - - def update(self): - changed = False - if self.create(): - changed = True - if self.delete(): - changed = True - return changed - - -def main(): - module = AnsibleModule( - argument_spec=dict( - name=dict(required=True), - description=dict(required=True), - rules=dict(), - login_username=dict(), - login_password=dict(no_log=True), - login_tenant_name=dict(), - auth_url= dict(), - region_name=dict(default=None), - state = dict(default='present', choices=['present', 'absent']), - ), - supports_check_mode=False, - ) - login_username = module.params.get('login_username') - login_password = module.params.get('login_password') - login_tenant_name = module.params.get('login_tenant_name') - auth_url = module.params.get('auth_url') - - # allow stackrc environment variables to be used if ansible vars aren't set - if not login_username and 'OS_USERNAME' in os.environ: - login_username = os.environ['OS_USERNAME'] - - if not login_password and 'OS_PASSWORD' in os.environ: - login_password = os.environ['OS_PASSWORD'] - - if not login_tenant_name and 'OS_TENANT_NAME' in os.environ: - login_tenant_name = os.environ['OS_TENANT_NAME'] - - if not auth_url and 'OS_AUTH_URL' in os.environ: - auth_url = os.environ['OS_AUTH_URL'] - - nova = client.Client(login_username, - login_password, - login_tenant_name, - auth_url, - service_type='compute') - try: - nova.authenticate() - except exceptions.Unauthorized as e: - module.fail_json(msg="Invalid OpenStack Nova credentials.: %s" % e.message) - except exceptions.AuthorizationFailure as e: - module.fail_json(msg="Unable to authorize user: %s" % e.message) - - rules = module.params.get('rules') - state = module.params.get('state') - security_group = SecurityGroup(nova, module) - - changed = False - group_id = None - group = security_group.get() - if group: - group_id = group.id - if state == 'absent': - security_group.delete() - changed = True - elif state == 'present': - group = security_group.create() - changed = True - group_id = group.id - - if rules is not None: - security_group_rules = SecurityGroupRule(nova, module) - if security_group_rules.update(): - changed = True - - module.exit_json(changed=changed, group_id=group_id) - - -# import module snippets -from ansible.module_utils.basic import * - -main() \ No newline at end of file From 94198690c1e041a228a2586df891eba2db88acf3 Mon Sep 17 00:00:00 2001 From: tmperret Date: Fri, 14 Mar 2014 22:39:57 +0000 Subject: [PATCH 367/772] Made load_balencers in ec2_lc 
non-required as not all ASGs require an ELB --- library/cloud/ec2_asg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2_asg b/library/cloud/ec2_asg index a411bcd4279..15df0ed41b9 100644 --- a/library/cloud/ec2_asg +++ b/library/cloud/ec2_asg @@ -159,7 +159,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True, type='str'), - load_balancers = dict(required=True, type='list'), + load_balancers = dict(required=False, type='list'), availability_zones = dict(required=True, type='list'), launch_config_name = dict(required=True, type='str'), min_size = dict(required=True, type='int'), From b81a3bbb20d68cc95125fb5b846872c3ab273a22 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 15 Mar 2014 01:10:15 -0400 Subject: [PATCH 368/772] fixed bad detection of change on default and premature break --- library/files/acl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/library/files/acl b/library/files/acl index b8d2b85cb65..63400b3d61a 100644 --- a/library/files/acl +++ b/library/files/acl @@ -115,6 +115,9 @@ def split_entry(entry): print "wtf?? %s => %s" % (entry,a) raise e + if d: + d = True + if t.startswith("u"): t = "user" elif t.startswith("g"): @@ -248,7 +251,6 @@ def main(): if not old_permissions == permissions: changed = True break - break if not matched: changed=True From e05e57e0a9068ed54e0712bd435e98f9958290f2 Mon Sep 17 00:00:00 2001 From: follower Date: Sat, 15 Mar 2014 19:16:14 +1300 Subject: [PATCH 369/772] Fix typo in example name --- library/cloud/ec2_ami_search | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2_ami_search b/library/cloud/ec2_ami_search index a1f53cd3d67..932dca855a8 100644 --- a/library/cloud/ec2_ami_search +++ b/library/cloud/ec2_ami_search @@ -67,7 +67,7 @@ author: Lorin Hochstein ''' EXAMPLES = ''' -- name: Lauch an Ubuntu 12.04 (Precise Pangolin) EC2 instance +- name: Launch an Ubuntu 12.04 (Precise Pangolin) EC2 instance hosts: 127.0.0.1 connection: local tasks: From 76d5b9640623064bf3b006cfb05f9f84c58b2df0 Mon Sep 17 00:00:00 2001 From: George Yoshida Date: Sat, 15 Mar 2014 15:30:43 +0900 Subject: [PATCH 370/772] Fix docs for playbooks_intro remove unnecessary period. --- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index 70925521007..70db3f7fe27 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -370,7 +370,7 @@ package is installed. Try it! To see what hosts would be affected by a playbook before you run it, you can do this:: - ansible-playbook playbook.yml --list-hosts. + ansible-playbook playbook.yml --list-hosts .. seealso:: From af6109c169ebb838481176779415b9f3e014f340 Mon Sep 17 00:00:00 2001 From: follower Date: Sat, 15 Mar 2014 20:04:38 +1300 Subject: [PATCH 371/772] Fix typo "resort" -> "result" --- library/files/template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/files/template b/library/files/template index 29fa905207f..39f92c72a72 100644 --- a/library/files/template +++ b/library/files/template @@ -17,7 +17,7 @@ description: the template's machine, C(template_uid) the owner, C(template_path) the absolute path of the template, C(template_fullpath) is the absolute path of the template, and C(template_run_date) is the date that the template was rendered. 
Note that including - a string that uses a date in the template will resort in the template being marked 'changed' + a string that uses a date in the template will result in the template being marked 'changed' each time." options: src: From 5ef3cb26d4d7f3db886ee3af79b1c8c4b09fec05 Mon Sep 17 00:00:00 2001 From: George Yoshida Date: Sat, 15 Mar 2014 23:36:42 +0900 Subject: [PATCH 372/772] Fix docs for faq - correct spelling --- docsite/rst/faq.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index 82841d43812..f42ff0c999d 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -140,16 +140,16 @@ Then you can use the facts inside your template, like this:: .. _programatic_access_to_a_variable: -How do I access a variable name programatically? -++++++++++++++++++++++++++++++++++++++++++++++++ +How do I access a variable name programmatically? ++++++++++++++++++++++++++++++++++++++++++++++++++ An example may come up where we need to get the ipv4 address of an arbitrary interface, where the interface to be used may be supplied via a role parameter or other input. Variable names can be built by adding strings together, like so:: {{ hostvars[inventory_hostname]['ansible_' + which_interface]['ipv4']['address'] }} -The trick about going through hostvars is neccessary because it's a dictionary of the entire namespace of variables. 'inventory_hostname' -is a magic variable that indiciates the current host you are looping over in the host loop. +The trick about going through hostvars is necessary because it's a dictionary of the entire namespace of variables. 'inventory_hostname' +is a magic variable that indicates the current host you are looping over in the host loop. .. _first_host_in_a_group: From 2d25577e1104101c67bb5ac41790693780a6d51b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 13 Mar 2014 23:07:35 -0400 Subject: [PATCH 373/772] Fixes and cleanup to file functions and module - unified set attribute functions ... 
not sure why 2 identical functions exist with diff names, now there are 3 while I repoint all modules to 1
- fixed issue with symlinks being created w/o existing src when force=no
- refactored conditionals, simplified where possible
- added tests for symlink to nonexistent source, with both force options
- made symlink over existing target atomic (force)
---
 lib/ansible/module_utils/basic.py             |  21 +-
 library/files/file                            | 187 ++++++++----------
 .../roles/test_file/tasks/main.yml            |  19 ++
 3 files changed, 112 insertions(+), 115 deletions(-)

diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index 42b9d3d669b..2f0c0f61aca 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -464,7 +464,7 @@ class AnsibleModule(object):
                 changed = True
         return changed

-    def set_file_attributes_if_different(self, file_args, changed):
+    def set_fs_attributes_if_different(self, file_args, changed):
         # set modes owners and context as needed
         changed = self.set_context_if_different(
             file_args['path'], file_args['secontext'], changed
@@ -481,19 +481,10 @@ class AnsibleModule(object):
         return changed

     def set_directory_attributes_if_different(self, file_args, changed):
-        changed = self.set_context_if_different(
-            file_args['path'], file_args['secontext'], changed
-        )
-        changed = self.set_owner_if_different(
-            file_args['path'], file_args['owner'], changed
-        )
-        changed = self.set_group_if_different(
-            file_args['path'], file_args['group'], changed
-        )
-        changed = self.set_mode_if_different(
-            file_args['path'], file_args['mode'], changed
-        )
-        return changed
+        return self.set_fs_attributes_if_different(file_args, changed)
+
+    def set_file_attributes_if_different(self, file_args, changed):
+        return self.set_fs_attributes_if_different(file_args, changed)

     def add_path_info(self, kwargs):
         '''
@@ -963,7 +954,7 @@ class AnsibleModule(object):
             context = self.selinux_default_context(dest)

         try:
-            # Optimistically try a rename, solves some corner cases and can avoid useless work.
+            # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
             os.rename(src, dest)
         except (IOError,OSError), e:
             # only try workarounds for errno 18 (cross device), 1 (not permited) and 13 (permission denied)
diff --git a/library/files/file b/library/files/file
index 7a038c9f362..4d6fc0e7b40 100644
--- a/library/files/file
+++ b/library/files/file
@@ -139,9 +139,6 @@ EXAMPLES = '''

 def main():

-    # FIXME: pass this around, should not use global
-    global module
-
     module = AnsibleModule(
         argument_spec = dict(
             state = dict(choices=['file','directory','link','hard','touch','absent'], default=None),
@@ -151,6 +148,7 @@ def main():
             force = dict(required=False,default=False,type='bool'),
             diff_peek = dict(default=None),
             validate = dict(required=False, default=None),
+            src = dict(required=False, default=None),
         ),
         add_file_common_args=True,
         supports_check_mode=True
@@ -159,10 +157,14 @@ def main():
     params = module.params
     state = params['state']
     force = params['force']
+    diff_peek = params['diff_peek']
+    src = params['src']
+
+    # modify source as we later reload and pass, specially relevant when used by other modules.
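+    # Example of assumed caller behavior (illustrative comment, not part of the
+    # original patch): the copy and template action plugins invoke this module
+    # with original_basename alongside path, and rely on the expanded value
+    # stored back into params here when the two are joined further below.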
params['path'] = path = os.path.expanduser(params['path']) # short-circuit for diff_peek - if params.get('diff_peek', None) is not None: + if diff_peek is not None: appears_binary = False try: f = open(path) @@ -174,8 +176,8 @@ def main(): pass module.exit_json(path=path, changed=False, appears_binary=appears_binary) + # Find out current state prev_state = 'absent' - if os.path.lexists(path): if os.path.islink(path): prev_state = 'link' @@ -187,76 +189,60 @@ def main(): # could be many other things, but defaulting to file prev_state = 'file' - if prev_state is not None and state is None: - # set state to current type of file - state = prev_state - elif state is None: - # set default state to file - state = 'file' + # state should default to file, but since that creates many conflicts, + # default to 'current' when it exists. + if state is None: + if prev_state != 'absent': + state = prev_state + else: + state = 'file' # source is both the source of a symlink or an informational passing of the src for a template module # or copy module, even if this module never uses it, it is needed to key off some things - - src = params.get('src', None) - if src: + if src is not None: src = os.path.expanduser(src) - if src is not None and os.path.isdir(path) and state not in ["link", "absent"]: - if params['original_basename']: - basename = params['original_basename'] - else: - basename = os.path.basename(src) - params['path'] = path = os.path.join(path, basename) + # original_basename is used by other modules that depend on file. + if os.path.isdir(path) and state not in ["link", "absent", "directory"]: + if params['original_basename']: + basename = params['original_basename'] + else: + basename = os.path.basename(src) + params['path'] = path = os.path.join(path, basename) + else: + if state in ['link','hard']: + module.fail_json(msg='src and dest are required for creating links') file_args = module.load_file_common_arguments(params) - - if state in ['link','hard'] and (src is None or path is None): - module.fail_json(msg='src and dest are required for creating links') - elif path is None: - module.fail_json(msg='path is required') - changed = False recurse = params['recurse'] + if recurse and state != 'directory': + module.fail_json(path=path, msg="recurse option requires state to be 'directory'") - if recurse and state == 'file' and prev_state == 'directory': - state = 'directory' - - if prev_state != 'absent' and state == 'absent': - try: - if prev_state == 'directory': - if os.path.islink(path): - if module.check_mode: - module.exit_json(changed=True) - os.unlink(path) - else: + if state == 'absent': + if state != prev_state: + if not module.check_mode: + if prev_state == 'directory': try: - if module.check_mode: - module.exit_json(changed=True) shutil.rmtree(path, ignore_errors=False) except Exception, e: module.fail_json(msg="rmtree failed: %s" % str(e)) - else: - if module.check_mode: - module.exit_json(changed=True) - os.unlink(path) - except Exception, e: - module.fail_json(path=path, msg=str(e)) - module.exit_json(path=path, changed=True) - - if prev_state != 'absent' and prev_state != state: - if not (force and (prev_state == 'file' or prev_state == 'hard' or prev_state == 'directory') and state == 'link') and state != 'touch': - module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, src)) - - if prev_state == 'absent' and state == 'absent': - module.exit_json(path=path, changed=False) - - if state == 'file': + else: + try: + os.unlink(path) + 
except Exception, e: + module.fail_json(path=path, msg="unlinking failed: %s " % str(e)) + module.exit_json(path=path, changed=True) + else: + module.exit_json(path=path, changed=False) - if prev_state != 'file': - module.fail_json(path=path, msg='file (%s) does not exist, use copy or template module to create' % path) + elif state == 'file': + if state != prev_state: + # file is not absent and any other state is a conflict + module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state)) - changed = module.set_file_attributes_if_different(file_args, changed) + changed = module.set_fs_attributes_if_different(file_args, changed) module.exit_json(path=path, changed=changed) elif state == 'directory': @@ -266,31 +252,29 @@ def main(): os.makedirs(path) changed = True - changed = module.set_directory_attributes_if_different(file_args, changed) + changed = module.set_fs_attributes_if_different(file_args, changed) + if recurse: for root,dirs,files in os.walk( file_args['path'] ): - for dir in dirs: - dirname=os.path.join(root,dir) + for fsobj in dirs + files: + fsname=os.path.join(root, fsobj) tmp_file_args = file_args.copy() - tmp_file_args['path']=dirname - changed = module.set_directory_attributes_if_different(tmp_file_args, changed) - for file in files: - filename=os.path.join(root,file) - tmp_file_args = file_args.copy() - tmp_file_args['path']=filename - changed = module.set_file_attributes_if_different(tmp_file_args, changed) + tmp_file_args['path']=fsname + changed = module.set_fs_attributes_if_different(tmp_file_args, changed) + module.exit_json(path=path, changed=changed) elif state in ['link','hard']: + if not os.path.exists(src) and not force: + module.fail_json(path=path, src=src, msg='src file does not exist') + if state == 'hard': - if os.path.isabs(src): - abs_src = src - else: + if not os.path.isabs(src): module.fail_json(msg="absolute paths are required") - if not os.path.exists(abs_src) and not force: - module.fail_json(path=path, src=src, msg='src file does not exist') + elif prev_state in ['file', 'hard', 'directory'] and not force: + module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, src)) if prev_state == 'absent': changed = True @@ -300,26 +284,29 @@ def main(): changed = True elif prev_state == 'hard': if not (state == 'hard' and os.stat(path).st_ino == os.stat(src).st_ino): + changed = True if not force: module.fail_json(dest=path, src=src, msg='Cannot link, different hard link exists at destination') - changed = True - elif prev_state == 'file': - if not force: - module.fail_json(dest=path, src=src, msg='Cannot link, file exists at destination') + elif prev_state in ['file', 'directory']: changed = True - elif prev_state == 'directory': if not force: - module.fail_json(dest=path, src=src, msg='Cannot link, directory exists at destination') - changed = True + module.fail_json(dest=path, src=src, msg='Cannot link, %s exists at destination' % prev_state) else: module.fail_json(dest=path, src=src, msg='unexpected position reached') if changed and not module.check_mode: if prev_state != 'absent': + # try to replace atomically + tmppath = ".%s.%s.%s.tmp" % (path,os.getpid(),time.time()) try: - os.unlink(path) + if state == 'hard': + os.link(src,tmppath) + else: + os.symlink(src, tmppath) + os.rename(tmppath, path) except OSError, e: - module.fail_json(path=path, msg='Error while removing existing target: %s' % str(e)) + os.unlink(tmppath) + module.fail_json(path=path, msg='Error while replacing: %s' % 
str(e)) try: if state == 'hard': os.link(src,path) @@ -328,30 +315,30 @@ def main(): except OSError, e: module.fail_json(path=path, msg='Error while linking: %s' % str(e)) - changed = module.set_file_attributes_if_different(file_args, changed) + changed = module.set_fs_attributes_if_different(file_args, changed) module.exit_json(dest=path, src=src, changed=changed) elif state == 'touch': - if module.check_mode: - module.exit_json(path=path, skipped=True) + if not module.check_mode: + + if prev_state == 'absent': + try: + open(path, 'w').close() + except OSError, e: + module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e)) + elif prev_state in ['file', 'directory']: + try: + os.utime(path, None) + except OSError, e: + module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e)) + else: + module.fail_json(msg='Cannot touch other than files and directories') + + module.set_fs_attributes_if_different(file_args, True) - if prev_state not in ['file', 'directory', 'absent']: - module.fail_json(msg='Cannot touch other than files and directories') - if prev_state != 'absent': - try: - os.utime(path, None) - except OSError, e: - module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e)) - else: - try: - open(path, 'w').close() - except OSError, e: - module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e)) - module.set_file_attributes_if_different(file_args, True) module.exit_json(dest=path, changed=True) - else: - module.fail_json(path=path, msg='unexpected position reached') + module.fail_json(path=path, msg='unexpected position reached') # import module snippets from ansible.module_utils.basic import * diff --git a/test/integration/roles/test_file/tasks/main.yml b/test/integration/roles/test_file/tasks/main.yml index 174f66a9fba..588c1b6747b 100644 --- a/test/integration/roles/test_file/tasks/main.yml +++ b/test/integration/roles/test_file/tasks/main.yml @@ -164,5 +164,24 @@ that: - "file11_result.uid == 1235" +- name: fail to create soft link to non existant file + file: src=/noneexistant dest={{output_dir}}/soft2.txt state=link force=no + register: file12_result + ignore_errors: true + +- name: verify that link was not created + assert: + that: + - "file12_result.failed == true" + +- name: force creation soft link to non existant + file: src=/noneexistant dest={{output_dir}}/soft2.txt state=link force=yes + register: file13_result + +- name: verify that link was created + assert: + that: + - "file13_result.changed == true" + - name: remote directory foobar file: path={{output_dir}}/foobar state=absent From c67631ccd009a4a3efecc2dc329101381c23b05a Mon Sep 17 00:00:00 2001 From: = Date: Sat, 15 Mar 2014 20:43:10 -0400 Subject: [PATCH 374/772] resolves #6494 - added note to page --- docsite/rst/playbooks_lookups.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docsite/rst/playbooks_lookups.rst b/docsite/rst/playbooks_lookups.rst index afa12821546..1f4e4ed5d7f 100644 --- a/docsite/rst/playbooks_lookups.rst +++ b/docsite/rst/playbooks_lookups.rst @@ -7,6 +7,8 @@ in Ansible, and are typically used to load variables or templates with informati .. note:: This is considered an advanced feature, and many users will probably not rely on these features. +.. note:: Lookups occur on the local computer, not on the remote computer. + .. contents:: Topics .. 
_getting_file_contents: From c4f20be77457500dd9dc9a798af7f188447e3040 Mon Sep 17 00:00:00 2001 From: follower Date: Mon, 17 Mar 2014 00:23:16 +1300 Subject: [PATCH 375/772] Fix typo "a the" -> "the" --- library/cloud/ec2_snapshot | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2_snapshot b/library/cloud/ec2_snapshot index e637ebefa38..2b8a9dabba6 100644 --- a/library/cloud/ec2_snapshot +++ b/library/cloud/ec2_snapshot @@ -60,7 +60,7 @@ options: aliases: [] instance_id: description: - - instance that has a the required volume to snapshot mounted + - instance that has the required volume to snapshot mounted required: false default: null aliases: [] From 9a8a8dfb2a6ce6e67d560adf091a171ad9b5c585 Mon Sep 17 00:00:00 2001 From: Steve Smith Date: Tue, 25 Feb 2014 13:37:00 +0100 Subject: [PATCH 376/772] Add a JIRA control module for Ansible. This version uses fetch_url rather than urllib2. --- library/web_infrastructure/jira | 347 ++++++++++++++++++++++++++++++++ 1 file changed, 347 insertions(+) create mode 100644 library/web_infrastructure/jira diff --git a/library/web_infrastructure/jira b/library/web_infrastructure/jira new file mode 100644 index 00000000000..950fc3dbfcf --- /dev/null +++ b/library/web_infrastructure/jira @@ -0,0 +1,347 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Steve Smith +# Atlassian open-source approval reference OSR-76. +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = """ +module: jira +version_added: "1.6" +short_description: create and modify issues in a JIRA instance +description: + - Create and modify issues in a JIRA instance. + +options: + uri: + required: true + description: + - Base URI for the JIRA instance + + operation: + required: true + aliases: [ command ] + choices: [ create, comment, edit, fetch, transition ] + description: + - The operation to perform. + + username: + required: true + description: + - The username to log-in with. + + password: + required: true + description: + - The password to log-in with. + + project: + aliases: [ prj ] + required: false + description: + - The project for this operation. Required for issue creation. + + summary: + required: false + description: + - The issue summary, where appropriate. + + description: + required: false + description: + - The issue description, where appropriate. + + issuetype: + required: false + description: + - The issue type, for issue creation. + + issue: + required: false + description: + - An existing issue key to operate on. + + comment: + required: false + description: + - The comment text to add. + + status: + required: false + description: + - The desired status; only relevant for the transition operation. + + assignee: + required: false + description: + - Sets the assignee on create or transition operations. Note not all transitions will allow this. 
+ + fields: + required: false + description: + - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API (possibly after merging with other required data, as when passed to create). See examples for more information, and the JIRA REST API for the structure required for various fields. + +notes: + - "Currently this only works with basic-auth." + +author: Steve Smith +""" + +EXAMPLES = """ +# Create a new issue and add a comment to it: +- name: Create an issue + jira: uri={{server}} username={{user}} password={{pass}} + project=ANS operation=create + summary="Example Issue" description="Created using Ansible" issuetype=Task + register: issue + +- name: Comment on issue + jira: uri={{server}} username={{user}} password={{pass}} + issue={{issue.meta.key}} operation=comment + comment="A comment added by Ansible" + +# Assign an existing issue using edit +- name: Assign an issue using free-form fields + jira: uri={{server}} username={{user}} password={{pass}} + issue={{issue.meta.key}} operation=edit + assignee=ssmith + +# Create an issue with an existing assignee +- name: Create an assigned issue + jira: uri={{server}} username={{user}} password={{pass}} + project=ANS operation=create + summary="Assigned issue" description="Created and assigned using Ansible" + issuetype=Task assignee=ssmith + +# Edit an issue using free-form fields +- name: Set the labels on an issue using free-form fields + jira: uri={{server}} username={{user}} password={{pass}} + issue={{issue.meta.key}} operation=edit + args: { fields: {labels: ["autocreated", "ansible"]}} + +- name: Set the labels on an issue, YAML version + jira: uri={{server}} username={{user}} password={{pass}} + issue={{issue.meta.key}} operation=edit + args: + fields: + labels: + - "autocreated" + - "ansible" + - "yaml" + +# Retrieve metadata for an issue and use it to create an account +- name: Get an issue + jira: uri={{server}} username={{user}} password={{pass}} + project=ANS operation=fetch issue="ANS-63" + register: issue + +- name: Create a unix account for the reporter + sudo: true + user: name="{{issue.meta.fields.creator.name}}" comment="{{issue.meta.fields.creator.displayName}}" + +# Transition an issue by target status +- name: Close the issue + jira: uri={{server}} username={{user}} password={{pass}} + issue={{issue.meta.key}} operation=transition status="Done" +""" + +import json +import base64 + +def request(url, user, passwd, data=None, method=None): + if data: + data = json.dumps(data) + + # NOTE: fetch_url uses a password manager, which follows the + # standard request-then-challenge basic-auth semantics. However as + # JIRA allows some unauthorised operations it doesn't necessarily + # send the challenge, so the request occurs as the anonymous user, + # resulting in unexpected results. To work around this we manually + # inject the basic-auth header up-front to ensure that JIRA treats + # the requests as authorized for this user. 
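+    # Illustrative example: user='admin', passwd='secret' produces the header
+    # 'Authorization: Basic YWRtaW46c2VjcmV0'; encodestring() appends a
+    # trailing newline, hence the replace('\n', '') below.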
+ auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '') + response, info = fetch_url(module, url, data=data, method=method, + headers={'Content-Type':'application/json', + 'Authorization':"Basic %s" % auth}) + + if info['status'] not in (200, 204): + module.fail_json(msg=info['msg']) + + body = response.read() + + if body: + return json.loads(body) + else: + return {} + +def post(url, user, passwd, data): + return request(url, user, passwd, data=data, method='POST') + +def put(url, user, passwd, data): + return request(url, user, passwd, data=data, method='PUT') + +def get(url, user, passwd): + return request(url, user, passwd) + + +def create(restbase, user, passwd, params): + createfields = { + 'project': { 'key': params['project'] }, + 'summary': params['summary'], + 'description': params['description'], + 'issuetype': { 'name': params['issuetype'] }} + + # Merge in any additional or overridden fields + if params['fields']: + createfields.update(params['fields']) + + data = {'fields': createfields} + + url = restbase + '/issue/' + + ret = post(url, user, passwd, data) + + return ret + + +def comment(restbase, user, passwd, params): + data = { + 'body': params['comment'] + } + + url = restbase + '/issue/' + params['issue'] + '/comment' + + ret = post(url, user, passwd, data) + + return ret + + +def edit(restbase, user, passwd, params): + data = { + 'fields': params['fields'] + } + + url = restbase + '/issue/' + params['issue'] + + ret = put(url, user, passwd, data) + + return ret + + +def fetch(restbase, user, passwd, params): + url = restbase + '/issue/' + params['issue'] + ret = get(url, user, passwd) + return ret + + +def transition(restbase, user, passwd, params): + # Find the transition id + turl = restbase + '/issue/' + params['issue'] + "/transitions" + tmeta = get(turl, user, passwd) + + target = params['status'] + tid = None + for t in tmeta['transitions']: + if t['name'] == target: + tid = t['id'] + break + + if not tid: + raise ValueError("Failed find valid transition for '%s'" % target) + + # Perform it + url = restbase + '/issue/' + params['issue'] + "/transitions" + data = { 'transition': { "id" : tid }, + 'fields': params['fields']} + + ret = post(url, user, passwd, data) + + return ret + + +# Some parameters are required depending on the operation: +OP_REQUIRED = dict(create=['project', 'issuetype', 'summary', 'description'], + comment=['issue', 'comment'], + edit=[], + fetch=['issue'], + transition=['status']) + +def main(): + + global module + module = AnsibleModule( + argument_spec=dict( + uri=dict(required=True), + operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition'], + aliases=['command'], required=True), + username=dict(required=True), + password=dict(required=True), + project=dict(), + summary=dict(), + description=dict(), + issuetype=dict(), + issue=dict(aliases=['ticket']), + comment=dict(), + status=dict(), + assignee=dict(), + fields=dict(default={}) + ), + supports_check_mode=False + ) + + op = module.params['operation'] + + # Check we have the necessary per-operation parameters + missing = [] + for parm in OP_REQUIRED[op]: + if not module.params[parm]: + missing.append(parm) + if missing: + module.fail_json(msg="Operation %s require the following missing parameters: %s" % (op, ",".join(missing))) + + # Handle rest of parameters + uri = module.params['uri'] + user = module.params['username'] + passwd = module.params['password'] + if module.params['assignee']: + module.params['fields']['assignee'] = { 'name': 
module.params['assignee'] } + + if not uri.endswith('/'): + uri = uri+'/' + restbase = uri + 'rest/api/2' + + # Dispatch + try: + + # Lookup the corresponding method for this operation. This is + # safe as the AnsibleModule should remove any unknown operations. + thismod = sys.modules[__name__] + method = getattr(thismod, op) + + ret = method(restbase, user, passwd, module.params) + + except Exception as e: + return module.fail_json(msg=e.message) + + + module.exit_json(changed=True, meta=ret) + + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main() From d18619690145c9b942f7453e8702233ded8140c7 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 16 Mar 2014 12:29:19 -0400 Subject: [PATCH 377/772] Remove nova_fip (redundant merge relative to quantum module) --- CHANGELOG.md | 1 - library/cloud/nova_fip | 234 ----------------------------------------- 2 files changed, 235 deletions(-) delete mode 100644 library/cloud/nova_fip diff --git a/CHANGELOG.md b/CHANGELOG.md index 044b3e1aa28..3f0615b6dce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,7 +17,6 @@ New Modules: * system: locale_gen * cloud: digital_ocean_domain * cloud: digital_ocean_sshkey -* cloud: nova_fip (floating IPs) * cloud: rax_identity * cloud: ec2_asg (configure autoscaling groups) diff --git a/library/cloud/nova_fip b/library/cloud/nova_fip deleted file mode 100644 index d10b4d6ab62..00000000000 --- a/library/cloud/nova_fip +++ /dev/null @@ -1,234 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, John Dewey -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . - -try: - from novaclient import utils - from novaclient.v1_1 import client - from novaclient.v1_1 import servers -except ImportError: - print("failed=True msg='novaclient is required for this module to work'") - -DOCUMENTATION = ''' ---- -module: nova_fip -version_added: "1.6" -short_description: Associate an OpenStack floating IP with a server. -description: - - Manage nova floating IPs using the python-novaclient library. -options: - - login_username: - description: - - Login username to authenticate to keystone. If not set then the value of the OS_USERNAME environment variable is used. - required: false - default: None - login_password: - description: - - Password of login user. If not set then the value of the OS_PASSWORD environment variable is used. - required: false - default: None - login_tenant_name: - description: - - The tenant name of the login user. If not set then the value of the OS_TENANT_NAME environment variable is used. - required: false - default: None - auth_url: - description: - - The keystone url for authentication. If not set then the value of the OS_AUTH_URL environment variable is used. - required: false - default: None - region_name: - description: - - Name of the region. - required: false - default: None - server: - description: - - Name or ID of server. 
- required: false - default: None - floating_ip: - description: - - The public IP address to associate with the instance. - - If absent, allocate a new address - required: false - default: None - pool: - description: - - The pool the floating_ip belongs to. - required: false - default: external - state: - description: - - Indicate desired state of the resource. - choices: ['present', 'absent'] - required: false - default: 'present' - -requirements: ["novaclient"] -notes: - - This module will return C(floating_ip) on success, which will contain the - public IP address associated with the instance. - - There may be a delay between the time the floating IP is assigned and when - the cloud instance is reachable via the new address. Use wait_for and pause - to delay further playbook execution until the instance is reachable, if - necessary. -''' - -EXAMPLES = ''' -- name: associate a floating IP with a server - nova_fip: server={{ UUID or name }} ip={{ IP }} - -- name: disassociate a floating IP from a server - nova_fip: server={{ UUID or name }} ip={{ IP }} state=absent - -- name: allocate a new floating IP and associate it with a server - nova_fip: server={{ UUID or name }} - -- name: allocate a new floating IP without associating it to anything - nova_fip: - register: fip - -- name: deallocate a floating IP - nova_fip: ip={{ IP }} state=absent - -- name: output the IP - debug: msg="Allocated IP is {{ fip.floating_ip }}" -''' - -def _floating_ip_already_associated(server, floating_ip): - changed = False - for network, ip_list in server.networks.iteritems(): - if floating_ip in ip_list: - changed = True - return changed - -def _associate_floating_ip(nova, floating_ip, server): - s = _find_server(nova, server) - if not _floating_ip_already_associated(s, floating_ip): - s.add_floating_ip(floating_ip) - return True - -def _disassociate_floating_ip(nova, floating_ip, server): - s = _find_server(nova, server) - if _floating_ip_already_associated(s, floating_ip): - s.remove_floating_ip(floating_ip) - return True - -def _find_server(nova, server): - return utils.find_resource(nova.servers, server) - -def _allocate_address(nova, pool): - address = None - floating_ips = nova.floating_ips.list() - for fip in floating_ips: - # allocated but not assigned - if fip.pool == pool and fip.instance_id is None: - address = fip - - # return an available floating ip - if address: - return address - # allocate and return a floating ip - else: - return nova.floating_ips.create(pool=pool) - -def _deallocate_address(nova, floating_ip): - changed = False - floating_ips = nova.floating_ips.list() - - for fip in floating_ips: - if fip.ip == floating_ip: - nova.floating_ips.delete(fip.id) - changed = True - return changed - -def main(): - module = AnsibleModule( - argument_spec=dict( - server=dict(required=False), - floating_ip=dict(required=False, aliases=['ip']), - pool=dict(default='external'), - login_username=dict(), - login_password=dict(no_log=True), - login_tenant_name=dict(), - auth_url= dict(), - region_name=dict(default=None), - state = dict(default='present', choices=['present', 'absent']), - ), - supports_check_mode=True, - ) - login_username = module.params.get('login_username') - login_password = module.params.get('login_password') - login_tenant_name = module.params.get('login_tenant_name') - auth_url = module.params.get('auth_url') - - # allow stackrc environment variables to be used if ansible vars aren't set - if not login_username and 'OS_USERNAME' in os.environ: - login_username = 
os.environ['OS_USERNAME'] - - if not login_password and 'OS_PASSWORD' in os.environ: - login_password = os.environ['OS_PASSWORD'] - - if not login_tenant_name and 'OS_TENANT_NAME' in os.environ: - login_tenant_name = os.environ['OS_TENANT_NAME'] - - if not auth_url and 'OS_AUTH_URL' in os.environ: - auth_url = os.environ['OS_AUTH_URL'] - - nova = client.Client(login_username, - login_password, - login_tenant_name, - auth_url, - service_type='compute') - try: - nova.authenticate() - except exceptions.Unauthorized as e: - module.fail_json(msg="Invalid OpenStack Nova credentials.: %s" % e.message) - except exceptions.AuthorizationFailure as e: - module.fail_json(msg="Unable to authorize user: %s" % e.message) - - server = module.params.get('server') - floating_ip = module.params.get('floating_ip') - pool = module.params.get('pool') - state = module.params.get('state') - - if state == 'present': - if floating_ip is None: - if server is None: - address = _allocate_address(nova, pool) - module.exit_json(changed=True, floating_ip=address.ip) - else: - address = _allocate_address(nova, pool) - changed = _associate_floating_ip(nova, address.ip, server) - module.exit_json(changed=True, floating_ip=address.ip) - else: - changed = _associate_floating_ip(nova, floating_ip, server) - module.exit_json(changed=changed) - else: - if server is None: - changed = _deallocate_address(nova, floating_ip) - module.exit_json(changed=changed) - else: - changed = _disassociate_floating_ip(nova, floating_ip, server) - module.exit_json(changed=changed) - -# import module snippets -from ansible.module_utils.basic import * - -main() \ No newline at end of file From 9180013682ddc7e6d2592d192e866e5c4b84a8c4 Mon Sep 17 00:00:00 2001 From: Ali Asad Lotia Date: Sun, 16 Mar 2014 16:24:04 +0000 Subject: [PATCH 378/772] Fix acl module doc and error messages - Fix typos in module doc - Provide clearer error message when entry and etype are both missing --- library/files/acl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/files/acl b/library/files/acl index 63400b3d61a..d15a31e618c 100644 --- a/library/files/acl +++ b/library/files/acl @@ -95,7 +95,7 @@ EXAMPLES = ''' - acl: name=/etc/foo.d entity=joe etype=user permissions=rw default=yes state=present # Same as previous but using entry shorthand -- acl: name=/etc/foo.d entrty="default:user:joe:rw-" state=present +- acl: name=/etc/foo.d entry="default:user:joe:rw-" state=present # Obtain the acl for a specific file - acl: name=/etc/foo.conf @@ -218,7 +218,7 @@ def main(): if state in ['present','absent']: if not entry and not etype: - module.fail_json(msg="%s requries to have ither either etype and permissions or entry to be set" % state) + module.fail_json(msg="%s requires either etype and permissions or just entry be set" % state) if entry: if etype or entity or permissions: From a7da5d870227902816fb9df4bd4dde853cdefd35 Mon Sep 17 00:00:00 2001 From: Cristian Ciupitu Date: Thu, 23 Jan 2014 16:14:24 +0200 Subject: [PATCH 379/772] Micro-optimization of inventory.expand_hosts.detect_range --- lib/ansible/inventory/expand_hosts.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/ansible/inventory/expand_hosts.py b/lib/ansible/inventory/expand_hosts.py index a1db9f1c6a4..b1cc0dcb82f 100644 --- a/lib/ansible/inventory/expand_hosts.py +++ b/lib/ansible/inventory/expand_hosts.py @@ -41,10 +41,7 @@ def detect_range(line = None): Returnes True if the given line contains a pattern, else False. 
''' - if (line.find("[") != -1 and - line.find(":") != -1 and - line.find("]") != -1 and - line.index("[") < line.index(":") < line.index("]")): + if 0 <= line.find("[") < line.find(":") < line.find("]"): return True else: return False From 0749112286bb28a22f8c455df8be2e4821ab5d08 Mon Sep 17 00:00:00 2001 From: Cristian Ciupitu Date: Thu, 23 Jan 2014 17:02:17 +0200 Subject: [PATCH 380/772] Micro-optimization: replace s.find(x)!=-1 with x in s timeit shows a speedup of ~3x on Python 2.7.5 x86_64. It also makes the code a bit shorter. --- bin/ansible-galaxy | 2 +- docsite/rst/developing_modules.rst | 2 +- hacking/module_formatter.py | 2 +- lib/ansible/callbacks.py | 2 +- lib/ansible/inventory/ini.py | 16 ++++++++-------- lib/ansible/module_common.py | 4 ++-- lib/ansible/playbook/play.py | 2 +- lib/ansible/runner/__init__.py | 8 ++++---- lib/ansible/runner/action_plugins/async.py | 2 +- lib/ansible/runner/action_plugins/copy.py | 2 +- lib/ansible/runner/action_plugins/script.py | 2 +- lib/ansible/runner/connection_plugins/ssh.py | 9 +++++---- lib/ansible/utils/__init__.py | 17 +++++++++-------- library/cloud/virt | 2 +- library/commands/command | 2 +- library/database/mysql_user | 2 +- library/files/file | 2 +- library/system/open_iscsi | 2 +- library/system/service | 18 +++++++++--------- 19 files changed, 50 insertions(+), 48 deletions(-) diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy index a528b950f83..7b346ac6e44 100755 --- a/bin/ansible-galaxy +++ b/bin/ansible-galaxy @@ -655,7 +655,7 @@ def execute_install(args, options, parser): if role_name == "" or role_name.startswith("#"): continue - elif role_name.find(',') != -1: + elif ',' in role_name: role_name,role_version = role_name.split(',',1) role_name = role_name.strip() role_version = role_version.strip() diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 3f1c1e68dca..e8da717aed5 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -123,7 +123,7 @@ a lot shorter than this:: for arg in arguments: # ignore any arguments without an equals in it - if arg.find("=") != -1: + if "=" in arg: (key, value) = arg.split("=") diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index d5ed3031508..0a36c3951ca 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -185,7 +185,7 @@ def process_module(module, options, env, template, outputname, module_map): fname = module_map[module] # ignore files with extensions - if os.path.basename(fname).find(".") != -1: + if "." 
in os.path.basename(fname): return # use ansible core library to parse out doc metadata YAML and plaintext examples diff --git a/lib/ansible/callbacks.py b/lib/ansible/callbacks.py index b56d1f90695..c1b41a2a92f 100644 --- a/lib/ansible/callbacks.py +++ b/lib/ansible/callbacks.py @@ -250,7 +250,7 @@ def regular_generic_msg(hostname, result, oneline, caption): def banner_cowsay(msg): - if msg.find(": [") != -1: + if ": [" in msg: msg = msg.replace("[","") if msg.endswith("]"): msg = msg[:-1] diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py index 718fee1338d..3b38911d253 100644 --- a/lib/ansible/inventory/ini.py +++ b/lib/ansible/inventory/ini.py @@ -69,7 +69,7 @@ class InventoryParser(object): line = utils.before_comment(line).strip() if line.startswith("[") and line.endswith("]"): active_group_name = line.replace("[","").replace("]","") - if line.find(":vars") != -1 or line.find(":children") != -1: + if ":vars" in line or ":children" in line: active_group_name = active_group_name.rsplit(":", 1)[0] if active_group_name not in self.groups: new_group = self.groups[active_group_name] = Group(name=active_group_name) @@ -95,11 +95,11 @@ class InventoryParser(object): # FQDN foo.example.com if hostname.count(".") == 1: (hostname, port) = hostname.rsplit(".", 1) - elif (hostname.find("[") != -1 and - hostname.find("]") != -1 and - hostname.find(":") != -1 and + elif ("[" in hostname and + "]" in hostname and + ":" in hostname and (hostname.rindex("]") < hostname.rindex(":")) or - (hostname.find("]") == -1 and hostname.find(":") != -1)): + ("]" not in hostname and ":" in hostname)): (hostname, port) = hostname.rsplit(":", 1) hostnames = [] @@ -152,7 +152,7 @@ class InventoryParser(object): line = line.strip() if line is None or line == '': continue - if line.startswith("[") and line.find(":children]") != -1: + if line.startswith("[") and ":children]" in line: line = line.replace("[","").replace(":children]","") group = self.groups.get(line, None) if group is None: @@ -177,7 +177,7 @@ class InventoryParser(object): group = None for line in self.lines: line = line.strip() - if line.startswith("[") and line.find(":vars]") != -1: + if line.startswith("[") and ":vars]" in line: line = line.replace("[","").replace(":vars]","") group = self.groups.get(line, None) if group is None: @@ -189,7 +189,7 @@ class InventoryParser(object): elif line == '': pass elif group: - if line.find("=") == -1: + if "=" not in line: raise errors.AnsibleError("variables assigned to group must be in key=value form") else: (k, v) = [e.strip() for e in line.split("=", 1)] diff --git a/lib/ansible/module_common.py b/lib/ansible/module_common.py index da02882d935..a6af86d6fcb 100644 --- a/lib/ansible/module_common.py +++ b/lib/ansible/module_common.py @@ -95,7 +95,7 @@ class ModuleReplacer(object): for line in lines: - if line.find(REPLACER) != -1: + if REPLACER in line: output.write(self.slurp(os.path.join(self.snippet_path, "basic.py"))) snippet_names.append('basic') elif line.startswith('from ansible.module_utils.'): @@ -103,7 +103,7 @@ class ModuleReplacer(object): import_error = False if len(tokens) != 3: import_error = True - if line.find(" import *") == -1: + if " import *" not in line: import_error = True if import_error: raise errors.AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 2289b0a4d3c..c1f5ba4e86f 100644 --- a/lib/ansible/playbook/play.py 
+++ b/lib/ansible/playbook/play.py @@ -707,7 +707,7 @@ class Play(object): # ************************************************* def _has_vars_in(self, msg): - return ((msg.find("$") != -1) or (msg.find("{{") != -1)) + return "$" in msg or "{{" in msg # ************************************************* diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 27314ed8cad..385cdd141c1 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -415,7 +415,7 @@ class Runner(object): environment_string = self._compute_environment_string(inject) - if tmp.find("tmp") != -1 and ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')): + if "tmp" in tmp and ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')): # deal with possible umask issues once sudo'ed to other user cmd_chmod = "chmod a+r %s" % remote_module_path self._low_level_exec_command(conn, cmd_chmod, tmp, sudoable=False) @@ -469,7 +469,7 @@ class Runner(object): cmd = " ".join([environment_string.strip(), shebang.replace("#!","").strip(), cmd]) cmd = cmd.strip() - if tmp.find("tmp") != -1 and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: + if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: if not self.sudo or self.su or self.sudo_user == 'root' or self.su_user == 'root': # not sudoing or sudoing to root, so can cleanup files in the same step cmd = cmd + "; rm -rf %s >/dev/null 2>&1" % tmp @@ -485,7 +485,7 @@ class Runner(object): else: res = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, in_data=in_data) - if tmp.find("tmp") != -1 and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: + if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): # not sudoing to root, so maybe can't delete files as that other user # have to clean up temp files as original user in a second step @@ -883,7 +883,7 @@ class Runner(object): return False def _late_needs_tmp_path(self, conn, tmp, module_style): - if tmp.find("tmp") != -1: + if "tmp" in tmp: # tmp has already been created return False if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.su: diff --git a/lib/ansible/runner/action_plugins/async.py b/lib/ansible/runner/action_plugins/async.py index 12fe279a471..ac0d6e84928 100644 --- a/lib/ansible/runner/action_plugins/async.py +++ b/lib/ansible/runner/action_plugins/async.py @@ -33,7 +33,7 @@ class ActionModule(object): module_name = 'command' module_args += " #USE_SHELL" - if tmp.find("tmp") == -1: + if "tmp" not in tmp: tmp = self.runner._make_tmp_path(conn) (module_path, is_new_style, shebang) = self.runner._copy_module(conn, tmp, module_name, module_args, inject, complex_args=complex_args) diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py index 79acdaba587..d395d1df6f5 100644 --- a/lib/ansible/runner/action_plugins/copy.py +++ b/lib/ansible/runner/action_plugins/copy.py @@ -331,7 +331,7 @@ class ActionModule(object): src = open(source) src_contents = src.read(8192) st = os.stat(source) - if src_contents.find("\x00") != -1: + if "\x00" in src_contents: diff['src_binary'] = 1 elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF: diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF diff --git 
a/lib/ansible/runner/action_plugins/script.py b/lib/ansible/runner/action_plugins/script.py index 149be3cc113..f50e2b08d6f 100644 --- a/lib/ansible/runner/action_plugins/script.py +++ b/lib/ansible/runner/action_plugins/script.py @@ -128,7 +128,7 @@ class ActionModule(object): result = handler.run(conn, tmp, 'raw', module_args, inject) # clean up after - if tmp.find("tmp") != -1 and not C.DEFAULT_KEEP_REMOTE_FILES: + if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES: self.runner._low_level_exec_command(conn, 'rm -rf %s >/dev/null 2>&1' % tmp, tmp) result.result['changed'] = True diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index 22189caadf3..cc548a1c9b2 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -68,9 +68,9 @@ class Connection(object): cp_in_use = False cp_path_set = False for arg in self.common_args: - if arg.find("ControlPersist") != -1: + if "ControlPersist" in arg: cp_in_use = True - if arg.find("ControlPath") != -1: + if "ControlPath" in arg: cp_path_set = True if cp_in_use and not cp_path_set: @@ -137,7 +137,7 @@ class Connection(object): data = host_fh.read() host_fh.close() for line in data.split("\n"): - if line is None or line.find(" ") == -1: + if line is None or " " not in line: continue tokens = line.split() if tokens[0].find(self.HASHED_KEY_MAGIC) == 0: @@ -324,7 +324,8 @@ class Connection(object): # the host to known hosts is not intermingled with multiprocess output. fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN) fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN) - controlpersisterror = stderr.find('Bad configuration option: ControlPersist') != -1 or stderr.find('unknown configuration option: ControlPersist') != -1 + controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or \ + 'unknown configuration option: ControlPersist' in stderr if C.HOST_KEY_CHECKING: if ssh_cmd[0] == "sshpass" and p.returncode == 6: diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 6c2f8112aba..476622e6766 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -193,7 +193,7 @@ def check_conditional(conditional, basedir, inject, fail_on_undefined=False): conditional = conditional.replace("jinja2_compare ","") # allow variable names - if conditional in inject and str(inject[conditional]).find('-') == -1: + if conditional in inject and '-' not in str(inject[conditional]): conditional = inject[conditional] conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined) original = str(conditional).replace("jinja2_compare ","") @@ -206,9 +206,9 @@ def check_conditional(conditional, basedir, inject, fail_on_undefined=False): # variable was undefined. 
If we happened to be
            # looking for an undefined variable, return True,
            # otherwise fail
-            if conditional.find("is undefined") != -1:
+            if "is undefined" in conditional:
                 return True
-            elif conditional.find("is defined") != -1:
+            elif "is defined" in conditional:
                 return False
             else:
                 raise errors.AnsibleError("error while evaluating conditional: %s" % original)
@@ -331,9 +331,9 @@ def parse_json(raw_data):

 def smush_braces(data):
     ''' smush Jinaj2 braces so unresolved templates like {{ foo }} don't get parsed weird by key=value code '''
-    while data.find('{{ ') != -1:
+    while '{{ ' in data:
         data = data.replace('{{ ', '{{')
-    while data.find(' }}') != -1:
+    while ' }}' in data:
         data = data.replace(' }}', '}}')
     return data

@@ -374,7 +374,7 @@ def parse_yaml(data, path_hint=None):

 def process_common_errors(msg, probline, column):
     replaced = probline.replace(" ","")

-    if replaced.find(":{{") != -1 and replaced.find("}}") != -1:
+    if ":{{" in replaced and "}}" in replaced:
         msg = msg + """
This one looks easy to fix.  YAML thought it was looking for the start of a
hash/dictionary and was confused to see a second "{".  Most likely this was
@@ -542,7 +542,7 @@ def parse_kv(args):
         vargs = [x.decode('utf-8') for x in shlex.split(args, posix=True)]
         #vargs = shlex.split(str(args), posix=True)
         for x in vargs:
-            if x.find("=") != -1:
+            if "=" in x:
                 k, v = x.split("=",1)
                 options[k]=v
     return options
@@ -1023,7 +1023,7 @@ def listify_lookup_plugin_terms(terms, basedir, inject):
         # not sure why the "/" is in above code :)
         try:
             new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
-            if isinstance(new_terms, basestring) and new_terms.find("{{") != -1:
+            if isinstance(new_terms, basestring) and "{{" in new_terms:
                 pass
             else:
                 terms = new_terms
@@ -1097,3 +1097,4 @@ def before_comment(msg):
     return msg


+
diff --git a/library/cloud/virt b/library/cloud/virt
index 3400c3ff727..78d2aa1ab91 100644
--- a/library/cloud/virt
+++ b/library/cloud/virt
@@ -120,7 +120,7 @@ class LibvirtConnection(object):
         cmd = "uname -r"
         rc, stdout, stderr = self.module.run_command(cmd)

-        if stdout.find("xen") != -1:
+        if "xen" in stdout:
             conn = libvirt.open(None)
         else:
             conn = libvirt.open(uri)
diff --git a/library/commands/command b/library/commands/command
index ba9ae30a7f2..b35501f1bf8 100644
--- a/library/commands/command
+++ b/library/commands/command
@@ -180,7 +180,7 @@ class CommandModule(AnsibleModule):
             params['removes'] = None
             params['shell'] = False
             params['executable'] = None
-            if args.find("#USE_SHELL") != -1:
+            if "#USE_SHELL" in args:
                 args = args.replace("#USE_SHELL", "")
                 params['shell'] = True
diff --git a/library/database/mysql_user b/library/database/mysql_user
index e7fad3d77c6..b7c84fd1c3e 100644
--- a/library/database/mysql_user
+++ b/library/database/mysql_user
@@ -259,7 +259,7 @@ def privileges_unpack(priv):
     output = {}
     for item in priv.split('/'):
         pieces = item.split(':')
-        if pieces[0].find('.') != -1:
+        if '.' 
in pieces[0]: pieces[0] = pieces[0].split('.') for idx, piece in enumerate(pieces): if pieces[0][idx] != "*": diff --git a/library/files/file b/library/files/file index 7a038c9f362..65a3f417c03 100644 --- a/library/files/file +++ b/library/files/file @@ -168,7 +168,7 @@ def main(): f = open(path) b = f.read(8192) f.close() - if b.find("\x00") != -1: + if "\x00" in b: appears_binary = True except: pass diff --git a/library/system/open_iscsi b/library/system/open_iscsi index 2e57727cf59..3fd2b1a5a21 100644 --- a/library/system/open_iscsi +++ b/library/system/open_iscsi @@ -138,7 +138,7 @@ def iscsi_get_cached_nodes(module, portal=None): # older versions of scsiadm don't have nice return codes # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details # err can contain [N|n]o records... - elif rc == 21 or (rc == 255 and err.find("o records found") != -1): + elif rc == 21 or (rc == 255 and "o records found" in err): nodes = [] else: module.fail_json(cmd=cmd, rc=rc, msg=err) diff --git a/library/system/service b/library/system/service index 5180a14d82b..ed30b72aa5b 100644 --- a/library/system/service +++ b/library/system/service @@ -483,9 +483,9 @@ class LinuxService(Service): if self.svc_initctl and self.running is None: # check the job status by upstart response initctl_rc, initctl_status_stdout, initctl_status_stderr = self.execute_command("%s status %s" % (self.svc_initctl, self.name)) - if initctl_status_stdout.find("stop/waiting") != -1: + if "stop/waiting" in initctl_status_stdout: self.running = False - elif initctl_status_stdout.find("start/running") != -1: + elif "start/running" in initctl_status_stdout: self.running = True if self.svc_cmd and self.svc_cmd.endswith("rc-service") and self.running is None: @@ -525,7 +525,7 @@ class LinuxService(Service): # if the job status is still not known check it by special conditions if self.running is None: - if self.name == 'iptables' and status_stdout.find("ACCEPT") != -1: + if self.name == 'iptables' and "ACCEPT" in status_stdout: # iptables status command output is lame # TODO: lookup if we can use a return code for this instead? 
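                 # Illustrative background: on RHEL-family systems 'service
                 # iptables status' dumps the loaded rules (e.g. lines like
                 # 'Chain INPUT (policy ACCEPT)') rather than printing a plain
                 # running/stopped message, so grepping for ACCEPT is the
                 # closest available signal.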
self.running = True @@ -631,16 +631,16 @@ class LinuxService(Service): if line.startswith('rename'): self.changed = True break - elif self.enable and line.find('do not exist') != -1: + elif self.enable and 'do not exist' in line: self.changed = True break - elif not self.enable and line.find('already exist') != -1: + elif not self.enable and 'already exist' in line: self.changed = True break # Debian compatibility for line in err.splitlines(): - if self.enable and line.find('no runlevel symlinks to modify') != -1: + if self.enable and 'no runlevel symlinks to modify' in line: self.changed = True break @@ -982,9 +982,9 @@ class SunOSService(Service): # enabled false for line in stdout.split("\n"): if line.find("enabled") == 0: - if line.find("true") != -1: + if "true" in line: enabled = True - if line.find("temporary") != -1: + if "temporary" in line: temporary = True startup_enabled = (enabled and not temporary) or (not enabled and temporary) @@ -1176,7 +1176,7 @@ def main(): (rc, out, err) = service.modify_service_state() if rc != 0: - if err and err.find("is already") != -1: + if err and "is already" in err: # upstart got confused, one such possibility is MySQL on Ubuntu 12.04 # where status may report it has no start/stop links and we could # not get accurate status From 138b45e32f35f8ab647925c7d4a9f0e9c69dadc7 Mon Sep 17 00:00:00 2001 From: Cristian Ciupitu Date: Fri, 24 Jan 2014 16:31:45 +0200 Subject: [PATCH 381/772] hostname module: don't filter hostnamectl with awk There's no need to filter hostnamectl's output with awk because its man page says: hostnamectl [OPTIONS...] {COMMAND} --static, --transient, --pretty If status is used (or no explicit command is given) and one of those fields is given, hostnamectl will print out just this selected hostname. E.g. hostnamectl --static status => ansible.example.org --- library/system/hostname | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/hostname b/library/system/hostname index cca2364b611..c6d1f819451 100644 --- a/library/system/hostname +++ b/library/system/hostname @@ -285,7 +285,7 @@ class FedoraStrategy(GenericStrategy): (rc, out, err)) def get_permanent_hostname(self): - cmd = 'hostnamectl status | awk \'/^ *Static hostname:/{printf("%s", $3)}\'' + cmd = 'hostnamectl --static status' rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True) if rc != 0: self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % From 2b2b174fd39ea79573573411d454c936f4a3d9c6 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 16 Mar 2014 13:36:52 -0400 Subject: [PATCH 382/772] Merge conflict. --- library/system/setup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/system/setup b/library/system/setup index c6583d42029..71120b72dc2 100644 --- a/library/system/setup +++ b/library/system/setup @@ -2305,8 +2305,8 @@ def ansible_facts(module): def run_setup(module): - setup_options = {} - facts = ansible_facts(module) + setup_options = dict(module_setup=True) + facts = ansible_facts() for (k, v) in facts.items(): setup_options["ansible_%s" % k.replace('-', '_')] = v From 58eec2e4c22c800f1d8a68f0345f2872ece9c6e4 Mon Sep 17 00:00:00 2001 From: Dag Wieers Date: Tue, 4 Mar 2014 18:23:43 +0100 Subject: [PATCH 383/772] Populate module_setup from the setup module rather than special code elsewhere This small change allows for individual setup actions to populate the SETUP_CACHE and not cause a subsequent facts-gathering when not needed. 
This follows the standard of other facts modules as laid out in #1206
and implemented in fedfd187749654105a22be20c27e0050bc722d0a. It allows
testing whether the setup module has already been run even when
gather_facts was explicitly disabled.
---
 lib/ansible/playbook/__init__.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py
index 65965526251..332bfe55c80 100644
--- a/lib/ansible/playbook/__init__.py
+++ b/lib/ansible/playbook/__init__.py
@@ -525,7 +525,6 @@ class PlayBook(object):
         # let runner template out future commands
         setup_ok = setup_results.get('contacted', {})
         for (host, result) in setup_ok.iteritems():
-            self.SETUP_CACHE[host].update({'module_setup': True})
             self.SETUP_CACHE[host].update(result.get('ansible_facts', {}))

         return setup_results

From f6a4c20cc0a1a35af0826f255a58f0f3dfd79791 Mon Sep 17 00:00:00 2001
From: Oleg Bulatov
Date: Sun, 16 Mar 2014 21:47:26 +0400
Subject: [PATCH 384/772] Make parsing variables for hosts and groups more consistent

This commit makes it possible to specify arrays in a [group:vars] block.

Signed-off-by: Oleg Bulatov
---
 lib/ansible/inventory/ini.py                 | 38 +++++++++-----------
 test/units/TestInventory.py                  |  5 +--
 test/units/inventory_test_data/complex_hosts |  1 +
 3 files changed, 20 insertions(+), 24 deletions(-)

diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py
index 3b38911d253..9863de17b8e 100644
--- a/lib/ansible/inventory/ini.py
+++ b/lib/ansible/inventory/ini.py
@@ -48,6 +48,20 @@ class InventoryParser(object):
         self._parse_group_variables()
         return self.groups

+    @staticmethod
+    def _parse_value(v):
+        if "#" not in v:
+            try:
+                return ast.literal_eval(v)
+            # Using explicit exceptions.
+            # Likely a string that literal_eval does not like. We will then just set it.
+            except ValueError:
+                # For some reason this was thought to be malformed.
+                pass
+            except SyntaxError:
+                # Is this a hash with an equals at the end?
+                pass
+        return v

     # [webservers]
     # alpha
@@ -123,22 +137,7 @@
                     (k,v) = t.split("=", 1)
                 except ValueError, e:
                     raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e)))
-
-                # If there is a hash in the value don't pass it through to ast at ast will split at the hash.
-                if "#" in v:
-                    host.set_variable(k, v)
-                else:
-                    try:
-                        host.set_variable(k,ast.literal_eval(v))
-                    # Using explicit exceptions.
-                    # Likely a string that literal_eval does not like. We wil then just set it.
-                    except ValueError:
-                        # For some reason this was thought to be malformed.
-                        host.set_variable(k, v)
-                    except SyntaxError:
-                        # Is this a hash with an equals at the end?
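# [Editor's sketch] What the new _parse_value() yields for the values
# exercised in test/units/TestInventory.py below -- ast.literal_eval
# turns unquoted Python literals into typed values, and anything it
# rejects falls back to the raw string:
#
#     import ast
#
#     def parse_value(v):               # mirrors InventoryParser._parse_value
#         if "#" not in v:
#             try:
#                 return ast.literal_eval(v)
#             except (ValueError, SyntaxError):
#                 pass
#         return v
#
#     parse_value("1")                # -> 1, an int ('1' before this patch)
#     parse_value("['k1', 'k2']")     # -> ['k1', 'k2']
#     parse_value("10004 != 10005")   # -> '10004 != 10005' (left as a string)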
- host.set_variable(k, v) - + host.set_variable(k, self._parse_value(v)) self.groups[active_group_name].add_host(host) # [southeast:children] @@ -193,12 +192,7 @@ class InventoryParser(object): raise errors.AnsibleError("variables assigned to group must be in key=value form") else: (k, v) = [e.strip() for e in line.split("=", 1)] - # When the value is a single-quoted or double-quoted string - if re.match(r"^(['\"]).*\1$", v): - # Unquote the string - group.set_variable(k, re.sub(r"^['\"]|['\"]$", '', v)) - else: - group.set_variable(k, v) + group.set_variable(k, self._parse_value(v)) def get_host_variables(self, host): return {} diff --git a/test/units/TestInventory.py b/test/units/TestInventory.py index f8e9232c540..4aae739a233 100644 --- a/test/units/TestInventory.py +++ b/test/units/TestInventory.py @@ -236,9 +236,10 @@ class TestInventory(unittest.TestCase): print vars expected = dict( - a='1', b='2', c='3', d='10002', e='10003', f='10004 != 10005', + a=1, b=2, c=3, d=10002, e=10003, f='10004 != 10005', g=' g ', h=' h ', i="' i \"", j='" j', - rga='1', rgb='2', rgc='3', + k=[ 'k1', 'k2' ], + rga=1, rgb=2, rgc=3, inventory_hostname='rtp_a', inventory_hostname_short='rtp_a', group_names=[ 'eastcoast', 'nc', 'redundantgroup', 'redundantgroup2', 'redundantgroup3', 'rtp', 'us' ] ) diff --git a/test/units/inventory_test_data/complex_hosts b/test/units/inventory_test_data/complex_hosts index d7f172f203a..0217d03f993 100644 --- a/test/units/inventory_test_data/complex_hosts +++ b/test/units/inventory_test_data/complex_hosts @@ -40,6 +40,7 @@ e = 10003 h = ' h ' i = ' i " j = " j + k = ['k1', 'k2'] [rtp] rtp_a From 4a006a78b3825038b38522276ce6df2eed4b0643 Mon Sep 17 00:00:00 2001 From: Ali Asad Lotia Date: Sun, 16 Mar 2014 17:49:36 +0000 Subject: [PATCH 385/772] Whitespace fixes in incompatible param check - Remove extra whitespace in conditional that checks if etype, entity or permissions are also set when entry is set. --- library/files/acl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/files/acl b/library/files/acl index d15a31e618c..93431ecf472 100644 --- a/library/files/acl +++ b/library/files/acl @@ -221,7 +221,7 @@ def main(): module.fail_json(msg="%s requires either etype and permissions or just entry be set" % state) if entry: - if etype or entity or permissions: + if etype or entity or permissions: module.fail_json(msg="entry and another incompatible field (entity, etype or permissions) are also set") if entry.count(":") not in [2,3]: module.fail_json(msg="Invalid entry: '%s', it requires 3 or 4 sections divided by ':'" % entry) From 26efc7ace82bdbe54a72479659600e1ff267c842 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 16 Mar 2014 14:10:57 -0400 Subject: [PATCH 386/772] Revert "Populate module_setup from the setup module rather than special code elsewhere" This reverts commit 58eec2e4c22c800f1d8a68f0345f2872ece9c6e4. 
--- lib/ansible/playbook/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 7063e9fe808..918b9341717 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -529,6 +529,7 @@ class PlayBook(object): # let runner template out future commands setup_ok = setup_results.get('contacted', {}) for (host, result) in setup_ok.iteritems(): + self.SETUP_CACHE[host].update({'module_setup': True}) self.SETUP_CACHE[host].update(result.get('ansible_facts', {})) return setup_results From eef5a8a847770c472d1b3761157f6f947475645c Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 16 Mar 2014 14:11:27 -0400 Subject: [PATCH 387/772] Update setup module signature from merge commit. --- library/system/setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/setup b/library/system/setup index 71120b72dc2..ae03ad8d74d 100644 --- a/library/system/setup +++ b/library/system/setup @@ -2306,7 +2306,7 @@ def ansible_facts(module): def run_setup(module): setup_options = dict(module_setup=True) - facts = ansible_facts() + facts = ansible_facts(module) for (k, v) in facts.items(): setup_options["ansible_%s" % k.replace('-', '_')] = v From ba4fbd2b66104fb1622d4d5e66369a8a750f9363 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 16 Mar 2014 13:58:42 -0500 Subject: [PATCH 388/772] Adding man page for ansible-galaxy Partially resolves #6471 --- Makefile | 3 +- docs/man/man1/ansible-galaxy.1 | 180 +++++++++++++++++++++ docs/man/man1/ansible-galaxy.1.asciidoc.in | 167 +++++++++++++++++++ 3 files changed, 349 insertions(+), 1 deletion(-) create mode 100644 docs/man/man1/ansible-galaxy.1 create mode 100644 docs/man/man1/ansible-galaxy.1.asciidoc.in diff --git a/Makefile b/Makefile index 982cd143b27..41d80a13c3b 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,7 @@ OS = $(shell uname -s) # Manpages are currently built with asciidoc -- would like to move to markdown # This doesn't evaluate until it's called. The -D argument is the # directory of the target file ($@), kinda like `dirname`. 
-MANPAGES := docs/man/man1/ansible.1 docs/man/man1/ansible-playbook.1 docs/man/man1/ansible-pull.1 docs/man/man1/ansible-doc.1 +MANPAGES := docs/man/man1/ansible.1 docs/man/man1/ansible-playbook.1 docs/man/man1/ansible-pull.1 docs/man/man1/ansible-doc.1 docs/man/man1/ansible-galaxy.1 ifneq ($(shell which a2x 2>/dev/null),) ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $< ASCII2HTMLMAN = a2x -D docs/html/man/ -d manpage -f xhtml @@ -172,3 +172,4 @@ deb: debian webdocs: $(MANPAGES) (cd docsite/; make docs) +docs: $(MANPAGES) diff --git a/docs/man/man1/ansible-galaxy.1 b/docs/man/man1/ansible-galaxy.1 new file mode 100644 index 00000000000..af2285121a6 --- /dev/null +++ b/docs/man/man1/ansible-galaxy.1 @@ -0,0 +1,180 @@ +'\" t +.\" Title: ansible-galaxy +.\" Author: [see the "AUTHOR" section] +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 03/16/2014 +.\" Manual: System administration commands +.\" Source: Ansible 1.6 +.\" Language: English +.\" +.TH "ANSIBLE\-GALAXY" "1" "03/16/2014" "Ansible 1\&.6" "System administration commands" +.\" ----------------------------------------------------------------- +.\" * Define some portability stuff +.\" ----------------------------------------------------------------- +.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.\" http://bugs.debian.org/507673 +.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html +.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.ie \n(.g .ds Aq \(aq +.el .ds Aq ' +.\" ----------------------------------------------------------------- +.\" * set default formatting +.\" ----------------------------------------------------------------- +.\" disable hyphenation +.nh +.\" disable justification (adjust text to left margin only) +.ad l +.\" ----------------------------------------------------------------- +.\" * MAIN CONTENT STARTS HERE * +.\" ----------------------------------------------------------------- +.SH "NAME" +ansible-galaxy \- manage roles using galaxy\&.ansible\&.com +.SH "SYNOPSIS" +.sp +ansible\-galaxy [init|info|install|list|remove] [\-\-help] [options] \&... +.SH "DESCRIPTION" +.sp +\fBAnsible Galaxy\fR is a shared repository for Ansible roles (added in ansible version 1\&.2)\&. The ansible\-galaxy command can be used to manage these roles, or by creating a skeleton framework for roles you\(cqd like to upload to Galaxy\&. +.SH "COMMON OPTIONS" +.PP +\fB\-h\fR, \fB\-\-help\fR +.RS 4 +Show a help message related to the given sub\-command\&. +.RE +.SH "INSTALL" +.sp +The \fBinstall\fR sub\-command is used to install roles\&. +.SS "USAGE" +.sp +$ ansible\-galaxy install [options] [\-r FILE | role_name(s)[,version] | tar_file(s)] +.sp +Roles can be installed in several different ways: +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +A username\&.rolename[,version] \- this will install a single role\&. The Galaxy API will be contacted to provide the information about the role, and the corresponding \&.tar\&.gz will be downloaded from +\fBgithub\&.com\fR\&. If the version is omitted, the most recent version available will be installed\&. +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +A file name, using +\fB\-r\fR +\- this will install multiple roles listed one per line\&. 
The format of each line is the same as above: username\&.rolename[,version] +.RE +.sp +.RS 4 +.ie n \{\ +\h'-04'\(bu\h'+03'\c +.\} +.el \{\ +.sp -1 +.IP \(bu 2.3 +.\} +A \&.tar\&.gz of a valid role you\(cqve downloaded directly from +\fBgithub\&.com\fR\&. This is mainly useful when the system running Ansible does not have access to the Galaxy API, for instance when behind a firewall or proxy\&. +.RE +.SS "OPTIONS" +.PP +\fB\-f\fR, \fB\-\-force\fR +.RS 4 +Force overwriting an existing role\&. +.RE +.PP +\fB\-i\fR, \fB\-\-ignore\-errors\fR +.RS 4 +Ignore errors and continue with the next specified role\&. +.RE +.PP +\fB\-n\fR, \fB\-\-no\-deps\fR +.RS 4 +Don\(cqt download roles listed as dependencies\&. +.RE +.PP +\fB\-p\fR \fIROLES_PATH\fR, \fB\-\-roles\-path=\fR\fIROLES_PATH\fR +.RS 4 +The path to the directory containing your roles\&. The default is the +\fBroles_path\fR +configured in your +\fBansible\&.cfg\fR +file (/etc/ansible/roles if not configured) +.RE +.PP +\fB\-r\fR \fIROLE_FILE\fR, \fB\-\-role\-file=\fR\fIROLE_FILE\fR +.RS 4 +A file containing a list of roles to be imported, as specified above\&. This option cannot be used if a rolename or \&.tar\&.gz have been specified\&. +.RE +.SH "REMOVE" +.sp +The \fBremove\fR sub\-command is used to remove one or more roles\&. +.SS "USAGE" +.sp +$ ansible\-galaxy remove role1 role2 \&... +.SS "OPTIONS" +.PP +\fB\-p\fR \fIROLES_PATH\fR, \fB\-\-roles\-path=\fR\fIROLES_PATH\fR +.RS 4 +The path to the directory containing your roles\&. The default is the +\fBroles_path\fR +configured in your +\fBansible\&.cfg\fR +file (/etc/ansible/roles if not configured) +.RE +.SH "INIT" +.sp +The \fBinit\fR command is used to create an empty role suitable for uploading to https://galaxy\&.ansible\&.com (or for roles in general)\&. +.SS "USAGE" +.sp +$ ansible\-galaxy init [options] role_name +.SS "OPTIONS" +.PP +\fB\-f\fR, \fB\-\-force\fR +.RS 4 +Force overwriting an existing role\&. +.RE +.PP +\fB\-p\fR \fIINIT_PATH\fR, \fB\-\-init\-path=\fR\fIINIT_PATH\fR +.RS 4 +The path in which the skeleton role will be created\&.The default is the current working directory\&. +.RE +.SH "LIST" +.sp +The \fBlist\fR sub\-command is used to show what roles are currently instaled\&. You can specify a role name, and if installed only that role will be shown\&. +.SS "USAGE" +.sp +$ ansible\-galaxy list [role_name] +.SS "OPTIONS" +.PP +\fB\-p\fR \fIROLES_PATH\fR, \fB\-\-roles\-path=\fR\fIROLES_PATH\fR +.RS 4 +The path to the directory containing your roles\&. The default is the +\fBroles_path\fR +configured in your +\fBansible\&.cfg\fR +file (/etc/ansible/roles if not configured) +.RE +.SH "AUTHOR" +.sp +Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&. +.SH "COPYRIGHT" +.sp +Copyright \(co 2014, Michael DeHaan +.sp +Ansible is released under the terms of the GPLv3 License\&. +.SH "SEE ALSO" +.sp +\fBansible\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1) +.sp +Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. 
IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible diff --git a/docs/man/man1/ansible-galaxy.1.asciidoc.in b/docs/man/man1/ansible-galaxy.1.asciidoc.in new file mode 100644 index 00000000000..b8a80e6b2c5 --- /dev/null +++ b/docs/man/man1/ansible-galaxy.1.asciidoc.in @@ -0,0 +1,167 @@ +ansible-galaxy(1) +=================== +:doctype: manpage +:man source: Ansible +:man version: %VERSION% +:man manual: System administration commands + +NAME +---- +ansible-galaxy - manage roles using galaxy.ansible.com + + +SYNOPSIS +-------- +ansible-galaxy [init|info|install|list|remove] [--help] [options] ... + + +DESCRIPTION +----------- + +*Ansible Galaxy* is a shared repository for Ansible roles (added in +ansible version 1.2). The ansible-galaxy command can be used to manage +these roles, or by creating a skeleton framework for roles you'd like +to upload to Galaxy. + +COMMON OPTIONS +-------------- + +*-h*, *--help*:: + +Show a help message related to the given sub-command. + + +INSTALL +------- + +The *install* sub-command is used to install roles. + +USAGE +~~~~~ + +$ ansible-galaxy install [options] [-r FILE | role_name(s)[,version] | tar_file(s)] + +Roles can be installed in several different ways: + +* A username.rolename[,version] - this will install a single role. The Galaxy + API will be contacted to provide the information about the role, and the + corresponding .tar.gz will be downloaded from *github.com*. If the version + is omitted, the most recent version available will be installed. + +* A file name, using *-r* - this will install multiple roles listed one per + line. The format of each line is the same as above: username.rolename[,version] + +* A .tar.gz of a valid role you've downloaded directly from *github.com*. This + is mainly useful when the system running Ansible does not have access to + the Galaxy API, for instance when behind a firewall or proxy. + + +OPTIONS +~~~~~~~ + +*-f*, *--force*:: + +Force overwriting an existing role. + +*-i*, *--ignore-errors*:: + +Ignore errors and continue with the next specified role. + +*-n*, *--no-deps*:: + +Don't download roles listed as dependencies. + +*-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH':: + +The path to the directory containing your roles. The default is the *roles_path* +configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) + +*-r* 'ROLE_FILE', *--role-file=*'ROLE_FILE':: + +A file containing a list of roles to be imported, as specified above. This +option cannot be used if a rolename or .tar.gz have been specified. + +REMOVE +------ + +The *remove* sub-command is used to remove one or more roles. + +USAGE +~~~~~ + +$ ansible-galaxy remove role1 role2 ... + +OPTIONS +~~~~~~~ + +*-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH':: + +The path to the directory containing your roles. The default is the *roles_path* +configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) + +INIT +---- + +The *init* command is used to create an empty role suitable for uploading +to https://galaxy.ansible.com (or for roles in general). + +USAGE +~~~~~ + +$ ansible-galaxy init [options] role_name + +OPTIONS +~~~~~~~ + +*-f*, *--force*:: + +Force overwriting an existing role. + +*-p* 'INIT_PATH', *--init-path=*'INIT_PATH':: + +The path in which the skeleton role will be created.The default is the current +working directory. + +LIST +---- + +The *list* sub-command is used to show what roles are currently instaled. 
+You can specify a role name, and if installed only that role will be shown. + +USAGE +~~~~~ + +$ ansible-galaxy list [role_name] + +OPTIONS +~~~~~~~ + +*-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH':: + +The path to the directory containing your roles. The default is the *roles_path* +configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) + + +AUTHOR +------ + +Ansible was originally written by Michael DeHaan. See the AUTHORS file +for a complete list of contributors. + + +COPYRIGHT +--------- + +Copyright © 2014, Michael DeHaan + +Ansible is released under the terms of the GPLv3 License. + + +SEE ALSO +-------- + +*ansible*(1), *ansible-pull*(1), *ansible-doc*(1) + +Extensive documentation is available in the documentation site: +. IRC and mailing list info can be found +in file CONTRIBUTING.md, available in: From 0e06e8bb7fd46b89252ace750a8e2c2676453459 Mon Sep 17 00:00:00 2001 From: dmtrs Date: Tue, 4 Mar 2014 22:38:29 +0200 Subject: [PATCH 389/772] Add module composer (php dependency manager) --- library/packaging/composer | 153 +++++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 library/packaging/composer diff --git a/library/packaging/composer b/library/packaging/composer new file mode 100644 index 00000000000..9220ed16bcc --- /dev/null +++ b/library/packaging/composer @@ -0,0 +1,153 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Dimitrios Tydeas Mengidis + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: composer +author: Dimitrios Tydeas Mengidis +short_description: Dependency Manager for PHP +version_added: "1.4" +description: + - Composer is a tool for dependency management in PHP. 
It allows you to declare the dependent libraries your project needs and it will install them in your project for you +options: + working_dir: + description: + - Directory of your project ( see --working-dir ) + required: true + default: null + aliases: [ "working-dir" ] + prefer_source: + description: + - Forces installation from package sources when possible ( see --prefer-source ) + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [ "prefer-source" ] + prefer_dist: + description: + - Forces installation from package dist even for de versions ( see --prefer-dist ) + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [ "prefer-dist" ] + no_dev: + description: + - Disables installation of require-dev packages ( see --no-dev ) + required: false + default: "yes" + choices: [ "yes", "no" ] + aliases: [ "no-dev" ] + no_scripts: + description: + - Skips the execution of all scripts defined in composer.json ( see --no-scripts ) + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [ "no-scripts" ] + no_plugins: + description: + - Disables all plugins ( see --no-plugins ) + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [ "no-plugins" ] + optimize_autoloader: + description: + - Optimize autoloader during autoloader dump ( see --optimize-autoloader ). Convert PSR-0/4 autoloading to classmap to get a faster autoloader. This is recommended especially for production, but can take a bit of time to run so it is currently not done by default. + required: false + default: "yes" + choices: [ "yes", "no" ] + aliases: [ "optimize-autoloader" ] +requirements: + - php + - composer installed in bin path (recommended /usr/local/bin) +notes: + - Default options that are always appended in each execution are --no-ansi, --no-progress, and --no-interaction +''' + +EXAMPLES = ''' +# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock +- composer: command=install working_dir=/path/to/project +''' + +import os +import re + +def parse_out(string): + return re.sub("\s+", " ", string).strip() + +def has_changed(string): + return (re.match("Nothing to install or update", string) != None) + +def composer_install(module, options): + php_path = module.get_bin_path("php", True, ["/usr/local/bin"]) + composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) + cmd = "%s %s install %s" % (php_path, composer_path, " ".join(options)) + + return module.run_command(cmd) + +def main(): + module = AnsibleModule( + argument_spec = dict( + working_dir = dict(aliases=["working-dir"], required=True), + prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]), + prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]), + no_dev = dict(default="yes", type="bool", aliases=["no-dev"]), + no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]), + no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]), + optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]), + ), + supports_check_mode=True + ) + + module.params["working_dir"] = os.path.abspath(module.params["working_dir"]) + + options = set([]) + # Default options + options.add("--no-ansi") + options.add("--no-progress") + options.add("--no-interaction") + + if module.check_mode: + options.add("--dry-run") + + # Prepare options + for i in module.params: + opt = "--%s" % i.replace("_","-") + p = module.params[i] + if isinstance(p, (bool)) and p: + 
options.add(opt) + elif isinstance(p, (str)): + options.add("%s=%s" % (opt, p)) + + rc, out, err = composer_install(module, options) + + if rc != 0: + output = parse_out(err) + module.fail_json(msg=output) + else: + output = parse_out(out) + module.exit_json(changed=has_changed(output), msg=output) + +# import module snippets +from ansible.module_utils.basic import * + +main() From b0dbc61d635d8c86fa7851c01f78719e175b16c2 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 16 Mar 2014 16:02:37 -0400 Subject: [PATCH 390/772] Move facts to 'module_utils' so they can be referenced by other modules. Note unlike other module_utils/ dirs this keeps the original license -- so usage of facts code in modules will be limited to GPLv3 modules. --- lib/ansible/module_utils/facts.py | 2301 +++++++++++++++++++++++++++++ library/system/setup | 2238 +--------------------------- 2 files changed, 2305 insertions(+), 2234 deletions(-) create mode 100644 lib/ansible/module_utils/facts.py diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py new file mode 100644 index 00000000000..01f812def7b --- /dev/null +++ b/lib/ansible/module_utils/facts.py @@ -0,0 +1,2301 @@ +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import os +import array +import fcntl +import fnmatch +import glob +import platform +import re +import socket +import struct +import datetime +import getpass +import ConfigParser +import StringIO + +try: + import selinux + HAVE_SELINUX=True +except ImportError: + HAVE_SELINUX=False + +try: + import json +except ImportError: + import simplejson as json + +class Facts(object): + """ + This class should only attempt to populate those facts that + are mostly generic to all systems. This includes platform facts, + service facts (eg. ssh keys or selinux), and distribution facts. + Anything that requires extensive code or may have more than one + possible implementation to establish facts for a given topic should + subclass Facts. + """ + + _I386RE = re.compile(r'i[3456]86') + # For the most part, we assume that platform.dist() will tell the truth. + # This is the fallback to handle unknowns or exceptions + OSDIST_DICT = { '/etc/redhat-release': 'RedHat', + '/etc/vmware-release': 'VMwareESX', + '/etc/openwrt_release': 'OpenWrt', + '/etc/system-release': 'OtherLinux', + '/etc/alpine-release': 'Alpine', + '/etc/release': 'Solaris', + '/etc/arch-release': 'Archlinux', + '/etc/SuSE-release': 'SuSE', + '/etc/gentoo-release': 'Gentoo', + '/etc/os-release': 'Debian' } + SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } + + # A list of dicts. If there is a platform with more than one + # package manager, put the preferred one last. If there is an + # ansible module, use that as the value for the 'name' key. 
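# [Editor's note] "put the preferred one last" works because
# get_pkg_mgr_facts() below walks this list in order and overwrites
# facts['pkg_mgr'] for every path that exists, so the last match wins.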
+ PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' }, + { 'path' : '/usr/bin/apt-get', 'name' : 'apt' }, + { 'path' : '/usr/bin/zypper', 'name' : 'zypper' }, + { 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' }, + { 'path' : '/usr/bin/pacman', 'name' : 'pacman' }, + { 'path' : '/bin/opkg', 'name' : 'opkg' }, + { 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' }, + { 'path' : '/opt/local/bin/port', 'name' : 'macports' }, + { 'path' : '/sbin/apk', 'name' : 'apk' }, + { 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' }, + { 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' }, + { 'path' : '/usr/bin/emerge', 'name' : 'portage' }, + ] + + def __init__(self): + self.facts = {} + self.get_platform_facts() + self.get_distribution_facts() + self.get_cmdline() + self.get_public_ssh_host_keys() + self.get_selinux_facts() + self.get_pkg_mgr_facts() + self.get_lsb_facts() + self.get_date_time_facts() + self.get_user_facts() + self.get_local_facts() + self.get_env_facts() + + def populate(self): + return self.facts + + # Platform + # platform.system() can be Linux, Darwin, Java, or Windows + def get_platform_facts(self): + self.facts['system'] = platform.system() + self.facts['kernel'] = platform.release() + self.facts['machine'] = platform.machine() + self.facts['python_version'] = platform.python_version() + self.facts['fqdn'] = socket.getfqdn() + self.facts['hostname'] = platform.node().split('.')[0] + self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:]) + arch_bits = platform.architecture()[0] + self.facts['userspace_bits'] = arch_bits.replace('bit', '') + if self.facts['machine'] == 'x86_64': + self.facts['architecture'] = self.facts['machine'] + if self.facts['userspace_bits'] == '64': + self.facts['userspace_architecture'] = 'x86_64' + elif self.facts['userspace_bits'] == '32': + self.facts['userspace_architecture'] = 'i386' + elif Facts._I386RE.search(self.facts['machine']): + self.facts['architecture'] = 'i386' + if self.facts['userspace_bits'] == '64': + self.facts['userspace_architecture'] = 'x86_64' + elif self.facts['userspace_bits'] == '32': + self.facts['userspace_architecture'] = 'i386' + else: + self.facts['architecture'] = self.facts['machine'] + if self.facts['system'] == 'Linux': + self.get_distribution_facts() + elif self.facts['system'] == 'AIX': + rc, out, err = module.run_command("/usr/sbin/bootinfo -p") + data = out.split('\n') + self.facts['architecture'] = data[0] + + + def get_local_facts(self): + + fact_path = module.params.get('fact_path', None) + if not fact_path or not os.path.exists(fact_path): + return + + local = {} + for fn in sorted(glob.glob(fact_path + '/*.fact')): + # where it will sit under local facts + fact_base = os.path.basename(fn).replace('.fact','') + if os.access(fn, os.X_OK): + # run it + # try to read it as json first + # if that fails read it with ConfigParser + # if that fails, skip it + rc, out, err = module.run_command(fn) + else: + out = open(fn).read() + + # load raw json + fact = 'loading %s' % fact_base + try: + fact = json.loads(out) + except ValueError, e: + # load raw ini + cp = ConfigParser.ConfigParser() + try: + cp.readfp(StringIO.StringIO(out)) + except ConfigParser.Error, e: + fact="error loading fact - please check content" + else: + fact = {} + #print cp.sections() + for sect in cp.sections(): + if sect not in fact: + fact[sect] = {} + for opt in cp.options(sect): + val = cp.get(sect, opt) + fact[sect][opt]=val + + local[fact_base] = fact + if not local: + return + self.facts['local'] = local + + # platform.dist() is 
deprecated in 2.6 + # in 2.6 and newer, you should use platform.linux_distribution() + def get_distribution_facts(self): + + # A list with OS Family members + OS_FAMILY = dict( + RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat', + SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat', + OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat', + XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', SLES = 'Suse', + SLED = 'Suse', OpenSuSE = 'Suse', SuSE = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo', + Archlinux = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', + Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris', + SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin', + FreeBSD = 'FreeBSD', HPUX = 'HP-UX' + ) + + if self.facts['system'] == 'AIX': + self.facts['distribution'] = 'AIX' + rc, out, err = module.run_command("/usr/bin/oslevel") + data = out.split('.') + self.facts['distribution_version'] = data[0] + self.facts['distribution_release'] = data[1] + elif self.facts['system'] == 'HP-UX': + self.facts['distribution'] = 'HP-UX' + rc, out, err = module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True) + data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out) + if data: + self.facts['distribution_version'] = data.groups()[0] + self.facts['distribution_release'] = data.groups()[1] + elif self.facts['system'] == 'Darwin': + self.facts['distribution'] = 'MacOSX' + rc, out, err = module.run_command("/usr/bin/sw_vers -productVersion") + data = out.split()[-1] + self.facts['distribution_version'] = data + elif self.facts['system'] == 'FreeBSD': + self.facts['distribution'] = 'FreeBSD' + self.facts['distribution_release'] = platform.release() + self.facts['distribution_version'] = platform.version() + elif self.facts['system'] == 'OpenBSD': + self.facts['distribution'] = 'OpenBSD' + self.facts['distribution_release'] = platform.release() + rc, out, err = module.run_command("/sbin/sysctl -n kern.version") + match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out) + if match: + self.facts['distribution_version'] = match.groups()[0] + else: + self.facts['distribution_version'] = 'release' + else: + dist = platform.dist() + self.facts['distribution'] = dist[0].capitalize() or 'NA' + self.facts['distribution_version'] = dist[1] or 'NA' + self.facts['distribution_release'] = dist[2] or 'NA' + # Try to handle the exceptions now ... 
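# [Editor's note] Each marker file below overrides the platform.dist()
# guess when it exists on disk; Fedora is deliberately left alone so
# that its /etc/redhat-release does not relabel it as RedHat.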
+ for (path, name) in Facts.OSDIST_DICT.items(): + if os.path.exists(path): + if self.facts['distribution'] == 'Fedora': + pass + elif name == 'RedHat': + data = get_file_content(path) + if 'Red Hat' in data: + self.facts['distribution'] = name + else: + self.facts['distribution'] = data.split()[0] + elif name == 'OtherLinux': + data = get_file_content(path) + if 'Amazon' in data: + self.facts['distribution'] = 'Amazon' + self.facts['distribution_version'] = data.split()[-1] + elif name == 'OpenWrt': + data = get_file_content(path) + if 'OpenWrt' in data: + self.facts['distribution'] = name + version = re.search('DISTRIB_RELEASE="(.*)"', data) + if version: + self.facts['distribution_version'] = version.groups()[0] + release = re.search('DISTRIB_CODENAME="(.*)"', data) + if release: + self.facts['distribution_release'] = release.groups()[0] + elif name == 'Alpine': + data = get_file_content(path) + self.facts['distribution'] = 'Alpine' + self.facts['distribution_version'] = data + elif name == 'Solaris': + data = get_file_content(path).split('\n')[0] + ora_prefix = '' + if 'Oracle Solaris' in data: + data = data.replace('Oracle ','') + ora_prefix = 'Oracle ' + self.facts['distribution'] = data.split()[0] + self.facts['distribution_version'] = data.split()[1] + self.facts['distribution_release'] = ora_prefix + data + elif name == 'SuSE': + data = get_file_content(path).splitlines() + self.facts['distribution_release'] = data[2].split('=')[1].strip() + elif name == 'Debian': + data = get_file_content(path).split('\n')[0] + release = re.search("PRETTY_NAME.+ \(?([^ ]+?)\)?\"", data) + if release: + self.facts['distribution_release'] = release.groups()[0] + else: + self.facts['distribution'] = name + + self.facts['os_family'] = self.facts['distribution'] + if self.facts['distribution'] in OS_FAMILY: + self.facts['os_family'] = OS_FAMILY[self.facts['distribution']] + + def get_cmdline(self): + data = get_file_content('/proc/cmdline') + if data: + self.facts['cmdline'] = {} + for piece in shlex.split(data): + item = piece.split('=', 1) + if len(item) == 1: + self.facts['cmdline'][item[0]] = True + else: + self.facts['cmdline'][item[0]] = item[1] + + def get_public_ssh_host_keys(self): + dsa_filename = '/etc/ssh/ssh_host_dsa_key.pub' + rsa_filename = '/etc/ssh/ssh_host_rsa_key.pub' + ecdsa_filename = '/etc/ssh/ssh_host_ecdsa_key.pub' + + if self.facts['system'] == 'Darwin': + dsa_filename = '/etc/ssh_host_dsa_key.pub' + rsa_filename = '/etc/ssh_host_rsa_key.pub' + ecdsa_filename = '/etc/ssh_host_ecdsa_key.pub' + dsa = get_file_content(dsa_filename) + rsa = get_file_content(rsa_filename) + ecdsa = get_file_content(ecdsa_filename) + if dsa is None: + dsa = 'NA' + else: + self.facts['ssh_host_key_dsa_public'] = dsa.split()[1] + if rsa is None: + rsa = 'NA' + else: + self.facts['ssh_host_key_rsa_public'] = rsa.split()[1] + if ecdsa is None: + ecdsa = 'NA' + else: + self.facts['ssh_host_key_ecdsa_public'] = ecdsa.split()[1] + + def get_pkg_mgr_facts(self): + self.facts['pkg_mgr'] = 'unknown' + for pkg in Facts.PKG_MGRS: + if os.path.exists(pkg['path']): + self.facts['pkg_mgr'] = pkg['name'] + if self.facts['system'] == 'OpenBSD': + self.facts['pkg_mgr'] = 'openbsd_pkg' + + def get_lsb_facts(self): + lsb_path = module.get_bin_path('lsb_release') + if lsb_path: + rc, out, err = module.run_command([lsb_path, "-a"]) + if rc == 0: + self.facts['lsb'] = {} + for line in out.split('\n'): + if len(line) < 1: + continue + value = line.split(':', 1)[1].strip() + if 'LSB Version:' in line: + 
self.facts['lsb']['release'] = value + elif 'Distributor ID:' in line: + self.facts['lsb']['id'] = value + elif 'Description:' in line: + self.facts['lsb']['description'] = value + elif 'Release:' in line: + self.facts['lsb']['release'] = value + elif 'Codename:' in line: + self.facts['lsb']['codename'] = value + if 'lsb' in self.facts and 'release' in self.facts['lsb']: + self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0] + elif lsb_path is None and os.path.exists('/etc/lsb-release'): + self.facts['lsb'] = {} + f = open('/etc/lsb-release', 'r') + try: + for line in f.readlines(): + value = line.split('=',1)[1].strip() + if 'DISTRIB_ID' in line: + self.facts['lsb']['id'] = value + elif 'DISTRIB_RELEASE' in line: + self.facts['lsb']['release'] = value + elif 'DISTRIB_DESCRIPTION' in line: + self.facts['lsb']['description'] = value + elif 'DISTRIB_CODENAME' in line: + self.facts['lsb']['codename'] = value + finally: + f.close() + else: + return self.facts + + if 'lsb' in self.facts and 'release' in self.facts['lsb']: + self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0] + + + def get_selinux_facts(self): + if not HAVE_SELINUX: + self.facts['selinux'] = False + return + self.facts['selinux'] = {} + if not selinux.is_selinux_enabled(): + self.facts['selinux']['status'] = 'disabled' + else: + self.facts['selinux']['status'] = 'enabled' + try: + self.facts['selinux']['policyvers'] = selinux.security_policyvers() + except OSError, e: + self.facts['selinux']['policyvers'] = 'unknown' + try: + (rc, configmode) = selinux.selinux_getenforcemode() + if rc == 0: + self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown') + else: + self.facts['selinux']['config_mode'] = 'unknown' + except OSError, e: + self.facts['selinux']['config_mode'] = 'unknown' + try: + mode = selinux.security_getenforce() + self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown') + except OSError, e: + self.facts['selinux']['mode'] = 'unknown' + try: + (rc, policytype) = selinux.selinux_getpolicytype() + if rc == 0: + self.facts['selinux']['type'] = policytype + else: + self.facts['selinux']['type'] = 'unknown' + except OSError, e: + self.facts['selinux']['type'] = 'unknown' + + + def get_date_time_facts(self): + self.facts['date_time'] = {} + + now = datetime.datetime.now() + self.facts['date_time']['year'] = now.strftime('%Y') + self.facts['date_time']['month'] = now.strftime('%m') + self.facts['date_time']['day'] = now.strftime('%d') + self.facts['date_time']['hour'] = now.strftime('%H') + self.facts['date_time']['minute'] = now.strftime('%M') + self.facts['date_time']['second'] = now.strftime('%S') + self.facts['date_time']['epoch'] = now.strftime('%s') + if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%': + self.facts['date_time']['epoch'] = str(int(time.time())) + self.facts['date_time']['date'] = now.strftime('%Y-%m-%d') + self.facts['date_time']['time'] = now.strftime('%H:%M:%S') + self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ") + self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") + self.facts['date_time']['tz'] = time.strftime("%Z") + self.facts['date_time']['tz_offset'] = time.strftime("%z") + + + # User + def get_user_facts(self): + self.facts['user_id'] = getpass.getuser() + + def get_env_facts(self): + self.facts['env'] = {} + for k,v in os.environ.iteritems(): + self.facts['env'][k] = v + +class 
Hardware(Facts): + """ + This is a generic Hardware subclass of Facts. This should be further + subclassed to implement per platform. If you subclass this, it + should define: + - memfree_mb + - memtotal_mb + - swapfree_mb + - swaptotal_mb + - processor (a list) + - processor_cores + - processor_count + + All subclasses MUST define platform. + """ + platform = 'Generic' + + def __new__(cls, *arguments, **keyword): + subclass = cls + for sc in Hardware.__subclasses__(): + if sc.platform == platform.system(): + subclass = sc + return super(cls, subclass).__new__(subclass, *arguments, **keyword) + + def __init__(self): + Facts.__init__(self) + + def populate(self): + return self.facts + +class LinuxHardware(Hardware): + """ + Linux-specific subclass of Hardware. Defines memory and CPU facts: + - memfree_mb + - memtotal_mb + - swapfree_mb + - swaptotal_mb + - processor (a list) + - processor_cores + - processor_count + + In addition, it also defines number of DMI facts and device facts. + """ + + platform = 'Linux' + MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'] + + def __init__(self): + Hardware.__init__(self) + + def populate(self): + self.get_cpu_facts() + self.get_memory_facts() + self.get_dmi_facts() + self.get_device_facts() + self.get_mount_facts() + return self.facts + + def get_memory_facts(self): + if not os.access("/proc/meminfo", os.R_OK): + return + for line in open("/proc/meminfo").readlines(): + data = line.split(":", 1) + key = data[0] + if key in LinuxHardware.MEMORY_FACTS: + val = data[1].strip().split(' ')[0] + self.facts["%s_mb" % key.lower()] = long(val) / 1024 + + def get_cpu_facts(self): + i = 0 + physid = 0 + coreid = 0 + sockets = {} + cores = {} + if not os.access("/proc/cpuinfo", os.R_OK): + return + self.facts['processor'] = [] + for line in open("/proc/cpuinfo").readlines(): + data = line.split(":", 1) + key = data[0].strip() + # model name is for Intel arch, Processor (mind the uppercase P) + # works for some ARM devices, like the Sheevaplug. + if key == 'model name' or key == 'Processor': + if 'processor' not in self.facts: + self.facts['processor'] = [] + self.facts['processor'].append(data[1].strip()) + i += 1 + elif key == 'physical id': + physid = data[1].strip() + if physid not in sockets: + sockets[physid] = 1 + elif key == 'core id': + coreid = data[1].strip() + if coreid not in sockets: + cores[coreid] = 1 + elif key == 'cpu cores': + sockets[physid] = int(data[1].strip()) + elif key == 'siblings': + cores[coreid] = int(data[1].strip()) + self.facts['processor_count'] = sockets and len(sockets) or i + self.facts['processor_cores'] = sockets.values() and sockets.values()[0] or 1 + self.facts['processor_threads_per_core'] = ((cores.values() and + cores.values()[0] or 1) / self.facts['processor_cores']) + self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] * + self.facts['processor_count'] * self.facts['processor_cores']) + + def get_dmi_facts(self): + ''' learn dmi facts from system + + Try /sys first for dmi related facts. 
+ If that is not available, fall back to dmidecode executable ''' + + if os.path.exists('/sys/devices/virtual/dmi/id/product_name'): + # Use kernel DMI info, if available + + # DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf + FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop", + "Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower", + "Portable", "Laptop", "Notebook", "Hand Held", "Docking Station", + "All In One", "Sub Notebook", "Space-saving", "Lunch Box", + "Main Server Chassis", "Expansion Chassis", "Sub Chassis", + "Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis", + "Rack Mount Chassis", "Sealed-case PC", "Multi-system", + "CompactPCI", "AdvancedTCA", "Blade" ] + + DMI_DICT = { + 'bios_date': '/sys/devices/virtual/dmi/id/bios_date', + 'bios_version': '/sys/devices/virtual/dmi/id/bios_version', + 'form_factor': '/sys/devices/virtual/dmi/id/chassis_type', + 'product_name': '/sys/devices/virtual/dmi/id/product_name', + 'product_serial': '/sys/devices/virtual/dmi/id/product_serial', + 'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid', + 'product_version': '/sys/devices/virtual/dmi/id/product_version', + 'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor' + } + + for (key,path) in DMI_DICT.items(): + data = get_file_content(path) + if data is not None: + if key == 'form_factor': + try: + self.facts['form_factor'] = FORM_FACTOR[int(data)] + except IndexError, e: + self.facts['form_factor'] = 'unknown (%s)' % data + else: + self.facts[key] = data + else: + self.facts[key] = 'NA' + + else: + # Fall back to using dmidecode, if available + dmi_bin = module.get_bin_path('dmidecode') + DMI_DICT = { + 'bios_date': 'bios-release-date', + 'bios_version': 'bios-version', + 'form_factor': 'chassis-type', + 'product_name': 'system-product-name', + 'product_serial': 'system-serial-number', + 'product_uuid': 'system-uuid', + 'product_version': 'system-version', + 'system_vendor': 'system-manufacturer' + } + for (k, v) in DMI_DICT.items(): + if dmi_bin is not None: + (rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v)) + if rc == 0: + # Strip out commented lines (specific dmidecode output) + thisvalue = ''.join([ line for line in out.split('\n') if not line.startswith('#') ]) + try: + json.dumps(thisvalue) + except UnicodeDecodeError: + thisvalue = "NA" + + self.facts[k] = thisvalue + else: + self.facts[k] = 'NA' + else: + self.facts[k] = 'NA' + + def get_mount_facts(self): + self.facts['mounts'] = [] + mtab = get_file_content('/etc/mtab', '') + for line in mtab.split('\n'): + if line.startswith('/'): + fields = line.rstrip('\n').split() + if(fields[2] != 'none'): + size_total = None + size_available = None + try: + statvfs_result = os.statvfs(fields[1]) + size_total = statvfs_result.f_bsize * statvfs_result.f_blocks + size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail) + except OSError, e: + continue + + self.facts['mounts'].append( + {'mount': fields[1], + 'device':fields[0], + 'fstype': fields[2], + 'options': fields[3], + # statvfs data + 'size_total': size_total, + 'size_available': size_available, + }) + + def get_device_facts(self): + self.facts['devices'] = {} + lspci = module.get_bin_path('lspci') + if lspci: + rc, pcidata, err = module.run_command([lspci, '-D']) + else: + pcidata = None + + try: + block_devs = os.listdir("/sys/block") + except OSError: + return + + for block in block_devs: + virtual = 1 + sysfs_no_links = 0 + try: + path = os.readlink(os.path.join("/sys/block/", block)) + 
except OSError, e: + if e.errno == errno.EINVAL: + path = block + sysfs_no_links = 1 + else: + continue + if "virtual" in path: + continue + sysdir = os.path.join("/sys/block", path) + if sysfs_no_links == 1: + for folder in os.listdir(sysdir): + if "device" in folder: + virtual = 0 + break + if virtual: + continue + d = {} + diskname = os.path.basename(sysdir) + for key in ['vendor', 'model']: + d[key] = get_file_content(sysdir + "/device/" + key) + + for key,test in [ ('removable','/removable'), \ + ('support_discard','/queue/discard_granularity'), + ]: + d[key] = get_file_content(sysdir + test) + + d['partitions'] = {} + for folder in os.listdir(sysdir): + m = re.search("(" + diskname + "\d+)", folder) + if m: + part = {} + partname = m.group(1) + part_sysdir = sysdir + "/" + partname + + part['start'] = get_file_content(part_sysdir + "/start",0) + part['sectors'] = get_file_content(part_sysdir + "/size",0) + part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512) + part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize']))) + d['partitions'][partname] = part + + d['rotational'] = get_file_content(sysdir + "/queue/rotational") + d['scheduler_mode'] = "" + scheduler = get_file_content(sysdir + "/queue/scheduler") + if scheduler is not None: + m = re.match(".*?(\[(.*)\])", scheduler) + if m: + d['scheduler_mode'] = m.group(2) + + d['sectors'] = get_file_content(sysdir + "/size") + if not d['sectors']: + d['sectors'] = 0 + d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size") + if not d['sectorsize']: + d['sectorsize'] = 512 + d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize'])) + + d['host'] = "" + + # domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7). + m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir) + if m and pcidata: + pciid = m.group(1) + did = re.escape(pciid) + m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE) + d['host'] = m.group(1) + + d['holders'] = [] + if os.path.isdir(sysdir + "/holders"): + for folder in os.listdir(sysdir + "/holders"): + if not folder.startswith("dm-"): + continue + name = get_file_content(sysdir + "/holders/" + folder + "/dm/name") + if name: + d['holders'].append(name) + else: + d['holders'].append(folder) + + self.facts['devices'][diskname] = d + + +class SunOSHardware(Hardware): + """ + In addition to the generic memory and cpu facts, this also sets + swap_reserved_mb and swap_allocated_mb that is available from *swap -s*. + """ + platform = 'SunOS' + + def __init__(self): + Hardware.__init__(self) + + def populate(self): + self.get_cpu_facts() + self.get_memory_facts() + return self.facts + + def get_cpu_facts(self): + physid = 0 + sockets = {} + rc, out, err = module.run_command("/usr/bin/kstat cpu_info") + self.facts['processor'] = [] + for line in out.split('\n'): + if len(line) < 1: + continue + data = line.split(None, 1) + key = data[0].strip() + # "brand" works on Solaris 10 & 11. "implementation" for Solaris 9. 
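# [Editor's note] kstat lines are split into (key, value) on the first
# run of whitespace; a "module:" line marks the start of a new cpu_info
# record, which is why it resets brand before the next CPU is parsed.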
+ if key == 'module:': + brand = '' + elif key == 'brand': + brand = data[1].strip() + elif key == 'clock_MHz': + clock_mhz = data[1].strip() + elif key == 'implementation': + processor = brand or data[1].strip() + # Add clock speed to description for SPARC CPU + if self.facts['machine'] != 'i86pc': + processor += " @ " + clock_mhz + "MHz" + if 'processor' not in self.facts: + self.facts['processor'] = [] + self.facts['processor'].append(processor) + elif key == 'chip_id': + physid = data[1].strip() + if physid not in sockets: + sockets[physid] = 1 + else: + sockets[physid] += 1 + # Counting cores on Solaris can be complicated. + # https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu + # Treat 'processor_count' as physical sockets and 'processor_cores' as + # virtual CPUs visisble to Solaris. Not a true count of cores for modern SPARC as + # these processors have: sockets -> cores -> threads/virtual CPU. + if len(sockets) > 0: + self.facts['processor_count'] = len(sockets) + self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values()) + else: + self.facts['processor_cores'] = 'NA' + self.facts['processor_count'] = len(self.facts['processor']) + + def get_memory_facts(self): + rc, out, err = module.run_command(["/usr/sbin/prtconf"]) + for line in out.split('\n'): + if 'Memory size' in line: + self.facts['memtotal_mb'] = line.split()[2] + rc, out, err = module.run_command("/usr/sbin/swap -s") + allocated = long(out.split()[1][:-1]) + reserved = long(out.split()[5][:-1]) + used = long(out.split()[8][:-1]) + free = long(out.split()[10][:-1]) + self.facts['swapfree_mb'] = free / 1024 + self.facts['swaptotal_mb'] = (free + used) / 1024 + self.facts['swap_allocated_mb'] = allocated / 1024 + self.facts['swap_reserved_mb'] = reserved / 1024 + +class OpenBSDHardware(Hardware): + """ + OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts: + - memfree_mb + - memtotal_mb + - swapfree_mb + - swaptotal_mb + - processor (a list) + - processor_cores + - processor_count + - processor_speed + - devices + """ + platform = 'OpenBSD' + DMESG_BOOT = '/var/run/dmesg.boot' + + def __init__(self): + Hardware.__init__(self) + + def populate(self): + self.sysctl = self.get_sysctl() + self.get_memory_facts() + self.get_processor_facts() + self.get_device_facts() + return self.facts + + def get_sysctl(self): + rc, out, err = module.run_command(["/sbin/sysctl", "hw"]) + if rc != 0: + return dict() + sysctl = dict() + for line in out.splitlines(): + (key, value) = line.split('=') + sysctl[key] = value.strip() + return sysctl + + def get_memory_facts(self): + # Get free memory. vmstat output looks like: + # procs memory page disks traps cpu + # r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id + # 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99 + rc, out, err = module.run_command("/usr/bin/vmstat") + if rc == 0: + self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[4]) / 1024 + self.facts['memtotal_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024 + + # Get swapctl info. 
swapctl output looks like: + # total: 69268 1K-blocks allocated, 0 used, 69268 available + # And for older OpenBSD: + # total: 69268k bytes allocated = 0k used, 69268k available + rc, out, err = module.run_command("/sbin/swapctl -sk") + if rc == 0: + data = out.split() + self.facts['swapfree_mb'] = long(data[-2].translate(None, "kmg")) / 1024 + self.facts['swaptotal_mb'] = long(data[1].translate(None, "kmg")) / 1024 + + def get_processor_facts(self): + processor = [] + dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT) + if not dmesg_boot: + rc, dmesg_boot, err = module.run_command("/sbin/dmesg") + i = 0 + for line in dmesg_boot.splitlines(): + if line.split(' ', 1)[0] == 'cpu%i:' % i: + processor.append(line.split(' ', 1)[1]) + i = i + 1 + processor_count = i + self.facts['processor'] = processor + self.facts['processor_count'] = processor_count + # I found no way to figure out the number of Cores per CPU in OpenBSD + self.facts['processor_cores'] = 'NA' + + def get_device_facts(self): + devices = [] + devices.extend(self.sysctl['hw.disknames'].split(',')) + self.facts['devices'] = devices + +class FreeBSDHardware(Hardware): + """ + FreeBSD-specific subclass of Hardware. Defines memory and CPU facts: + - memfree_mb + - memtotal_mb + - swapfree_mb + - swaptotal_mb + - processor (a list) + - processor_cores + - processor_count + - devices + """ + platform = 'FreeBSD' + DMESG_BOOT = '/var/run/dmesg.boot' + + def __init__(self): + Hardware.__init__(self) + + def populate(self): + self.get_cpu_facts() + self.get_memory_facts() + self.get_dmi_facts() + self.get_device_facts() + self.get_mount_facts() + return self.facts + + def get_cpu_facts(self): + self.facts['processor'] = [] + rc, out, err = module.run_command("/sbin/sysctl -n hw.ncpu") + self.facts['processor_count'] = out.strip() + + dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT) + if not dmesg_boot: + rc, dmesg_boot, err = module.run_command("/sbin/dmesg") + for line in dmesg_boot.split('\n'): + if 'CPU:' in line: + cpu = re.sub(r'CPU:\s+', r"", line) + self.facts['processor'].append(cpu.strip()) + if 'Logical CPUs per core' in line: + self.facts['processor_cores'] = line.split()[4] + + + def get_memory_facts(self): + rc, out, err = module.run_command("/sbin/sysctl vm.stats") + for line in out.split('\n'): + data = line.split() + if 'vm.stats.vm.v_page_size' in line: + pagesize = long(data[1]) + if 'vm.stats.vm.v_page_count' in line: + pagecount = long(data[1]) + if 'vm.stats.vm.v_free_count' in line: + freecount = long(data[1]) + self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024 + self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024 + # Get swapinfo. 
swapinfo output looks like: + # Device 1M-blocks Used Avail Capacity + # /dev/ada0p3 314368 0 314368 0% + # + rc, out, err = module.run_command("/usr/sbin/swapinfo -m") + lines = out.split('\n') + if len(lines[-1]) == 0: + lines.pop() + data = lines[-1].split() + self.facts['swaptotal_mb'] = data[1] + self.facts['swapfree_mb'] = data[3] + + def get_mount_facts(self): + self.facts['mounts'] = [] + fstab = get_file_content('/etc/fstab') + if fstab: + for line in fstab.split('\n'): + if line.startswith('#') or line.strip() == '': + continue + fields = re.sub(r'\s+',' ',line.rstrip('\n')).split() + self.facts['mounts'].append({'mount': fields[1] , 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]}) + + def get_device_facts(self): + sysdir = '/dev' + self.facts['devices'] = {} + drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = module.run_command("/sbin/sysctl kern.disks") + slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)') + if os.path.isdir(sysdir): + dirlist = sorted(os.listdir(sysdir)) + for device in dirlist: + d = drives.match(device) + if d: + self.facts['devices'][d.group(1)] = [] + s = slices.match(device) + if s: + self.facts['devices'][d.group(1)].append(s.group(1)) + + def get_dmi_facts(self): + ''' learn dmi facts from system + + Use dmidecode executable if available''' + + # Fall back to using dmidecode, if available + dmi_bin = module.get_bin_path('dmidecode') + DMI_DICT = dict( + bios_date='bios-release-date', + bios_version='bios-version', + form_factor='chassis-type', + product_name='system-product-name', + product_serial='system-serial-number', + product_uuid='system-uuid', + product_version='system-version', + system_vendor='system-manufacturer' + ) + for (k, v) in DMI_DICT.items(): + if dmi_bin is not None: + (rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v)) + if rc == 0: + # Strip out commented lines (specific dmidecode output) + self.facts[k] = ''.join([ line for line in out.split('\n') if not line.startswith('#') ]) + try: + json.dumps(self.facts[k]) + except UnicodeDecodeError: + self.facts[k] = 'NA' + else: + self.facts[k] = 'NA' + else: + self.facts[k] = 'NA' + + +class NetBSDHardware(Hardware): + """ + NetBSD-specific subclass of Hardware. Defines memory and CPU facts: + - memfree_mb + - memtotal_mb + - swapfree_mb + - swaptotal_mb + - processor (a list) + - processor_cores + - processor_count + - devices + """ + platform = 'NetBSD' + MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'] + + def __init__(self): + Hardware.__init__(self) + + def populate(self): + self.get_cpu_facts() + self.get_memory_facts() + self.get_mount_facts() + return self.facts + + def get_cpu_facts(self): + + i = 0 + physid = 0 + sockets = {} + if not os.access("/proc/cpuinfo", os.R_OK): + return + self.facts['processor'] = [] + for line in open("/proc/cpuinfo").readlines(): + data = line.split(":", 1) + key = data[0].strip() + # model name is for Intel arch, Processor (mind the uppercase P) + # works for some ARM devices, like the Sheevaplug. 
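# [Editor's note] Illustrative /proc/cpuinfo lines this loop keys on
# (values invented for the sketch):
#
#     model name  : Intel(R) Xeon(R) CPU E5-2407 0 @ 2.20GHz
#     physical id : 0
#     cpu cores   : 4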
+ if key == 'model name' or key == 'Processor': + if 'processor' not in self.facts: + self.facts['processor'] = [] + self.facts['processor'].append(data[1].strip()) + i += 1 + elif key == 'physical id': + physid = data[1].strip() + if physid not in sockets: + sockets[physid] = 1 + elif key == 'cpu cores': + sockets[physid] = int(data[1].strip()) + if len(sockets) > 0: + self.facts['processor_count'] = len(sockets) + self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values()) + else: + self.facts['processor_count'] = i + self.facts['processor_cores'] = 'NA' + + def get_memory_facts(self): + if not os.access("/proc/meminfo", os.R_OK): + return + for line in open("/proc/meminfo").readlines(): + data = line.split(":", 1) + key = data[0] + if key in NetBSDHardware.MEMORY_FACTS: + val = data[1].strip().split(' ')[0] + self.facts["%s_mb" % key.lower()] = long(val) / 1024 + + def get_mount_facts(self): + self.facts['mounts'] = [] + fstab = get_file_content('/etc/fstab') + if fstab: + for line in fstab.split('\n'): + if line.startswith('#') or line.strip() == '': + continue + fields = re.sub(r'\s+',' ',line.rstrip('\n')).split() + self.facts['mounts'].append({'mount': fields[1] , 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]}) + +class AIX(Hardware): + """ + AIX-specific subclass of Hardware. Defines memory and CPU facts: + - memfree_mb + - memtotal_mb + - swapfree_mb + - swaptotal_mb + - processor (a list) + - processor_cores + - processor_count + """ + platform = 'AIX' + + def __init__(self): + Hardware.__init__(self) + + def populate(self): + self.get_cpu_facts() + self.get_memory_facts() + self.get_dmi_facts() + return self.facts + + def get_cpu_facts(self): + self.facts['processor'] = [] + + + rc, out, err = module.run_command("/usr/sbin/lsdev -Cc processor") + if out: + i = 0 + for line in out.split('\n'): + + if 'Available' in line: + if i == 0: + data = line.split(' ') + cpudev = data[0] + + i += 1 + self.facts['processor_count'] = int(i) + + rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type") + + data = out.split(' ') + self.facts['processor'] = data[1] + + rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads") + + data = out.split(' ') + self.facts['processor_cores'] = int(data[1]) + + def get_memory_facts(self): + pagesize = 4096 + rc, out, err = module.run_command("/usr/bin/vmstat -v") + for line in out.split('\n'): + data = line.split() + if 'memory pages' in line: + pagecount = long(data[0]) + if 'free pages' in line: + freecount = long(data[0]) + self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024 + self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024 + # Get swapinfo. swapinfo output looks like: + # Device 1M-blocks Used Avail Capacity + # /dev/ada0p3 314368 0 314368 0% + # + rc, out, err = module.run_command("/usr/sbin/lsps -s") + if out: + lines = out.split('\n') + data = lines[1].split() + swaptotal_mb = long(data[0].rstrip('MB')) + percused = int(data[1].rstrip('%')) + self.facts['swaptotal_mb'] = swaptotal_mb + self.facts['swapfree_mb'] = long(swaptotal_mb * ( 100 - percused ) / 100) + + def get_dmi_facts(self): + rc, out, err = module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion") + data = out.split() + self.facts['firmware_version'] = data[1].strip('IBM,') + +class HPUX(Hardware): + """ + HP-UX-specifig subclass of Hardware. 
Defines memory and CPU facts: + - memfree_mb + - memtotal_mb + - swapfree_mb + - swaptotal_mb + - processor + - processor_cores + - processor_count + - model + - firmware + """ + + platform = 'HP-UX' + + def __init__(self): + Hardware.__init__(self) + + def populate(self): + self.get_cpu_facts() + self.get_memory_facts() + self.get_hw_facts() + return self.facts + + def get_cpu_facts(self): + if self.facts['architecture'] == '9000/800': + rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True) + self.facts['processor_count'] = int(out.strip()) + #Working with machinfo mess + elif self.facts['architecture'] == 'ia64': + if self.facts['distribution_version'] == "B.11.23": + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True) + self.facts['processor_count'] = int(out.strip().split('=')[1]) + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True) + self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip() + rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True) + self.facts['processor_cores'] = int(out.strip()) + if self.facts['distribution_version'] == "B.11.31": + #if machinfo return cores strings release B.11.31 > 1204 + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True) + if out.strip()== '0': + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True) + self.facts['processor_count'] = int(out.strip().split(" ")[0]) + #If hyperthreading is active divide cores by 2 + rc, out, err = module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True) + data = re.sub(' +',' ',out).strip().split(' ') + if len(data) == 1: + hyperthreading = 'OFF' + else: + hyperthreading = data[1] + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True) + data = out.strip().split(" ") + if hyperthreading == 'ON': + self.facts['processor_cores'] = int(data[0])/2 + else: + if len(data) == 1: + self.facts['processor_cores'] = self.facts['processor_count'] + else: + self.facts['processor_cores'] = int(data[0]) + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True) + self.facts['processor'] = out.strip() + else: + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True) + self.facts['processor_count'] = int(out.strip().split(" ")[0]) + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True) + self.facts['processor_cores'] = int(out.strip().split(" ")[0]) + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True) + self.facts['processor'] = out.strip() + + def get_memory_facts(self): + pagesize = 4096 + rc, out, err = module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True) + data = int(re.sub(' +',' ',out).split(' ')[5].strip()) + self.facts['memfree_mb'] = pagesize * data / 1024 / 1024 + if self.facts['architecture'] == '9000/800': + rc, out, err = module.run_command("grep Physical /var/adm/syslog/syslog.log") + data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip() + self.facts['memtotal_mb'] = int(data) / 1024 + else: + rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Memory", 
use_unsafe_shell=True) + data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip() + self.facts['memtotal_mb'] = int(data) + rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f -q") + self.facts['swaptotal_mb'] = int(out.strip()) + rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True) + swap = 0 + for line in out.strip().split('\n'): + swap += int(re.sub(' +',' ',line).split(' ')[3].strip()) + self.facts['swapfree_mb'] = swap + + def get_hw_facts(self): + rc, out, err = module.run_command("model") + self.facts['model'] = out.strip() + if self.facts['architecture'] == 'ia64': + rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True) + self.facts['firmware_version'] = out.split(':')[1].strip() + + +class Darwin(Hardware): + """ + Darwin-specific subclass of Hardware. Defines memory and CPU facts: + - processor + - processor_cores + - memtotal_mb + - memfree_mb + - model + - osversion + - osrevision + """ + platform = 'Darwin' + + def __init__(self): + Hardware.__init__(self) + + def populate(self): + self.sysctl = self.get_sysctl() + self.get_mac_facts() + self.get_cpu_facts() + self.get_memory_facts() + return self.facts + + def get_sysctl(self): + rc, out, err = module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"]) + if rc != 0: + return dict() + sysctl = dict() + for line in out.splitlines(): + if line.rstrip("\n"): + (key, value) = re.split(' = |: ', line, maxsplit=1) + sysctl[key] = value.strip() + return sysctl + + def get_system_profile(self): + rc, out, err = module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"]) + if rc != 0: + return dict() + system_profile = dict() + for line in out.splitlines(): + if ': ' in line: + (key, value) = line.split(': ', 1) + system_profile[key.strip()] = ' '.join(value.strip().split()) + return system_profile + + def get_mac_facts(self): + self.facts['model'] = self.sysctl['hw.model'] + self.facts['osversion'] = self.sysctl['kern.osversion'] + self.facts['osrevision'] = self.sysctl['kern.osrevision'] + + def get_cpu_facts(self): + if 'machdep.cpu.brand_string' in self.sysctl: # Intel + self.facts['processor'] = self.sysctl['machdep.cpu.brand_string'] + self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count'] + else: # PowerPC + system_profile = self.get_system_profile() + self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed']) + self.facts['processor_cores'] = self.sysctl['hw.physicalcpu'] + + def get_memory_facts(self): + self.facts['memtotal_mb'] = long(self.sysctl['hw.memsize']) / 1024 / 1024 + self.facts['memfree_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024 + +class Network(Facts): + """ + This is a generic Network subclass of Facts. This should be further + subclassed to implement per platform. If you subclass this, + you must define: + - interfaces (a list of interface names) + - interface_ dictionary of ipv4, ipv6, and mac address information. + + All subclasses MUST define platform. 
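+
+    Instantiating Network(module) returns the platform-specific subclass,
+    if one exists: __new__ below selects the subclass whose 'platform'
+    attribute matches platform.system().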
+ """ + platform = 'Generic' + + IPV6_SCOPE = { '0' : 'global', + '10' : 'host', + '20' : 'link', + '40' : 'admin', + '50' : 'site', + '80' : 'organization' } + + def __new__(cls, *arguments, **keyword): + subclass = cls + for sc in Network.__subclasses__(): + if sc.platform == platform.system(): + subclass = sc + return super(cls, subclass).__new__(subclass, *arguments, **keyword) + + def __init__(self, module): + self.module = module + Facts.__init__(self) + + def populate(self): + return self.facts + +class LinuxNetwork(Network): + """ + This is a Linux-specific subclass of Network. It defines + - interfaces (a list of interface names) + - interface_ dictionary of ipv4, ipv6, and mac address information. + - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses. + - ipv4_address and ipv6_address: the first non-local address for each family. + """ + platform = 'Linux' + + def __init__(self, module): + Network.__init__(self, module) + + def populate(self): + ip_path = self.module.get_bin_path('ip') + if ip_path is None: + return self.facts + default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path) + interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6) + self.facts['interfaces'] = interfaces.keys() + for iface in interfaces: + self.facts[iface] = interfaces[iface] + self.facts['default_ipv4'] = default_ipv4 + self.facts['default_ipv6'] = default_ipv6 + self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses'] + self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses'] + return self.facts + + def get_default_interfaces(self, ip_path): + # Use the commands: + # ip -4 route get 8.8.8.8 -> Google public DNS + # ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com + # to find out the default outgoing interface, address, and gateway + command = dict( + v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'], + v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012'] + ) + interface = dict(v4 = {}, v6 = {}) + for v in 'v4', 'v6': + if v == 'v6' and self.facts['os_family'] == 'RedHat' \ + and self.facts['distribution_version'].startswith('4.'): + continue + if v == 'v6' and not socket.has_ipv6: + continue + rc, out, err = module.run_command(command[v]) + if not out: + # v6 routing may result in + # RTNETLINK answers: Invalid argument + continue + words = out.split('\n')[0].split() + # A valid output starts with the queried address on the first line + if len(words) > 0 and words[0] == command[v][-1]: + for i in range(len(words) - 1): + if words[i] == 'dev': + interface[v]['interface'] = words[i+1] + elif words[i] == 'src': + interface[v]['address'] = words[i+1] + elif words[i] == 'via' and words[i+1] != command[v][-1]: + interface[v]['gateway'] = words[i+1] + return interface['v4'], interface['v6'] + + def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6): + interfaces = {} + ips = dict( + all_ipv4_addresses = [], + all_ipv6_addresses = [], + ) + + for path in glob.glob('/sys/class/net/*'): + if not os.path.isdir(path): + continue + device = os.path.basename(path) + interfaces[device] = { 'device': device } + if os.path.exists(os.path.join(path, 'address')): + macaddress = open(os.path.join(path, 'address')).read().strip() + if macaddress and macaddress != '00:00:00:00:00:00': + interfaces[device]['macaddress'] = macaddress + if os.path.exists(os.path.join(path, 'mtu')): + interfaces[device]['mtu'] = int(open(os.path.join(path, 'mtu')).read().strip()) + if os.path.exists(os.path.join(path, 'operstate')): + 
interfaces[device]['active'] = open(os.path.join(path, 'operstate')).read().strip() != 'down' +# if os.path.exists(os.path.join(path, 'carrier')): +# interfaces[device]['link'] = open(os.path.join(path, 'carrier')).read().strip() == '1' + if os.path.exists(os.path.join(path, 'device','driver', 'module')): + interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module'))) + if os.path.exists(os.path.join(path, 'type')): + type = open(os.path.join(path, 'type')).read().strip() + if type == '1': + interfaces[device]['type'] = 'ether' + elif type == '512': + interfaces[device]['type'] = 'ppp' + elif type == '772': + interfaces[device]['type'] = 'loopback' + if os.path.exists(os.path.join(path, 'bridge')): + interfaces[device]['type'] = 'bridge' + interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ] + if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')): + interfaces[device]['id'] = open(os.path.join(path, 'bridge', 'bridge_id')).read().strip() + if os.path.exists(os.path.join(path, 'bridge', 'stp_state')): + interfaces[device]['stp'] = open(os.path.join(path, 'bridge', 'stp_state')).read().strip() == '1' + if os.path.exists(os.path.join(path, 'bonding')): + interfaces[device]['type'] = 'bonding' + interfaces[device]['slaves'] = open(os.path.join(path, 'bonding', 'slaves')).read().split() + interfaces[device]['mode'] = open(os.path.join(path, 'bonding', 'mode')).read().split()[0] + interfaces[device]['miimon'] = open(os.path.join(path, 'bonding', 'miimon')).read().split()[0] + interfaces[device]['lacp_rate'] = open(os.path.join(path, 'bonding', 'lacp_rate')).read().split()[0] + primary = open(os.path.join(path, 'bonding', 'primary')).read() + if primary: + interfaces[device]['primary'] = primary + path = os.path.join(path, 'bonding', 'all_slaves_active') + if os.path.exists(path): + interfaces[device]['all_slaves_active'] = open(path).read() == '1' + + # Check whether a interface is in promiscuous mode + if os.path.exists(os.path.join(path,'flags')): + promisc_mode = False + # The second byte indicates whether the interface is in promiscuous mode. 
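+                # (The test below checks bit 0x0100, IFF_PROMISC, of the
+                # flags bitmask; e.g. a flags value of 0x1103 has that bit
+                # set, so the interface is promiscuous.)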
+ # 1 = promisc + # 0 = no promisc + data = int(open(os.path.join(path, 'flags')).read().strip(),16) + promisc_mode = (data & 0x0100 > 0) + interfaces[device]['promisc'] = promisc_mode + + def parse_ip_output(output, secondary=False): + for line in output.split('\n'): + if not line: + continue + words = line.split() + if words[0] == 'inet': + if '/' in words[1]: + address, netmask_length = words[1].split('/') + else: + # pointopoint interfaces do not have a prefix + address = words[1] + netmask_length = "32" + address_bin = struct.unpack('!L', socket.inet_aton(address))[0] + netmask_bin = (1<<32) - (1<<32>>int(netmask_length)) + netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin)) + network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin)) + iface = words[-1] + if iface != device: + interfaces[iface] = {} + if not secondary or "ipv4" not in interfaces[iface]: + interfaces[iface]['ipv4'] = {'address': address, + 'netmask': netmask, + 'network': network} + else: + if "ipv4_secondaries" not in interfaces[iface]: + interfaces[iface]["ipv4_secondaries"] = [] + interfaces[iface]["ipv4_secondaries"].append({ + 'address': address, + 'netmask': netmask, + 'network': network, + }) + + # add this secondary IP to the main device + if secondary: + if "ipv4_secondaries" not in interfaces[device]: + interfaces[device]["ipv4_secondaries"] = [] + interfaces[device]["ipv4_secondaries"].append({ + 'address': address, + 'netmask': netmask, + 'network': network, + }) + + # If this is the default address, update default_ipv4 + if 'address' in default_ipv4 and default_ipv4['address'] == address: + default_ipv4['netmask'] = netmask + default_ipv4['network'] = network + default_ipv4['macaddress'] = macaddress + default_ipv4['mtu'] = interfaces[device]['mtu'] + default_ipv4['type'] = interfaces[device].get("type", "unknown") + default_ipv4['alias'] = words[-1] + if not address.startswith('127.'): + ips['all_ipv4_addresses'].append(address) + elif words[0] == 'inet6': + address, prefix = words[1].split('/') + scope = words[3] + if 'ipv6' not in interfaces[device]: + interfaces[device]['ipv6'] = [] + interfaces[device]['ipv6'].append({ + 'address' : address, + 'prefix' : prefix, + 'scope' : scope + }) + # If this is the default address, update default_ipv6 + if 'address' in default_ipv6 and default_ipv6['address'] == address: + default_ipv6['prefix'] = prefix + default_ipv6['scope'] = scope + default_ipv6['macaddress'] = macaddress + default_ipv6['mtu'] = interfaces[device]['mtu'] + default_ipv6['type'] = interfaces[device].get("type", "unknown") + if not address == '::1': + ips['all_ipv6_addresses'].append(address) + + ip_path = module.get_bin_path("ip") + + args = [ip_path, 'addr', 'show', 'primary', device] + rc, stdout, stderr = self.module.run_command(args) + primary_data = stdout + + args = [ip_path, 'addr', 'show', 'secondary', device] + rc, stdout, stderr = self.module.run_command(args) + secondary_data = stdout + + parse_ip_output(primary_data) + parse_ip_output(secondary_data, secondary=True) + + # replace : by _ in interface name since they are hard to use in template + new_interfaces = {} + for i in interfaces: + if ':' in i: + new_interfaces[i.replace(':','_')] = interfaces[i] + else: + new_interfaces[i] = interfaces[i] + return new_interfaces, ips + +class GenericBsdIfconfigNetwork(Network): + """ + This is a generic BSD subclass of Network using the ifconfig command. 
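+    (ifconfig output differs slightly across the BSDs and Solaris, so the
+    platform subclasses further down override individual parse_* helpers
+    where needed.)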
+ It defines + - interfaces (a list of interface names) + - interface_ dictionary of ipv4, ipv6, and mac address information. + - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses. + It currently does not define + - default_ipv4 and default_ipv6 + - type, mtu and network on interfaces + """ + platform = 'Generic_BSD_Ifconfig' + + def __init__(self, module): + Network.__init__(self, module) + + def populate(self): + + ifconfig_path = module.get_bin_path('ifconfig') + + if ifconfig_path is None: + return self.facts + route_path = module.get_bin_path('route') + + if route_path is None: + return self.facts + + default_ipv4, default_ipv6 = self.get_default_interfaces(route_path) + interfaces, ips = self.get_interfaces_info(ifconfig_path) + self.merge_default_interface(default_ipv4, interfaces, 'ipv4') + self.merge_default_interface(default_ipv6, interfaces, 'ipv6') + self.facts['interfaces'] = interfaces.keys() + + for iface in interfaces: + self.facts[iface] = interfaces[iface] + + self.facts['default_ipv4'] = default_ipv4 + self.facts['default_ipv6'] = default_ipv6 + self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses'] + self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses'] + + return self.facts + + def get_default_interfaces(self, route_path): + + # Use the commands: + # route -n get 8.8.8.8 -> Google public DNS + # route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com + # to find out the default outgoing interface, address, and gateway + + command = dict( + v4 = [route_path, '-n', 'get', '8.8.8.8'], + v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012'] + ) + + interface = dict(v4 = {}, v6 = {}) + + for v in 'v4', 'v6': + + if v == 'v6' and not socket.has_ipv6: + continue + rc, out, err = module.run_command(command[v]) + if not out: + # v6 routing may result in + # RTNETLINK answers: Invalid argument + continue + lines = out.split('\n') + for line in lines: + words = line.split() + # Collect output from route command + if len(words) > 1: + if words[0] == 'interface:': + interface[v]['interface'] = words[1] + if words[0] == 'gateway:': + interface[v]['gateway'] = words[1] + + return interface['v4'], interface['v6'] + + def get_interfaces_info(self, ifconfig_path): + interfaces = {} + current_if = {} + ips = dict( + all_ipv4_addresses = [], + all_ipv6_addresses = [], + ) + # FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a' + # when running the command 'ifconfig'. + # Solaris must explicitly run the command 'ifconfig -a'. 
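+        # The loop below handles output of this general shape (device name,
+        # addresses and values are illustrative only):
+        #   em0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> mtu 1500
+        #        ether 00:11:22:33:44:55
+        #        inet 192.0.2.10 netmask 0xffffff00 broadcast 192.0.2.255
+        #        inet6 fe80::211:22ff:fe33:4455%em0 prefixlen 64 scopeid 0x1
+        # The unindented line starts a new interface record; each indented
+        # line is dispatched on its first word to a parse_* helper below.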
+ rc, out, err = module.run_command([ifconfig_path, '-a']) + + for line in out.split('\n'): + + if line: + words = line.split() + + if re.match('^\S', line) and len(words) > 3: + current_if = self.parse_interface_line(words) + interfaces[ current_if['device'] ] = current_if + elif words[0].startswith('options='): + self.parse_options_line(words, current_if, ips) + elif words[0] == 'nd6': + self.parse_nd6_line(words, current_if, ips) + elif words[0] == 'ether': + self.parse_ether_line(words, current_if, ips) + elif words[0] == 'media:': + self.parse_media_line(words, current_if, ips) + elif words[0] == 'status:': + self.parse_status_line(words, current_if, ips) + elif words[0] == 'lladdr': + self.parse_lladdr_line(words, current_if, ips) + elif words[0] == 'inet': + self.parse_inet_line(words, current_if, ips) + elif words[0] == 'inet6': + self.parse_inet6_line(words, current_if, ips) + else: + self.parse_unknown_line(words, current_if, ips) + + return interfaces, ips + + def parse_interface_line(self, words): + device = words[0][0:-1] + current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'} + current_if['flags'] = self.get_options(words[1]) + current_if['mtu'] = words[3] + current_if['macaddress'] = 'unknown' # will be overwritten later + return current_if + + def parse_options_line(self, words, current_if, ips): + # Mac has options like this... + current_if['options'] = self.get_options(words[0]) + + def parse_nd6_line(self, words, current_if, ips): + # FreBSD has options like this... + current_if['options'] = self.get_options(words[1]) + + def parse_ether_line(self, words, current_if, ips): + current_if['macaddress'] = words[1] + + def parse_media_line(self, words, current_if, ips): + # not sure if this is useful - we also drop information + current_if['media'] = words[1] + if len(words) > 2: + current_if['media_select'] = words[2] + if len(words) > 3: + current_if['media_type'] = words[3][1:] + if len(words) > 4: + current_if['media_options'] = self.get_options(words[4]) + + def parse_status_line(self, words, current_if, ips): + current_if['status'] = words[1] + + def parse_lladdr_line(self, words, current_if, ips): + current_if['lladdr'] = words[1] + + def parse_inet_line(self, words, current_if, ips): + address = {'address': words[1]} + # deal with hex netmask + if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8: + words[3] = '0x' + words[3] + if words[3].startswith('0x'): + address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16))) + else: + # otherwise assume this is a dotted quad + address['netmask'] = words[3] + # calculate the network + address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0] + netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0] + address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin)) + # broadcast may be given or we need to calculate + if len(words) > 5: + address['broadcast'] = words[5] + else: + address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff))) + # add to our list of addresses + if not words[1].startswith('127.'): + ips['all_ipv4_addresses'].append(address['address']) + current_if['ipv4'].append(address) + + def parse_inet6_line(self, words, current_if, ips): + address = {'address': words[1]} + if (len(words) >= 4) and (words[2] == 'prefixlen'): + address['prefix'] = words[3] + if (len(words) >= 6) and (words[4] == 'scopeid'): + address['scope'] = words[5] + localhost6 = ['::1', 
'::1/128', 'fe80::1%lo0']
+        if address['address'] not in localhost6:
+            ips['all_ipv6_addresses'].append(address['address'])
+        current_if['ipv6'].append(address)
+
+    def parse_unknown_line(self, words, current_if, ips):
+        # we are going to ignore unknown lines here - this may be
+        # a bad idea - but you can override it in your subclass
+        pass
+
+    def get_options(self, option_string):
+        start = option_string.find('<') + 1
+        end = option_string.rfind('>')
+        if (start > 0) and (end > 0) and (end > start + 1):
+            option_csv = option_string[start:end]
+            return option_csv.split(',')
+        else:
+            return []
+
+    def merge_default_interface(self, defaults, interfaces, ip_type):
+        if 'interface' not in defaults.keys():
+            return
+        if defaults['interface'] not in interfaces:
+            return
+        ifinfo = interfaces[defaults['interface']]
+        # copy all the interface values across except addresses
+        for item in ifinfo.keys():
+            if item != 'ipv4' and item != 'ipv6':
+                defaults[item] = ifinfo[item]
+        if len(ifinfo[ip_type]) > 0:
+            for item in ifinfo[ip_type][0].keys():
+                defaults[item] = ifinfo[ip_type][0][item]
+
+class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
+    """
+    This is the Mac OS X/Darwin Network Class.
+    It uses GenericBsdIfconfigNetwork with a Darwin-specific media-line parser.
+    """
+    platform = 'Darwin'
+
+    # The media line is different from the default FreeBSD one
+    def parse_media_line(self, words, current_if, ips):
+        # not sure if this is useful - we also drop information
+        current_if['media'] = 'Unknown' # Mac does not give us this
+        current_if['media_select'] = words[1]
+        if len(words) > 2:
+            current_if['media_type'] = words[2][1:]
+        if len(words) > 3:
+            current_if['media_options'] = self.get_options(words[3])
+
+
+class FreeBSDNetwork(GenericBsdIfconfigNetwork, Network):
+    """
+    This is the FreeBSD Network Class.
+    It uses the GenericBsdIfconfigNetwork unchanged.
+    """
+    platform = 'FreeBSD'
+
+class AIXNetwork(GenericBsdIfconfigNetwork, Network):
+    """
+    This is the AIX Network Class.
+    It reuses GenericBsdIfconfigNetwork, overriding interface-line parsing where AIX output differs.
+ """ + platform = 'AIX' + + # AIX 'ifconfig -a' does not have three words in the interface line + def get_interfaces_info(self, ifconfig_path): + interfaces = {} + current_if = {} + ips = dict( + all_ipv4_addresses = [], + all_ipv6_addresses = [], + ) + rc, out, err = module.run_command([ifconfig_path, '-a']) + + for line in out.split('\n'): + + if line: + words = line.split() + + # only this condition differs from GenericBsdIfconfigNetwork + if re.match('^\w*\d*:', line): + current_if = self.parse_interface_line(words) + interfaces[ current_if['device'] ] = current_if + elif words[0].startswith('options='): + self.parse_options_line(words, current_if, ips) + elif words[0] == 'nd6': + self.parse_nd6_line(words, current_if, ips) + elif words[0] == 'ether': + self.parse_ether_line(words, current_if, ips) + elif words[0] == 'media:': + self.parse_media_line(words, current_if, ips) + elif words[0] == 'status:': + self.parse_status_line(words, current_if, ips) + elif words[0] == 'lladdr': + self.parse_lladdr_line(words, current_if, ips) + elif words[0] == 'inet': + self.parse_inet_line(words, current_if, ips) + elif words[0] == 'inet6': + self.parse_inet6_line(words, current_if, ips) + else: + self.parse_unknown_line(words, current_if, ips) + + return interfaces, ips + + # AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here + def parse_interface_line(self, words): + device = words[0][0:-1] + current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'} + current_if['flags'] = self.get_options(words[1]) + current_if['macaddress'] = 'unknown' # will be overwritten later + return current_if + +class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network): + """ + This is the OpenBSD Network Class. + It uses the GenericBsdIfconfigNetwork. + """ + platform = 'OpenBSD' + + # Return macaddress instead of lladdr + def parse_lladdr_line(self, words, current_if, ips): + current_if['macaddress'] = words[1] + +class SunOSNetwork(GenericBsdIfconfigNetwork, Network): + """ + This is the SunOS Network Class. + It uses the GenericBsdIfconfigNetwork. + + Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface + so these facts have been moved inside the 'ipv4' and 'ipv6' lists. + """ + platform = 'SunOS' + + # Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6. + # MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface. + # 'parse_interface_line()' checks for previously seen interfaces before defining + # 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa). 
+ def get_interfaces_info(self, ifconfig_path): + interfaces = {} + current_if = {} + ips = dict( + all_ipv4_addresses = [], + all_ipv6_addresses = [], + ) + rc, out, err = module.run_command([ifconfig_path, '-a']) + + for line in out.split('\n'): + + if line: + words = line.split() + + if re.match('^\S', line) and len(words) > 3: + current_if = self.parse_interface_line(words, current_if, interfaces) + interfaces[ current_if['device'] ] = current_if + elif words[0].startswith('options='): + self.parse_options_line(words, current_if, ips) + elif words[0] == 'nd6': + self.parse_nd6_line(words, current_if, ips) + elif words[0] == 'ether': + self.parse_ether_line(words, current_if, ips) + elif words[0] == 'media:': + self.parse_media_line(words, current_if, ips) + elif words[0] == 'status:': + self.parse_status_line(words, current_if, ips) + elif words[0] == 'lladdr': + self.parse_lladdr_line(words, current_if, ips) + elif words[0] == 'inet': + self.parse_inet_line(words, current_if, ips) + elif words[0] == 'inet6': + self.parse_inet6_line(words, current_if, ips) + else: + self.parse_unknown_line(words, current_if, ips) + + # 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the + # ipv4/ipv6 lists which is ugly and hard to read. + # This quick hack merges the dictionaries. Purely cosmetic. + for iface in interfaces: + for v in 'ipv4', 'ipv6': + combined_facts = {} + for facts in interfaces[iface][v]: + combined_facts.update(facts) + if len(combined_facts.keys()) > 0: + interfaces[iface][v] = [combined_facts] + + return interfaces, ips + + def parse_interface_line(self, words, current_if, interfaces): + device = words[0][0:-1] + if device not in interfaces.keys(): + current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'} + else: + current_if = interfaces[device] + flags = self.get_options(words[1]) + if 'IPv4' in flags: + v = 'ipv4' + if 'IPv6' in flags: + v = 'ipv6' + current_if[v].append({'flags': flags, 'mtu': words[3]}) + current_if['macaddress'] = 'unknown' # will be overwritten later + return current_if + + # Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f + # Add leading zero to each octet where needed. + def parse_ether_line(self, words, current_if, ips): + macaddress = '' + for octet in words[1].split(':'): + octet = ('0' + octet)[-2:None] + macaddress += (octet + ':') + current_if['macaddress'] = macaddress[0:-1] + +class Virtual(Facts): + """ + This is a generic Virtual subclass of Facts. This should be further + subclassed to implement per platform. If you subclass this, + you should define: + - virtualization_type + - virtualization_role + - container (e.g. solaris zones, freebsd jails, linux containers) + + All subclasses MUST define platform. + """ + + def __new__(cls, *arguments, **keyword): + subclass = cls + for sc in Virtual.__subclasses__(): + if sc.platform == platform.system(): + subclass = sc + return super(cls, subclass).__new__(subclass, *arguments, **keyword) + + def __init__(self): + Facts.__init__(self) + + def populate(self): + return self.facts + +class LinuxVirtual(Virtual): + """ + This is a Linux-specific subclass of Virtual. 
It defines + - virtualization_type + - virtualization_role + """ + platform = 'Linux' + + def __init__(self): + Virtual.__init__(self) + + def populate(self): + self.get_virtual_facts() + return self.facts + + # For more information, check: http://people.redhat.com/~rjones/virt-what/ + def get_virtual_facts(self): + if os.path.exists("/proc/xen"): + self.facts['virtualization_type'] = 'xen' + self.facts['virtualization_role'] = 'guest' + try: + for line in open('/proc/xen/capabilities'): + if "control_d" in line: + self.facts['virtualization_role'] = 'host' + except IOError: + pass + return + + if os.path.exists('/proc/vz'): + self.facts['virtualization_type'] = 'openvz' + if os.path.exists('/proc/bc'): + self.facts['virtualization_role'] = 'host' + else: + self.facts['virtualization_role'] = 'guest' + return + + if os.path.exists('/proc/1/cgroup'): + for line in open('/proc/1/cgroup').readlines(): + if re.search('/lxc/', line): + self.facts['virtualization_type'] = 'lxc' + self.facts['virtualization_role'] = 'guest' + return + + product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name') + + if product_name in ['KVM', 'Bochs']: + self.facts['virtualization_type'] = 'kvm' + self.facts['virtualization_role'] = 'guest' + return + + if product_name == 'RHEV Hypervisor': + self.facts['virtualization_type'] = 'RHEV' + self.facts['virtualization_role'] = 'guest' + return + + if product_name == 'VMware Virtual Platform': + self.facts['virtualization_type'] = 'VMware' + self.facts['virtualization_role'] = 'guest' + return + + bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor') + + if bios_vendor == 'Xen': + self.facts['virtualization_type'] = 'xen' + self.facts['virtualization_role'] = 'guest' + return + + if bios_vendor == 'innotek GmbH': + self.facts['virtualization_type'] = 'virtualbox' + self.facts['virtualization_role'] = 'guest' + return + + sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor') + + # FIXME: This does also match hyperv + if sys_vendor == 'Microsoft Corporation': + self.facts['virtualization_type'] = 'VirtualPC' + self.facts['virtualization_role'] = 'guest' + return + + if sys_vendor == 'Parallels Software International Inc.': + self.facts['virtualization_type'] = 'parallels' + self.facts['virtualization_role'] = 'guest' + return + + if os.path.exists('/proc/self/status'): + for line in open('/proc/self/status').readlines(): + if re.match('^VxID: \d+', line): + self.facts['virtualization_type'] = 'linux_vserver' + if re.match('^VxID: 0', line): + self.facts['virtualization_role'] = 'host' + else: + self.facts['virtualization_role'] = 'guest' + return + + if os.path.exists('/proc/cpuinfo'): + for line in open('/proc/cpuinfo').readlines(): + if re.match('^model name.*QEMU Virtual CPU', line): + self.facts['virtualization_type'] = 'kvm' + elif re.match('^vendor_id.*User Mode Linux', line): + self.facts['virtualization_type'] = 'uml' + elif re.match('^model name.*UML', line): + self.facts['virtualization_type'] = 'uml' + elif re.match('^vendor_id.*PowerVM Lx86', line): + self.facts['virtualization_type'] = 'powervm_lx86' + elif re.match('^vendor_id.*IBM/S390', line): + self.facts['virtualization_type'] = 'ibm_systemz' + else: + continue + self.facts['virtualization_role'] = 'guest' + return + + # Beware that we can have both kvm and virtualbox running on a single system + if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK): + modules = [] + for line in open("/proc/modules").readlines(): + data = 
line.split(" ", 1) + modules.append(data[0]) + + if 'kvm' in modules: + self.facts['virtualization_type'] = 'kvm' + self.facts['virtualization_role'] = 'host' + return + + if 'vboxdrv' in modules: + self.facts['virtualization_type'] = 'virtualbox' + self.facts['virtualization_role'] = 'host' + return + + # If none of the above matches, return 'NA' for virtualization_type + # and virtualization_role. This allows for proper grouping. + self.facts['virtualization_type'] = 'NA' + self.facts['virtualization_role'] = 'NA' + return + + +class HPUXVirtual(Virtual): + """ + This is a HP-UX specific subclass of Virtual. It defines + - virtualization_type + - virtualization_role + """ + platform = 'HP-UX' + + def __init__(self): + Virtual.__init__(self) + + def populate(self): + self.get_virtual_facts() + return self.facts + + def get_virtual_facts(self): + if os.path.exists('/usr/sbin/vecheck'): + rc, out, err = module.run_command("/usr/sbin/vecheck") + if rc == 0: + self.facts['virtualization_type'] = 'guest' + self.facts['virtualization_role'] = 'HP vPar' + if os.path.exists('/opt/hpvm/bin/hpvminfo'): + rc, out, err = module.run_command("/opt/hpvm/bin/hpvminfo") + if rc == 0 and re.match('.*Running.*HPVM vPar.*', out): + self.facts['virtualization_type'] = 'guest' + self.facts['virtualization_role'] = 'HPVM vPar' + elif rc == 0 and re.match('.*Running.*HPVM guest.*', out): + self.facts['virtualization_type'] = 'guest' + self.facts['virtualization_role'] = 'HPVM IVM' + elif rc == 0 and re.match('.*Running.*HPVM host.*', out): + self.facts['virtualization_type'] = 'host' + self.facts['virtualization_role'] = 'HPVM' + if os.path.exists('/usr/sbin/parstatus'): + rc, out, err = module.run_command("/usr/sbin/parstatus") + if rc == 0: + self.facts['virtualization_type'] = 'guest' + self.facts['virtualization_role'] = 'HP nPar' + + +class SunOSVirtual(Virtual): + """ + This is a SunOS-specific subclass of Virtual. It defines + - virtualization_type + - virtualization_role + - container + """ + platform = 'SunOS' + + def __init__(self): + Virtual.__init__(self) + + def populate(self): + self.get_virtual_facts() + return self.facts + + def get_virtual_facts(self): + rc, out, err = module.run_command("/usr/sbin/prtdiag") + for line in out.split('\n'): + if 'VMware' in line: + self.facts['virtualization_type'] = 'vmware' + self.facts['virtualization_role'] = 'guest' + if 'Parallels' in line: + self.facts['virtualization_type'] = 'parallels' + self.facts['virtualization_role'] = 'guest' + if 'VirtualBox' in line: + self.facts['virtualization_type'] = 'virtualbox' + self.facts['virtualization_role'] = 'guest' + if 'HVM domU' in line: + self.facts['virtualization_type'] = 'xen' + self.facts['virtualization_role'] = 'guest' + # Check if it's a zone + if os.path.exists("/usr/bin/zonename"): + rc, out, err = module.run_command("/usr/bin/zonename") + if out.rstrip() != "global": + self.facts['container'] = 'zone' + # Check if it's a branded zone (i.e. Solaris 8/9 zone) + if os.path.isdir('/.SUNWnative'): + self.facts['container'] = 'zone' + # If it's a zone check if we can detect if our global zone is itself virtualized. + # Relies on the "guest tools" (e.g. 
vmware tools) to be installed + if 'container' in self.facts and self.facts['container'] == 'zone': + rc, out, err = module.run_command("/usr/sbin/modinfo") + for line in out.split('\n'): + if 'VMware' in line: + self.facts['virtualization_type'] = 'vmware' + self.facts['virtualization_role'] = 'guest' + if 'VirtualBox' in line: + self.facts['virtualization_type'] = 'virtualbox' + self.facts['virtualization_role'] = 'guest' + +def get_file_content(path, default=None): + data = default + if os.path.exists(path) and os.access(path, os.R_OK): + data = open(path).read().strip() + if len(data) == 0: + data = default + return data + +def ansible_facts(module): + facts = {} + facts.update(Facts().populate()) + facts.update(Hardware().populate()) + facts.update(Network(module).populate()) + facts.update(Virtual().populate()) + return facts + +# =========================================== + +def get_all_facts(module): + + setup_options = dict(module_setup=True) + facts = ansible_facts(module) + + for (k, v) in facts.items(): + setup_options["ansible_%s" % k.replace('-', '_')] = v + + # Look for the path to the facter and ohai binary and set + # the variable to that path. + + facter_path = module.get_bin_path('facter') + ohai_path = module.get_bin_path('ohai') + + # if facter is installed, and we can use --json because + # ruby-json is ALSO installed, include facter data in the JSON + + if facter_path is not None: + rc, out, err = module.run_command(facter_path + " --json") + facter = True + try: + facter_ds = json.loads(out) + except: + facter = False + if facter: + for (k,v) in facter_ds.items(): + setup_options["facter_%s" % k] = v + + # ditto for ohai + + if ohai_path is not None: + rc, out, err = module.run_command(ohai_path) + ohai = True + try: + ohai_ds = json.loads(out) + except: + ohai = False + if ohai: + for (k,v) in ohai_ds.items(): + k2 = "ohai_%s" % k.replace('-', '_') + setup_options[k2] = v + + setup_result = { 'ansible_facts': {} } + + for (k,v) in setup_options.items(): + if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']): + setup_result['ansible_facts'][k] = v + + # hack to keep --verbose from showing all the setup module results + setup_result['verbose_override'] = True + + return setup_result + diff --git a/library/system/setup b/library/system/setup index ae03ad8d74d..cc3a5855f1e 100644 --- a/library/system/setup +++ b/library/system/setup @@ -18,21 +18,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import os -import array -import fcntl -import fnmatch -import glob -import platform -import re -import socket -import struct -import datetime -import getpass -import ConfigParser -import StringIO - - DOCUMENTATION = ''' --- module: setup @@ -86,2222 +71,6 @@ ansible all -m setup -a 'filter=facter_*' ansible all -m setup -a 'filter=ansible_eth[0-2]' """ -try: - import selinux - HAVE_SELINUX=True -except ImportError: - HAVE_SELINUX=False - -try: - import json -except ImportError: - import simplejson as json - -class Facts(object): - """ - This class should only attempt to populate those facts that - are mostly generic to all systems. This includes platform facts, - service facts (eg. ssh keys or selinux), and distribution facts. - Anything that requires extensive code or may have more than one - possible implementation to establish facts for a given topic should - subclass Facts. 
- """ - - _I386RE = re.compile(r'i[3456]86') - # For the most part, we assume that platform.dist() will tell the truth. - # This is the fallback to handle unknowns or exceptions - OSDIST_DICT = { '/etc/redhat-release': 'RedHat', - '/etc/vmware-release': 'VMwareESX', - '/etc/openwrt_release': 'OpenWrt', - '/etc/system-release': 'OtherLinux', - '/etc/alpine-release': 'Alpine', - '/etc/release': 'Solaris', - '/etc/arch-release': 'Archlinux', - '/etc/SuSE-release': 'SuSE', - '/etc/gentoo-release': 'Gentoo', - '/etc/os-release': 'Debian' } - SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } - - # A list of dicts. If there is a platform with more than one - # package manager, put the preferred one last. If there is an - # ansible module, use that as the value for the 'name' key. - PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' }, - { 'path' : '/usr/bin/apt-get', 'name' : 'apt' }, - { 'path' : '/usr/bin/zypper', 'name' : 'zypper' }, - { 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' }, - { 'path' : '/usr/bin/pacman', 'name' : 'pacman' }, - { 'path' : '/bin/opkg', 'name' : 'opkg' }, - { 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' }, - { 'path' : '/opt/local/bin/port', 'name' : 'macports' }, - { 'path' : '/sbin/apk', 'name' : 'apk' }, - { 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' }, - { 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' }, - { 'path' : '/usr/bin/emerge', 'name' : 'portage' }, - ] - - def __init__(self): - self.facts = {} - self.get_platform_facts() - self.get_distribution_facts() - self.get_cmdline() - self.get_public_ssh_host_keys() - self.get_selinux_facts() - self.get_pkg_mgr_facts() - self.get_lsb_facts() - self.get_date_time_facts() - self.get_user_facts() - self.get_local_facts() - self.get_env_facts() - - def populate(self): - return self.facts - - # Platform - # platform.system() can be Linux, Darwin, Java, or Windows - def get_platform_facts(self): - self.facts['system'] = platform.system() - self.facts['kernel'] = platform.release() - self.facts['machine'] = platform.machine() - self.facts['python_version'] = platform.python_version() - self.facts['fqdn'] = socket.getfqdn() - self.facts['hostname'] = platform.node().split('.')[0] - self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:]) - arch_bits = platform.architecture()[0] - self.facts['userspace_bits'] = arch_bits.replace('bit', '') - if self.facts['machine'] == 'x86_64': - self.facts['architecture'] = self.facts['machine'] - if self.facts['userspace_bits'] == '64': - self.facts['userspace_architecture'] = 'x86_64' - elif self.facts['userspace_bits'] == '32': - self.facts['userspace_architecture'] = 'i386' - elif Facts._I386RE.search(self.facts['machine']): - self.facts['architecture'] = 'i386' - if self.facts['userspace_bits'] == '64': - self.facts['userspace_architecture'] = 'x86_64' - elif self.facts['userspace_bits'] == '32': - self.facts['userspace_architecture'] = 'i386' - else: - self.facts['architecture'] = self.facts['machine'] - if self.facts['system'] == 'Linux': - self.get_distribution_facts() - elif self.facts['system'] == 'AIX': - rc, out, err = module.run_command("/usr/sbin/bootinfo -p") - data = out.split('\n') - self.facts['architecture'] = data[0] - - - def get_local_facts(self): - - fact_path = module.params.get('fact_path', None) - if not fact_path or not os.path.exists(fact_path): - return - - local = {} - for fn in sorted(glob.glob(fact_path + '/*.fact')): - # where it will sit under local facts - fact_base = os.path.basename(fn).replace('.fact','') - if 
os.access(fn, os.X_OK): - # run it - # try to read it as json first - # if that fails read it with ConfigParser - # if that fails, skip it - rc, out, err = module.run_command(fn) - else: - out = open(fn).read() - - # load raw json - fact = 'loading %s' % fact_base - try: - fact = json.loads(out) - except ValueError, e: - # load raw ini - cp = ConfigParser.ConfigParser() - try: - cp.readfp(StringIO.StringIO(out)) - except ConfigParser.Error, e: - fact="error loading fact - please check content" - else: - fact = {} - #print cp.sections() - for sect in cp.sections(): - if sect not in fact: - fact[sect] = {} - for opt in cp.options(sect): - val = cp.get(sect, opt) - fact[sect][opt]=val - - local[fact_base] = fact - if not local: - return - self.facts['local'] = local - - # platform.dist() is deprecated in 2.6 - # in 2.6 and newer, you should use platform.linux_distribution() - def get_distribution_facts(self): - - # A list with OS Family members - OS_FAMILY = dict( - RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat', - SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat', - OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat', - XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', SLES = 'Suse', - SLED = 'Suse', OpenSuSE = 'Suse', SuSE = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo', - Archlinux = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', - Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris', - SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin', - FreeBSD = 'FreeBSD', HPUX = 'HP-UX' - ) - - if self.facts['system'] == 'AIX': - self.facts['distribution'] = 'AIX' - rc, out, err = module.run_command("/usr/bin/oslevel") - data = out.split('.') - self.facts['distribution_version'] = data[0] - self.facts['distribution_release'] = data[1] - elif self.facts['system'] == 'HP-UX': - self.facts['distribution'] = 'HP-UX' - rc, out, err = module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True) - data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out) - if data: - self.facts['distribution_version'] = data.groups()[0] - self.facts['distribution_release'] = data.groups()[1] - elif self.facts['system'] == 'Darwin': - self.facts['distribution'] = 'MacOSX' - rc, out, err = module.run_command("/usr/bin/sw_vers -productVersion") - data = out.split()[-1] - self.facts['distribution_version'] = data - elif self.facts['system'] == 'FreeBSD': - self.facts['distribution'] = 'FreeBSD' - self.facts['distribution_release'] = platform.release() - self.facts['distribution_version'] = platform.version() - elif self.facts['system'] == 'OpenBSD': - self.facts['distribution'] = 'OpenBSD' - self.facts['distribution_release'] = platform.release() - rc, out, err = module.run_command("/sbin/sysctl -n kern.version") - match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out) - if match: - self.facts['distribution_version'] = match.groups()[0] - else: - self.facts['distribution_version'] = 'release' - else: - dist = platform.dist() - self.facts['distribution'] = dist[0].capitalize() or 'NA' - self.facts['distribution_version'] = dist[1] or 'NA' - self.facts['distribution_release'] = dist[2] or 'NA' - # Try to handle the exceptions now ... 
- for (path, name) in Facts.OSDIST_DICT.items(): - if os.path.exists(path): - if self.facts['distribution'] == 'Fedora': - pass - elif name == 'RedHat': - data = get_file_content(path) - if 'Red Hat' in data: - self.facts['distribution'] = name - else: - self.facts['distribution'] = data.split()[0] - elif name == 'OtherLinux': - data = get_file_content(path) - if 'Amazon' in data: - self.facts['distribution'] = 'Amazon' - self.facts['distribution_version'] = data.split()[-1] - elif name == 'OpenWrt': - data = get_file_content(path) - if 'OpenWrt' in data: - self.facts['distribution'] = name - version = re.search('DISTRIB_RELEASE="(.*)"', data) - if version: - self.facts['distribution_version'] = version.groups()[0] - release = re.search('DISTRIB_CODENAME="(.*)"', data) - if release: - self.facts['distribution_release'] = release.groups()[0] - elif name == 'Alpine': - data = get_file_content(path) - self.facts['distribution'] = 'Alpine' - self.facts['distribution_version'] = data - elif name == 'Solaris': - data = get_file_content(path).split('\n')[0] - ora_prefix = '' - if 'Oracle Solaris' in data: - data = data.replace('Oracle ','') - ora_prefix = 'Oracle ' - self.facts['distribution'] = data.split()[0] - self.facts['distribution_version'] = data.split()[1] - self.facts['distribution_release'] = ora_prefix + data - elif name == 'SuSE': - data = get_file_content(path).splitlines() - self.facts['distribution_release'] = data[2].split('=')[1].strip() - elif name == 'Debian': - data = get_file_content(path).split('\n')[0] - release = re.search("PRETTY_NAME.+ \(?([^ ]+?)\)?\"", data) - if release: - self.facts['distribution_release'] = release.groups()[0] - else: - self.facts['distribution'] = name - - self.facts['os_family'] = self.facts['distribution'] - if self.facts['distribution'] in OS_FAMILY: - self.facts['os_family'] = OS_FAMILY[self.facts['distribution']] - - def get_cmdline(self): - data = get_file_content('/proc/cmdline') - if data: - self.facts['cmdline'] = {} - for piece in shlex.split(data): - item = piece.split('=', 1) - if len(item) == 1: - self.facts['cmdline'][item[0]] = True - else: - self.facts['cmdline'][item[0]] = item[1] - - def get_public_ssh_host_keys(self): - dsa_filename = '/etc/ssh/ssh_host_dsa_key.pub' - rsa_filename = '/etc/ssh/ssh_host_rsa_key.pub' - ecdsa_filename = '/etc/ssh/ssh_host_ecdsa_key.pub' - - if self.facts['system'] == 'Darwin': - dsa_filename = '/etc/ssh_host_dsa_key.pub' - rsa_filename = '/etc/ssh_host_rsa_key.pub' - ecdsa_filename = '/etc/ssh_host_ecdsa_key.pub' - dsa = get_file_content(dsa_filename) - rsa = get_file_content(rsa_filename) - ecdsa = get_file_content(ecdsa_filename) - if dsa is None: - dsa = 'NA' - else: - self.facts['ssh_host_key_dsa_public'] = dsa.split()[1] - if rsa is None: - rsa = 'NA' - else: - self.facts['ssh_host_key_rsa_public'] = rsa.split()[1] - if ecdsa is None: - ecdsa = 'NA' - else: - self.facts['ssh_host_key_ecdsa_public'] = ecdsa.split()[1] - - def get_pkg_mgr_facts(self): - self.facts['pkg_mgr'] = 'unknown' - for pkg in Facts.PKG_MGRS: - if os.path.exists(pkg['path']): - self.facts['pkg_mgr'] = pkg['name'] - if self.facts['system'] == 'OpenBSD': - self.facts['pkg_mgr'] = 'openbsd_pkg' - - def get_lsb_facts(self): - lsb_path = module.get_bin_path('lsb_release') - if lsb_path: - rc, out, err = module.run_command([lsb_path, "-a"]) - if rc == 0: - self.facts['lsb'] = {} - for line in out.split('\n'): - if len(line) < 1: - continue - value = line.split(':', 1)[1].strip() - if 'LSB Version:' in line: - 
self.facts['lsb']['release'] = value - elif 'Distributor ID:' in line: - self.facts['lsb']['id'] = value - elif 'Description:' in line: - self.facts['lsb']['description'] = value - elif 'Release:' in line: - self.facts['lsb']['release'] = value - elif 'Codename:' in line: - self.facts['lsb']['codename'] = value - if 'lsb' in self.facts and 'release' in self.facts['lsb']: - self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0] - elif lsb_path is None and os.path.exists('/etc/lsb-release'): - self.facts['lsb'] = {} - f = open('/etc/lsb-release', 'r') - try: - for line in f.readlines(): - value = line.split('=',1)[1].strip() - if 'DISTRIB_ID' in line: - self.facts['lsb']['id'] = value - elif 'DISTRIB_RELEASE' in line: - self.facts['lsb']['release'] = value - elif 'DISTRIB_DESCRIPTION' in line: - self.facts['lsb']['description'] = value - elif 'DISTRIB_CODENAME' in line: - self.facts['lsb']['codename'] = value - finally: - f.close() - else: - return self.facts - - if 'lsb' in self.facts and 'release' in self.facts['lsb']: - self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0] - - - def get_selinux_facts(self): - if not HAVE_SELINUX: - self.facts['selinux'] = False - return - self.facts['selinux'] = {} - if not selinux.is_selinux_enabled(): - self.facts['selinux']['status'] = 'disabled' - else: - self.facts['selinux']['status'] = 'enabled' - try: - self.facts['selinux']['policyvers'] = selinux.security_policyvers() - except OSError, e: - self.facts['selinux']['policyvers'] = 'unknown' - try: - (rc, configmode) = selinux.selinux_getenforcemode() - if rc == 0: - self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown') - else: - self.facts['selinux']['config_mode'] = 'unknown' - except OSError, e: - self.facts['selinux']['config_mode'] = 'unknown' - try: - mode = selinux.security_getenforce() - self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown') - except OSError, e: - self.facts['selinux']['mode'] = 'unknown' - try: - (rc, policytype) = selinux.selinux_getpolicytype() - if rc == 0: - self.facts['selinux']['type'] = policytype - else: - self.facts['selinux']['type'] = 'unknown' - except OSError, e: - self.facts['selinux']['type'] = 'unknown' - - - def get_date_time_facts(self): - self.facts['date_time'] = {} - - now = datetime.datetime.now() - self.facts['date_time']['year'] = now.strftime('%Y') - self.facts['date_time']['month'] = now.strftime('%m') - self.facts['date_time']['day'] = now.strftime('%d') - self.facts['date_time']['hour'] = now.strftime('%H') - self.facts['date_time']['minute'] = now.strftime('%M') - self.facts['date_time']['second'] = now.strftime('%S') - self.facts['date_time']['epoch'] = now.strftime('%s') - if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%': - self.facts['date_time']['epoch'] = str(int(time.time())) - self.facts['date_time']['date'] = now.strftime('%Y-%m-%d') - self.facts['date_time']['time'] = now.strftime('%H:%M:%S') - self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ") - self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") - self.facts['date_time']['tz'] = time.strftime("%Z") - self.facts['date_time']['tz_offset'] = time.strftime("%z") - - - # User - def get_user_facts(self): - self.facts['user_id'] = getpass.getuser() - - def get_env_facts(self): - self.facts['env'] = {} - for k,v in os.environ.iteritems(): - self.facts['env'][k] = v - -class 
Hardware(Facts): - """ - This is a generic Hardware subclass of Facts. This should be further - subclassed to implement per platform. If you subclass this, it - should define: - - memfree_mb - - memtotal_mb - - swapfree_mb - - swaptotal_mb - - processor (a list) - - processor_cores - - processor_count - - All subclasses MUST define platform. - """ - platform = 'Generic' - - def __new__(cls, *arguments, **keyword): - subclass = cls - for sc in Hardware.__subclasses__(): - if sc.platform == platform.system(): - subclass = sc - return super(cls, subclass).__new__(subclass, *arguments, **keyword) - - def __init__(self): - Facts.__init__(self) - - def populate(self): - return self.facts - -class LinuxHardware(Hardware): - """ - Linux-specific subclass of Hardware. Defines memory and CPU facts: - - memfree_mb - - memtotal_mb - - swapfree_mb - - swaptotal_mb - - processor (a list) - - processor_cores - - processor_count - - In addition, it also defines number of DMI facts and device facts. - """ - - platform = 'Linux' - MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'] - - def __init__(self): - Hardware.__init__(self) - - def populate(self): - self.get_cpu_facts() - self.get_memory_facts() - self.get_dmi_facts() - self.get_device_facts() - self.get_mount_facts() - return self.facts - - def get_memory_facts(self): - if not os.access("/proc/meminfo", os.R_OK): - return - for line in open("/proc/meminfo").readlines(): - data = line.split(":", 1) - key = data[0] - if key in LinuxHardware.MEMORY_FACTS: - val = data[1].strip().split(' ')[0] - self.facts["%s_mb" % key.lower()] = long(val) / 1024 - - def get_cpu_facts(self): - i = 0 - physid = 0 - coreid = 0 - sockets = {} - cores = {} - if not os.access("/proc/cpuinfo", os.R_OK): - return - self.facts['processor'] = [] - for line in open("/proc/cpuinfo").readlines(): - data = line.split(":", 1) - key = data[0].strip() - # model name is for Intel arch, Processor (mind the uppercase P) - # works for some ARM devices, like the Sheevaplug. - if key == 'model name' or key == 'Processor': - if 'processor' not in self.facts: - self.facts['processor'] = [] - self.facts['processor'].append(data[1].strip()) - i += 1 - elif key == 'physical id': - physid = data[1].strip() - if physid not in sockets: - sockets[physid] = 1 - elif key == 'core id': - coreid = data[1].strip() - if coreid not in sockets: - cores[coreid] = 1 - elif key == 'cpu cores': - sockets[physid] = int(data[1].strip()) - elif key == 'siblings': - cores[coreid] = int(data[1].strip()) - self.facts['processor_count'] = sockets and len(sockets) or i - self.facts['processor_cores'] = sockets.values() and sockets.values()[0] or 1 - self.facts['processor_threads_per_core'] = ((cores.values() and - cores.values()[0] or 1) / self.facts['processor_cores']) - self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] * - self.facts['processor_count'] * self.facts['processor_cores']) - - def get_dmi_facts(self): - ''' learn dmi facts from system - - Try /sys first for dmi related facts. 
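For example, when the kernel exposes these attributes, each fact is a
one-line file read. A sketch using the get_file_content() helper defined
later in this module (paths as used in the code below):

    product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
    bios_version = get_file_content('/sys/devices/virtual/dmi/id/bios_version')
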
- If that is not available, fall back to dmidecode executable ''' - - if os.path.exists('/sys/devices/virtual/dmi/id/product_name'): - # Use kernel DMI info, if available - - # DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf - FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop", - "Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower", - "Portable", "Laptop", "Notebook", "Hand Held", "Docking Station", - "All In One", "Sub Notebook", "Space-saving", "Lunch Box", - "Main Server Chassis", "Expansion Chassis", "Sub Chassis", - "Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis", - "Rack Mount Chassis", "Sealed-case PC", "Multi-system", - "CompactPCI", "AdvancedTCA", "Blade" ] - - DMI_DICT = { - 'bios_date': '/sys/devices/virtual/dmi/id/bios_date', - 'bios_version': '/sys/devices/virtual/dmi/id/bios_version', - 'form_factor': '/sys/devices/virtual/dmi/id/chassis_type', - 'product_name': '/sys/devices/virtual/dmi/id/product_name', - 'product_serial': '/sys/devices/virtual/dmi/id/product_serial', - 'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid', - 'product_version': '/sys/devices/virtual/dmi/id/product_version', - 'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor' - } - - for (key,path) in DMI_DICT.items(): - data = get_file_content(path) - if data is not None: - if key == 'form_factor': - try: - self.facts['form_factor'] = FORM_FACTOR[int(data)] - except IndexError, e: - self.facts['form_factor'] = 'unknown (%s)' % data - else: - self.facts[key] = data - else: - self.facts[key] = 'NA' - - else: - # Fall back to using dmidecode, if available - dmi_bin = module.get_bin_path('dmidecode') - DMI_DICT = { - 'bios_date': 'bios-release-date', - 'bios_version': 'bios-version', - 'form_factor': 'chassis-type', - 'product_name': 'system-product-name', - 'product_serial': 'system-serial-number', - 'product_uuid': 'system-uuid', - 'product_version': 'system-version', - 'system_vendor': 'system-manufacturer' - } - for (k, v) in DMI_DICT.items(): - if dmi_bin is not None: - (rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v)) - if rc == 0: - # Strip out commented lines (specific dmidecode output) - thisvalue = ''.join([ line for line in out.split('\n') if not line.startswith('#') ]) - try: - json.dumps(thisvalue) - except UnicodeDecodeError: - thisvalue = "NA" - - self.facts[k] = thisvalue - else: - self.facts[k] = 'NA' - else: - self.facts[k] = 'NA' - - def get_mount_facts(self): - self.facts['mounts'] = [] - mtab = get_file_content('/etc/mtab', '') - for line in mtab.split('\n'): - if line.startswith('/'): - fields = line.rstrip('\n').split() - if(fields[2] != 'none'): - size_total = None - size_available = None - try: - statvfs_result = os.statvfs(fields[1]) - size_total = statvfs_result.f_bsize * statvfs_result.f_blocks - size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail) - except OSError, e: - continue - - self.facts['mounts'].append( - {'mount': fields[1], - 'device':fields[0], - 'fstype': fields[2], - 'options': fields[3], - # statvfs data - 'size_total': size_total, - 'size_available': size_available, - }) - - def get_device_facts(self): - self.facts['devices'] = {} - lspci = module.get_bin_path('lspci') - if lspci: - rc, pcidata, err = module.run_command([lspci, '-D']) - else: - pcidata = None - - try: - block_devs = os.listdir("/sys/block") - except OSError: - return - - for block in block_devs: - virtual = 1 - sysfs_no_links = 0 - try: - path = os.readlink(os.path.join("/sys/block/", block)) - 
except OSError, e: - if e.errno == errno.EINVAL: - path = block - sysfs_no_links = 1 - else: - continue - if "virtual" in path: - continue - sysdir = os.path.join("/sys/block", path) - if sysfs_no_links == 1: - for folder in os.listdir(sysdir): - if "device" in folder: - virtual = 0 - break - if virtual: - continue - d = {} - diskname = os.path.basename(sysdir) - for key in ['vendor', 'model']: - d[key] = get_file_content(sysdir + "/device/" + key) - - for key,test in [ ('removable','/removable'), \ - ('support_discard','/queue/discard_granularity'), - ]: - d[key] = get_file_content(sysdir + test) - - d['partitions'] = {} - for folder in os.listdir(sysdir): - m = re.search("(" + diskname + "\d+)", folder) - if m: - part = {} - partname = m.group(1) - part_sysdir = sysdir + "/" + partname - - part['start'] = get_file_content(part_sysdir + "/start",0) - part['sectors'] = get_file_content(part_sysdir + "/size",0) - part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512) - part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize']))) - d['partitions'][partname] = part - - d['rotational'] = get_file_content(sysdir + "/queue/rotational") - d['scheduler_mode'] = "" - scheduler = get_file_content(sysdir + "/queue/scheduler") - if scheduler is not None: - m = re.match(".*?(\[(.*)\])", scheduler) - if m: - d['scheduler_mode'] = m.group(2) - - d['sectors'] = get_file_content(sysdir + "/size") - if not d['sectors']: - d['sectors'] = 0 - d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size") - if not d['sectorsize']: - d['sectorsize'] = 512 - d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize'])) - - d['host'] = "" - - # domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7). - m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir) - if m and pcidata: - pciid = m.group(1) - did = re.escape(pciid) - m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE) - d['host'] = m.group(1) - - d['holders'] = [] - if os.path.isdir(sysdir + "/holders"): - for folder in os.listdir(sysdir + "/holders"): - if not folder.startswith("dm-"): - continue - name = get_file_content(sysdir + "/holders/" + folder + "/dm/name") - if name: - d['holders'].append(name) - else: - d['holders'].append(folder) - - self.facts['devices'][diskname] = d - - -class SunOSHardware(Hardware): - """ - In addition to the generic memory and cpu facts, this also sets - swap_reserved_mb and swap_allocated_mb that is available from *swap -s*. - """ - platform = 'SunOS' - - def __init__(self): - Hardware.__init__(self) - - def populate(self): - self.get_cpu_facts() - self.get_memory_facts() - return self.facts - - def get_cpu_facts(self): - physid = 0 - sockets = {} - rc, out, err = module.run_command("/usr/bin/kstat cpu_info") - self.facts['processor'] = [] - for line in out.split('\n'): - if len(line) < 1: - continue - data = line.split(None, 1) - key = data[0].strip() - # "brand" works on Solaris 10 & 11. "implementation" for Solaris 9. 
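
# For orientation, an abridged 'kstat cpu_info' record looks roughly like
# the sample below. The exact fields vary by Solaris release and hardware,
# so treat this as illustrative only:
#
#   module: cpu_info                    instance: 0
#   brand                               Intel(r) Xeon(r) CPU
#   clock_MHz                           2933
#   chip_id                             0
#   implementation                      x86 (chipid 0x0 ...)
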
- if key == 'module:':
- brand = ''
- elif key == 'brand':
- brand = data[1].strip()
- elif key == 'clock_MHz':
- clock_mhz = data[1].strip()
- elif key == 'implementation':
- processor = brand or data[1].strip()
- # Add clock speed to description for SPARC CPU
- if self.facts['machine'] != 'i86pc':
- processor += " @ " + clock_mhz + "MHz"
- if 'processor' not in self.facts:
- self.facts['processor'] = []
- self.facts['processor'].append(processor)
- elif key == 'chip_id':
- physid = data[1].strip()
- if physid not in sockets:
- sockets[physid] = 1
- else:
- sockets[physid] += 1
- # Counting cores on Solaris can be complicated.
- # https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
- # Treat 'processor_count' as physical sockets and 'processor_cores' as
- # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC as
- # these processors have: sockets -> cores -> threads/virtual CPU.
- if len(sockets) > 0:
- self.facts['processor_count'] = len(sockets)
- self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
- else:
- self.facts['processor_cores'] = 'NA'
- self.facts['processor_count'] = len(self.facts['processor'])
-
- def get_memory_facts(self):
- rc, out, err = module.run_command(["/usr/sbin/prtconf"])
- for line in out.split('\n'):
- if 'Memory size' in line:
- self.facts['memtotal_mb'] = line.split()[2]
- rc, out, err = module.run_command("/usr/sbin/swap -s")
- allocated = long(out.split()[1][:-1])
- reserved = long(out.split()[5][:-1])
- used = long(out.split()[8][:-1])
- free = long(out.split()[10][:-1])
- self.facts['swapfree_mb'] = free / 1024
- self.facts['swaptotal_mb'] = (free + used) / 1024
- self.facts['swap_allocated_mb'] = allocated / 1024
- self.facts['swap_reserved_mb'] = reserved / 1024
-
-class OpenBSDHardware(Hardware):
- """
- OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
- - processor_speed
- - devices
- """
- platform = 'OpenBSD'
- DMESG_BOOT = '/var/run/dmesg.boot'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.sysctl = self.get_sysctl()
- self.get_memory_facts()
- self.get_processor_facts()
- self.get_device_facts()
- return self.facts
-
- def get_sysctl(self):
- rc, out, err = module.run_command(["/sbin/sysctl", "hw"])
- if rc != 0:
- return dict()
- sysctl = dict()
- for line in out.splitlines():
- (key, value) = line.split('=')
- sysctl[key] = value.strip()
- return sysctl
-
- def get_memory_facts(self):
- # Get free memory. vmstat output looks like:
- # procs memory page disks traps cpu
- # r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
- # 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
- rc, out, err = module.run_command("/usr/bin/vmstat")
- if rc == 0:
- self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[4]) / 1024
- self.facts['memtotal_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024
-
- # Get swapctl info.
swapctl output looks like: - # total: 69268 1K-blocks allocated, 0 used, 69268 available - # And for older OpenBSD: - # total: 69268k bytes allocated = 0k used, 69268k available - rc, out, err = module.run_command("/sbin/swapctl -sk") - if rc == 0: - data = out.split() - self.facts['swapfree_mb'] = long(data[-2].translate(None, "kmg")) / 1024 - self.facts['swaptotal_mb'] = long(data[1].translate(None, "kmg")) / 1024 - - def get_processor_facts(self): - processor = [] - dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT) - if not dmesg_boot: - rc, dmesg_boot, err = module.run_command("/sbin/dmesg") - i = 0 - for line in dmesg_boot.splitlines(): - if line.split(' ', 1)[0] == 'cpu%i:' % i: - processor.append(line.split(' ', 1)[1]) - i = i + 1 - processor_count = i - self.facts['processor'] = processor - self.facts['processor_count'] = processor_count - # I found no way to figure out the number of Cores per CPU in OpenBSD - self.facts['processor_cores'] = 'NA' - - def get_device_facts(self): - devices = [] - devices.extend(self.sysctl['hw.disknames'].split(',')) - self.facts['devices'] = devices - -class FreeBSDHardware(Hardware): - """ - FreeBSD-specific subclass of Hardware. Defines memory and CPU facts: - - memfree_mb - - memtotal_mb - - swapfree_mb - - swaptotal_mb - - processor (a list) - - processor_cores - - processor_count - - devices - """ - platform = 'FreeBSD' - DMESG_BOOT = '/var/run/dmesg.boot' - - def __init__(self): - Hardware.__init__(self) - - def populate(self): - self.get_cpu_facts() - self.get_memory_facts() - self.get_dmi_facts() - self.get_device_facts() - self.get_mount_facts() - return self.facts - - def get_cpu_facts(self): - self.facts['processor'] = [] - rc, out, err = module.run_command("/sbin/sysctl -n hw.ncpu") - self.facts['processor_count'] = out.strip() - - dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT) - if not dmesg_boot: - rc, dmesg_boot, err = module.run_command("/sbin/dmesg") - for line in dmesg_boot.split('\n'): - if 'CPU:' in line: - cpu = re.sub(r'CPU:\s+', r"", line) - self.facts['processor'].append(cpu.strip()) - if 'Logical CPUs per core' in line: - self.facts['processor_cores'] = line.split()[4] - - - def get_memory_facts(self): - rc, out, err = module.run_command("/sbin/sysctl vm.stats") - for line in out.split('\n'): - data = line.split() - if 'vm.stats.vm.v_page_size' in line: - pagesize = long(data[1]) - if 'vm.stats.vm.v_page_count' in line: - pagecount = long(data[1]) - if 'vm.stats.vm.v_free_count' in line: - freecount = long(data[1]) - self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024 - self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024 - # Get swapinfo. 
swapinfo output looks like: - # Device 1M-blocks Used Avail Capacity - # /dev/ada0p3 314368 0 314368 0% - # - rc, out, err = module.run_command("/usr/sbin/swapinfo -m") - lines = out.split('\n') - if len(lines[-1]) == 0: - lines.pop() - data = lines[-1].split() - self.facts['swaptotal_mb'] = data[1] - self.facts['swapfree_mb'] = data[3] - - def get_mount_facts(self): - self.facts['mounts'] = [] - fstab = get_file_content('/etc/fstab') - if fstab: - for line in fstab.split('\n'): - if line.startswith('#') or line.strip() == '': - continue - fields = re.sub(r'\s+',' ',line.rstrip('\n')).split() - self.facts['mounts'].append({'mount': fields[1] , 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]}) - - def get_device_facts(self): - sysdir = '/dev' - self.facts['devices'] = {} - drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = module.run_command("/sbin/sysctl kern.disks") - slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)') - if os.path.isdir(sysdir): - dirlist = sorted(os.listdir(sysdir)) - for device in dirlist: - d = drives.match(device) - if d: - self.facts['devices'][d.group(1)] = [] - s = slices.match(device) - if s: - self.facts['devices'][d.group(1)].append(s.group(1)) - - def get_dmi_facts(self): - ''' learn dmi facts from system - - Use dmidecode executable if available''' - - # Fall back to using dmidecode, if available - dmi_bin = module.get_bin_path('dmidecode') - DMI_DICT = dict( - bios_date='bios-release-date', - bios_version='bios-version', - form_factor='chassis-type', - product_name='system-product-name', - product_serial='system-serial-number', - product_uuid='system-uuid', - product_version='system-version', - system_vendor='system-manufacturer' - ) - for (k, v) in DMI_DICT.items(): - if dmi_bin is not None: - (rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v)) - if rc == 0: - # Strip out commented lines (specific dmidecode output) - self.facts[k] = ''.join([ line for line in out.split('\n') if not line.startswith('#') ]) - try: - json.dumps(self.facts[k]) - except UnicodeDecodeError: - self.facts[k] = 'NA' - else: - self.facts[k] = 'NA' - else: - self.facts[k] = 'NA' - - -class NetBSDHardware(Hardware): - """ - NetBSD-specific subclass of Hardware. Defines memory and CPU facts: - - memfree_mb - - memtotal_mb - - swapfree_mb - - swaptotal_mb - - processor (a list) - - processor_cores - - processor_count - - devices - """ - platform = 'NetBSD' - MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'] - - def __init__(self): - Hardware.__init__(self) - - def populate(self): - self.get_cpu_facts() - self.get_memory_facts() - self.get_mount_facts() - return self.facts - - def get_cpu_facts(self): - - i = 0 - physid = 0 - sockets = {} - if not os.access("/proc/cpuinfo", os.R_OK): - return - self.facts['processor'] = [] - for line in open("/proc/cpuinfo").readlines(): - data = line.split(":", 1) - key = data[0].strip() - # model name is for Intel arch, Processor (mind the uppercase P) - # works for some ARM devices, like the Sheevaplug. 
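
# For orientation, an abridged x86 /proc/cpuinfo stanza looks like this
# (field names differ across architectures, so treat as illustrative):
#
#   processor       : 0
#   model name      : Intel(R) Core(TM) i5 CPU M 520 @ 2.40GHz
#   physical id     : 0
#   cpu cores       : 2
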
- if key == 'model name' or key == 'Processor': - if 'processor' not in self.facts: - self.facts['processor'] = [] - self.facts['processor'].append(data[1].strip()) - i += 1 - elif key == 'physical id': - physid = data[1].strip() - if physid not in sockets: - sockets[physid] = 1 - elif key == 'cpu cores': - sockets[physid] = int(data[1].strip()) - if len(sockets) > 0: - self.facts['processor_count'] = len(sockets) - self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values()) - else: - self.facts['processor_count'] = i - self.facts['processor_cores'] = 'NA' - - def get_memory_facts(self): - if not os.access("/proc/meminfo", os.R_OK): - return - for line in open("/proc/meminfo").readlines(): - data = line.split(":", 1) - key = data[0] - if key in NetBSDHardware.MEMORY_FACTS: - val = data[1].strip().split(' ')[0] - self.facts["%s_mb" % key.lower()] = long(val) / 1024 - - def get_mount_facts(self): - self.facts['mounts'] = [] - fstab = get_file_content('/etc/fstab') - if fstab: - for line in fstab.split('\n'): - if line.startswith('#') or line.strip() == '': - continue - fields = re.sub(r'\s+',' ',line.rstrip('\n')).split() - self.facts['mounts'].append({'mount': fields[1] , 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]}) - -class AIX(Hardware): - """ - AIX-specific subclass of Hardware. Defines memory and CPU facts: - - memfree_mb - - memtotal_mb - - swapfree_mb - - swaptotal_mb - - processor (a list) - - processor_cores - - processor_count - """ - platform = 'AIX' - - def __init__(self): - Hardware.__init__(self) - - def populate(self): - self.get_cpu_facts() - self.get_memory_facts() - self.get_dmi_facts() - return self.facts - - def get_cpu_facts(self): - self.facts['processor'] = [] - - - rc, out, err = module.run_command("/usr/sbin/lsdev -Cc processor") - if out: - i = 0 - for line in out.split('\n'): - - if 'Available' in line: - if i == 0: - data = line.split(' ') - cpudev = data[0] - - i += 1 - self.facts['processor_count'] = int(i) - - rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type") - - data = out.split(' ') - self.facts['processor'] = data[1] - - rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads") - - data = out.split(' ') - self.facts['processor_cores'] = int(data[1]) - - def get_memory_facts(self): - pagesize = 4096 - rc, out, err = module.run_command("/usr/bin/vmstat -v") - for line in out.split('\n'): - data = line.split() - if 'memory pages' in line: - pagecount = long(data[0]) - if 'free pages' in line: - freecount = long(data[0]) - self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024 - self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024 - # Get swapinfo. swapinfo output looks like: - # Device 1M-blocks Used Avail Capacity - # /dev/ada0p3 314368 0 314368 0% - # - rc, out, err = module.run_command("/usr/sbin/lsps -s") - if out: - lines = out.split('\n') - data = lines[1].split() - swaptotal_mb = long(data[0].rstrip('MB')) - percused = int(data[1].rstrip('%')) - self.facts['swaptotal_mb'] = swaptotal_mb - self.facts['swapfree_mb'] = long(swaptotal_mb * ( 100 - percused ) / 100) - - def get_dmi_facts(self): - rc, out, err = module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion") - data = out.split() - self.facts['firmware_version'] = data[1].strip('IBM,') - -class HPUX(Hardware): - """ - HP-UX-specifig subclass of Hardware. 
Defines memory and CPU facts: - - memfree_mb - - memtotal_mb - - swapfree_mb - - swaptotal_mb - - processor - - processor_cores - - processor_count - - model - - firmware - """ - - platform = 'HP-UX' - - def __init__(self): - Hardware.__init__(self) - - def populate(self): - self.get_cpu_facts() - self.get_memory_facts() - self.get_hw_facts() - return self.facts - - def get_cpu_facts(self): - if self.facts['architecture'] == '9000/800': - rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True) - self.facts['processor_count'] = int(out.strip()) - #Working with machinfo mess - elif self.facts['architecture'] == 'ia64': - if self.facts['distribution_version'] == "B.11.23": - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True) - self.facts['processor_count'] = int(out.strip().split('=')[1]) - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True) - self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip() - rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True) - self.facts['processor_cores'] = int(out.strip()) - if self.facts['distribution_version'] == "B.11.31": - #if machinfo return cores strings release B.11.31 > 1204 - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True) - if out.strip()== '0': - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True) - self.facts['processor_count'] = int(out.strip().split(" ")[0]) - #If hyperthreading is active divide cores by 2 - rc, out, err = module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True) - data = re.sub(' +',' ',out).strip().split(' ') - if len(data) == 1: - hyperthreading = 'OFF' - else: - hyperthreading = data[1] - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True) - data = out.strip().split(" ") - if hyperthreading == 'ON': - self.facts['processor_cores'] = int(data[0])/2 - else: - if len(data) == 1: - self.facts['processor_cores'] = self.facts['processor_count'] - else: - self.facts['processor_cores'] = int(data[0]) - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True) - self.facts['processor'] = out.strip() - else: - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True) - self.facts['processor_count'] = int(out.strip().split(" ")[0]) - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True) - self.facts['processor_cores'] = int(out.strip().split(" ")[0]) - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True) - self.facts['processor'] = out.strip() - - def get_memory_facts(self): - pagesize = 4096 - rc, out, err = module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True) - data = int(re.sub(' +',' ',out).split(' ')[5].strip()) - self.facts['memfree_mb'] = pagesize * data / 1024 / 1024 - if self.facts['architecture'] == '9000/800': - rc, out, err = module.run_command("grep Physical /var/adm/syslog/syslog.log") - data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip() - self.facts['memtotal_mb'] = int(data) / 1024 - else: - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Memory", 
use_unsafe_shell=True) - data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip() - self.facts['memtotal_mb'] = int(data) - rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f -q") - self.facts['swaptotal_mb'] = int(out.strip()) - rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True) - swap = 0 - for line in out.strip().split('\n'): - swap += int(re.sub(' +',' ',line).split(' ')[3].strip()) - self.facts['swapfree_mb'] = swap - - def get_hw_facts(self): - rc, out, err = module.run_command("model") - self.facts['model'] = out.strip() - if self.facts['architecture'] == 'ia64': - rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True) - self.facts['firmware_version'] = out.split(':')[1].strip() - - -class Darwin(Hardware): - """ - Darwin-specific subclass of Hardware. Defines memory and CPU facts: - - processor - - processor_cores - - memtotal_mb - - memfree_mb - - model - - osversion - - osrevision - """ - platform = 'Darwin' - - def __init__(self): - Hardware.__init__(self) - - def populate(self): - self.sysctl = self.get_sysctl() - self.get_mac_facts() - self.get_cpu_facts() - self.get_memory_facts() - return self.facts - - def get_sysctl(self): - rc, out, err = module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"]) - if rc != 0: - return dict() - sysctl = dict() - for line in out.splitlines(): - if line.rstrip("\n"): - (key, value) = re.split(' = |: ', line, maxsplit=1) - sysctl[key] = value.strip() - return sysctl - - def get_system_profile(self): - rc, out, err = module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"]) - if rc != 0: - return dict() - system_profile = dict() - for line in out.splitlines(): - if ': ' in line: - (key, value) = line.split(': ', 1) - system_profile[key.strip()] = ' '.join(value.strip().split()) - return system_profile - - def get_mac_facts(self): - self.facts['model'] = self.sysctl['hw.model'] - self.facts['osversion'] = self.sysctl['kern.osversion'] - self.facts['osrevision'] = self.sysctl['kern.osrevision'] - - def get_cpu_facts(self): - if 'machdep.cpu.brand_string' in self.sysctl: # Intel - self.facts['processor'] = self.sysctl['machdep.cpu.brand_string'] - self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count'] - else: # PowerPC - system_profile = self.get_system_profile() - self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed']) - self.facts['processor_cores'] = self.sysctl['hw.physicalcpu'] - - def get_memory_facts(self): - self.facts['memtotal_mb'] = long(self.sysctl['hw.memsize']) / 1024 / 1024 - self.facts['memfree_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024 - -class Network(Facts): - """ - This is a generic Network subclass of Facts. This should be further - subclassed to implement per platform. If you subclass this, - you must define: - - interfaces (a list of interface names) - - interface_ dictionary of ipv4, ipv6, and mac address information. - - All subclasses MUST define platform. 
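For instance, a hypothetical new platform plugs in with nothing more than
a matching class attribute, since __new__ below compares each subclass
against platform.system(). A sketch only -- FooBSDNetwork is not a real
class:

    class FooBSDNetwork(Network):
        platform = 'FooBSD'   # must equal platform.system() on that OS

        def populate(self):
            self.facts['interfaces'] = []   # gather real data here
            return self.facts
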
- """ - platform = 'Generic' - - IPV6_SCOPE = { '0' : 'global', - '10' : 'host', - '20' : 'link', - '40' : 'admin', - '50' : 'site', - '80' : 'organization' } - - def __new__(cls, *arguments, **keyword): - subclass = cls - for sc in Network.__subclasses__(): - if sc.platform == platform.system(): - subclass = sc - return super(cls, subclass).__new__(subclass, *arguments, **keyword) - - def __init__(self, module): - self.module = module - Facts.__init__(self) - - def populate(self): - return self.facts - -class LinuxNetwork(Network): - """ - This is a Linux-specific subclass of Network. It defines - - interfaces (a list of interface names) - - interface_ dictionary of ipv4, ipv6, and mac address information. - - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses. - - ipv4_address and ipv6_address: the first non-local address for each family. - """ - platform = 'Linux' - - def __init__(self, module): - Network.__init__(self, module) - - def populate(self): - ip_path = self.module.get_bin_path('ip') - if ip_path is None: - return self.facts - default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path) - interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6) - self.facts['interfaces'] = interfaces.keys() - for iface in interfaces: - self.facts[iface] = interfaces[iface] - self.facts['default_ipv4'] = default_ipv4 - self.facts['default_ipv6'] = default_ipv6 - self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses'] - self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses'] - return self.facts - - def get_default_interfaces(self, ip_path): - # Use the commands: - # ip -4 route get 8.8.8.8 -> Google public DNS - # ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com - # to find out the default outgoing interface, address, and gateway - command = dict( - v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'], - v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012'] - ) - interface = dict(v4 = {}, v6 = {}) - for v in 'v4', 'v6': - if v == 'v6' and self.facts['os_family'] == 'RedHat' \ - and self.facts['distribution_version'].startswith('4.'): - continue - if v == 'v6' and not socket.has_ipv6: - continue - rc, out, err = module.run_command(command[v]) - if not out: - # v6 routing may result in - # RTNETLINK answers: Invalid argument - continue - words = out.split('\n')[0].split() - # A valid output starts with the queried address on the first line - if len(words) > 0 and words[0] == command[v][-1]: - for i in range(len(words) - 1): - if words[i] == 'dev': - interface[v]['interface'] = words[i+1] - elif words[i] == 'src': - interface[v]['address'] = words[i+1] - elif words[i] == 'via' and words[i+1] != command[v][-1]: - interface[v]['gateway'] = words[i+1] - return interface['v4'], interface['v6'] - - def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6): - interfaces = {} - ips = dict( - all_ipv4_addresses = [], - all_ipv6_addresses = [], - ) - - for path in glob.glob('/sys/class/net/*'): - if not os.path.isdir(path): - continue - device = os.path.basename(path) - interfaces[device] = { 'device': device } - if os.path.exists(os.path.join(path, 'address')): - macaddress = open(os.path.join(path, 'address')).read().strip() - if macaddress and macaddress != '00:00:00:00:00:00': - interfaces[device]['macaddress'] = macaddress - if os.path.exists(os.path.join(path, 'mtu')): - interfaces[device]['mtu'] = int(open(os.path.join(path, 'mtu')).read().strip()) - if os.path.exists(os.path.join(path, 'operstate')): - 
interfaces[device]['active'] = open(os.path.join(path, 'operstate')).read().strip() != 'down'
-# if os.path.exists(os.path.join(path, 'carrier')):
-# interfaces[device]['link'] = open(os.path.join(path, 'carrier')).read().strip() == '1'
- if os.path.exists(os.path.join(path, 'device','driver', 'module')):
- interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
- if os.path.exists(os.path.join(path, 'type')):
- type = open(os.path.join(path, 'type')).read().strip()
- if type == '1':
- interfaces[device]['type'] = 'ether'
- elif type == '512':
- interfaces[device]['type'] = 'ppp'
- elif type == '772':
- interfaces[device]['type'] = 'loopback'
- if os.path.exists(os.path.join(path, 'bridge')):
- interfaces[device]['type'] = 'bridge'
- interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
- if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
- interfaces[device]['id'] = open(os.path.join(path, 'bridge', 'bridge_id')).read().strip()
- if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
- interfaces[device]['stp'] = open(os.path.join(path, 'bridge', 'stp_state')).read().strip() == '1'
- if os.path.exists(os.path.join(path, 'bonding')):
- interfaces[device]['type'] = 'bonding'
- interfaces[device]['slaves'] = open(os.path.join(path, 'bonding', 'slaves')).read().split()
- interfaces[device]['mode'] = open(os.path.join(path, 'bonding', 'mode')).read().split()[0]
- interfaces[device]['miimon'] = open(os.path.join(path, 'bonding', 'miimon')).read().split()[0]
- interfaces[device]['lacp_rate'] = open(os.path.join(path, 'bonding', 'lacp_rate')).read().split()[0]
- primary = open(os.path.join(path, 'bonding', 'primary')).read()
- if primary:
- interfaces[device]['primary'] = primary
- path = os.path.join(path, 'bonding', 'all_slaves_active')
- if os.path.exists(path):
- interfaces[device]['all_slaves_active'] = open(path).read() == '1'
-
- # Check whether an interface is in promiscuous mode
- if os.path.exists(os.path.join(path,'flags')):
- promisc_mode = False
- # The second byte indicates whether the interface is in promiscuous mode.
- # 1 = promisc - # 0 = no promisc - data = int(open(os.path.join(path, 'flags')).read().strip(),16) - promisc_mode = (data & 0x0100 > 0) - interfaces[device]['promisc'] = promisc_mode - - def parse_ip_output(output, secondary=False): - for line in output.split('\n'): - if not line: - continue - words = line.split() - if words[0] == 'inet': - if '/' in words[1]: - address, netmask_length = words[1].split('/') - else: - # pointopoint interfaces do not have a prefix - address = words[1] - netmask_length = "32" - address_bin = struct.unpack('!L', socket.inet_aton(address))[0] - netmask_bin = (1<<32) - (1<<32>>int(netmask_length)) - netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin)) - network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin)) - iface = words[-1] - if iface != device: - interfaces[iface] = {} - if not secondary or "ipv4" not in interfaces[iface]: - interfaces[iface]['ipv4'] = {'address': address, - 'netmask': netmask, - 'network': network} - else: - if "ipv4_secondaries" not in interfaces[iface]: - interfaces[iface]["ipv4_secondaries"] = [] - interfaces[iface]["ipv4_secondaries"].append({ - 'address': address, - 'netmask': netmask, - 'network': network, - }) - - # add this secondary IP to the main device - if secondary: - if "ipv4_secondaries" not in interfaces[device]: - interfaces[device]["ipv4_secondaries"] = [] - interfaces[device]["ipv4_secondaries"].append({ - 'address': address, - 'netmask': netmask, - 'network': network, - }) - - # If this is the default address, update default_ipv4 - if 'address' in default_ipv4 and default_ipv4['address'] == address: - default_ipv4['netmask'] = netmask - default_ipv4['network'] = network - default_ipv4['macaddress'] = macaddress - default_ipv4['mtu'] = interfaces[device]['mtu'] - default_ipv4['type'] = interfaces[device].get("type", "unknown") - default_ipv4['alias'] = words[-1] - if not address.startswith('127.'): - ips['all_ipv4_addresses'].append(address) - elif words[0] == 'inet6': - address, prefix = words[1].split('/') - scope = words[3] - if 'ipv6' not in interfaces[device]: - interfaces[device]['ipv6'] = [] - interfaces[device]['ipv6'].append({ - 'address' : address, - 'prefix' : prefix, - 'scope' : scope - }) - # If this is the default address, update default_ipv6 - if 'address' in default_ipv6 and default_ipv6['address'] == address: - default_ipv6['prefix'] = prefix - default_ipv6['scope'] = scope - default_ipv6['macaddress'] = macaddress - default_ipv6['mtu'] = interfaces[device]['mtu'] - default_ipv6['type'] = interfaces[device].get("type", "unknown") - if not address == '::1': - ips['all_ipv6_addresses'].append(address) - - ip_path = module.get_bin_path("ip") - - args = [ip_path, 'addr', 'show', 'primary', device] - rc, stdout, stderr = self.module.run_command(args) - primary_data = stdout - - args = [ip_path, 'addr', 'show', 'secondary', device] - rc, stdout, stderr = self.module.run_command(args) - secondary_data = stdout - - parse_ip_output(primary_data) - parse_ip_output(secondary_data, secondary=True) - - # replace : by _ in interface name since they are hard to use in template - new_interfaces = {} - for i in interfaces: - if ':' in i: - new_interfaces[i.replace(':','_')] = interfaces[i] - else: - new_interfaces[i] = interfaces[i] - return new_interfaces, ips - -class GenericBsdIfconfigNetwork(Network): - """ - This is a generic BSD subclass of Network using the ifconfig command. 
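Like the Linux parse_ip_output() above, the parsers in this class reduce
each IPv4 address to (address, netmask, network) with plain 32-bit integer
masking. A self-contained sketch of that arithmetic:

    import socket
    import struct

    def ipv4_network(address, prefix_len):
        # pack the dotted quad into a 32-bit integer
        address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
        # build a mask with prefix_len leading one-bits
        netmask_bin = (1 << 32) - (1 << 32 >> prefix_len)
        netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
        network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
        return netmask, network

    # ipv4_network('192.168.1.10', 24) -> ('255.255.255.0', '192.168.1.0')
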
- It defines - - interfaces (a list of interface names) - - interface_ dictionary of ipv4, ipv6, and mac address information. - - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses. - It currently does not define - - default_ipv4 and default_ipv6 - - type, mtu and network on interfaces - """ - platform = 'Generic_BSD_Ifconfig' - - def __init__(self, module): - Network.__init__(self, module) - - def populate(self): - - ifconfig_path = module.get_bin_path('ifconfig') - - if ifconfig_path is None: - return self.facts - route_path = module.get_bin_path('route') - - if route_path is None: - return self.facts - - default_ipv4, default_ipv6 = self.get_default_interfaces(route_path) - interfaces, ips = self.get_interfaces_info(ifconfig_path) - self.merge_default_interface(default_ipv4, interfaces, 'ipv4') - self.merge_default_interface(default_ipv6, interfaces, 'ipv6') - self.facts['interfaces'] = interfaces.keys() - - for iface in interfaces: - self.facts[iface] = interfaces[iface] - - self.facts['default_ipv4'] = default_ipv4 - self.facts['default_ipv6'] = default_ipv6 - self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses'] - self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses'] - - return self.facts - - def get_default_interfaces(self, route_path): - - # Use the commands: - # route -n get 8.8.8.8 -> Google public DNS - # route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com - # to find out the default outgoing interface, address, and gateway - - command = dict( - v4 = [route_path, '-n', 'get', '8.8.8.8'], - v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012'] - ) - - interface = dict(v4 = {}, v6 = {}) - - for v in 'v4', 'v6': - - if v == 'v6' and not socket.has_ipv6: - continue - rc, out, err = module.run_command(command[v]) - if not out: - # v6 routing may result in - # RTNETLINK answers: Invalid argument - continue - lines = out.split('\n') - for line in lines: - words = line.split() - # Collect output from route command - if len(words) > 1: - if words[0] == 'interface:': - interface[v]['interface'] = words[1] - if words[0] == 'gateway:': - interface[v]['gateway'] = words[1] - - return interface['v4'], interface['v6'] - - def get_interfaces_info(self, ifconfig_path): - interfaces = {} - current_if = {} - ips = dict( - all_ipv4_addresses = [], - all_ipv6_addresses = [], - ) - # FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a' - # when running the command 'ifconfig'. - # Solaris must explicitly run the command 'ifconfig -a'. 
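
# An abridged FreeBSD-style sample of the output being parsed below; the
# exact format differs slightly per platform, so treat as illustrative:
#
#   em0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
#           ether 08:00:27:16:97:62
#           inet 10.0.2.15 netmask 0xffffff00 broadcast 10.0.2.255
#           media: Ethernet autoselect (1000baseT <full-duplex>)
#           status: active
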
- rc, out, err = module.run_command([ifconfig_path, '-a']) - - for line in out.split('\n'): - - if line: - words = line.split() - - if re.match('^\S', line) and len(words) > 3: - current_if = self.parse_interface_line(words) - interfaces[ current_if['device'] ] = current_if - elif words[0].startswith('options='): - self.parse_options_line(words, current_if, ips) - elif words[0] == 'nd6': - self.parse_nd6_line(words, current_if, ips) - elif words[0] == 'ether': - self.parse_ether_line(words, current_if, ips) - elif words[0] == 'media:': - self.parse_media_line(words, current_if, ips) - elif words[0] == 'status:': - self.parse_status_line(words, current_if, ips) - elif words[0] == 'lladdr': - self.parse_lladdr_line(words, current_if, ips) - elif words[0] == 'inet': - self.parse_inet_line(words, current_if, ips) - elif words[0] == 'inet6': - self.parse_inet6_line(words, current_if, ips) - else: - self.parse_unknown_line(words, current_if, ips) - - return interfaces, ips - - def parse_interface_line(self, words): - device = words[0][0:-1] - current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'} - current_if['flags'] = self.get_options(words[1]) - current_if['mtu'] = words[3] - current_if['macaddress'] = 'unknown' # will be overwritten later - return current_if - - def parse_options_line(self, words, current_if, ips): - # Mac has options like this... - current_if['options'] = self.get_options(words[0]) - - def parse_nd6_line(self, words, current_if, ips): - # FreBSD has options like this... - current_if['options'] = self.get_options(words[1]) - - def parse_ether_line(self, words, current_if, ips): - current_if['macaddress'] = words[1] - - def parse_media_line(self, words, current_if, ips): - # not sure if this is useful - we also drop information - current_if['media'] = words[1] - if len(words) > 2: - current_if['media_select'] = words[2] - if len(words) > 3: - current_if['media_type'] = words[3][1:] - if len(words) > 4: - current_if['media_options'] = self.get_options(words[4]) - - def parse_status_line(self, words, current_if, ips): - current_if['status'] = words[1] - - def parse_lladdr_line(self, words, current_if, ips): - current_if['lladdr'] = words[1] - - def parse_inet_line(self, words, current_if, ips): - address = {'address': words[1]} - # deal with hex netmask - if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8: - words[3] = '0x' + words[3] - if words[3].startswith('0x'): - address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16))) - else: - # otherwise assume this is a dotted quad - address['netmask'] = words[3] - # calculate the network - address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0] - netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0] - address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin)) - # broadcast may be given or we need to calculate - if len(words) > 5: - address['broadcast'] = words[5] - else: - address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff))) - # add to our list of addresses - if not words[1].startswith('127.'): - ips['all_ipv4_addresses'].append(address['address']) - current_if['ipv4'].append(address) - - def parse_inet6_line(self, words, current_if, ips): - address = {'address': words[1]} - if (len(words) >= 4) and (words[2] == 'prefixlen'): - address['prefix'] = words[3] - if (len(words) >= 6) and (words[4] == 'scopeid'): - address['scope'] = words[5] - localhost6 = ['::1', 
'::1/128', 'fe80::1%lo0'] - if address['address'] not in localhost6: - ips['all_ipv6_addresses'].append(address['address']) - current_if['ipv6'].append(address) - - def parse_unknown_line(self, words, current_if, ips): - # we are going to ignore unknown lines here - this may be - # a bad idea - but you can override it in your subclass - pass - - def get_options(self, option_string): - start = option_string.find('<') + 1 - end = option_string.rfind('>') - if (start > 0) and (end > 0) and (end > start + 1): - option_csv = option_string[start:end] - return option_csv.split(',') - else: - return [] - - def merge_default_interface(self, defaults, interfaces, ip_type): - if not 'interface' in defaults.keys(): - return - if not defaults['interface'] in interfaces: - return - ifinfo = interfaces[defaults['interface']] - # copy all the interface values across except addresses - for item in ifinfo.keys(): - if item != 'ipv4' and item != 'ipv6': - defaults[item] = ifinfo[item] - if len(ifinfo[ip_type]) > 0: - for item in ifinfo[ip_type][0].keys(): - defaults[item] = ifinfo[ip_type][0][item] - -class DarwinNetwork(GenericBsdIfconfigNetwork, Network): - """ - This is the Mac OS X/Darwin Network Class. - It uses the GenericBsdIfconfigNetwork unchanged - """ - platform = 'Darwin' - - # media line is different to the default FreeBSD one - def parse_media_line(self, words, current_if, ips): - # not sure if this is useful - we also drop information - current_if['media'] = 'Unknown' # Mac does not give us this - current_if['media_select'] = words[1] - if len(words) > 2: - current_if['media_type'] = words[2][1:] - if len(words) > 3: - current_if['media_options'] = self.get_options(words[3]) - - -class FreeBSDNetwork(GenericBsdIfconfigNetwork, Network): - """ - This is the FreeBSD Network Class. - It uses the GenericBsdIfconfigNetwork unchanged. - """ - platform = 'FreeBSD' - -class AIXNetwork(GenericBsdIfconfigNetwork, Network): - """ - This is the AIX Network Class. - It uses the GenericBsdIfconfigNetwork unchanged. 
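Platform classes in this family usually override only the single parser
hook that differs. For example, a hypothetical port that needed only a
different link-level line could be as small as this (ExampleNetwork is
illustrative, not a real class):

    class ExampleNetwork(GenericBsdIfconfigNetwork, Network):
        platform = 'Example'   # hypothetical platform.system() value

        # record the link-level address under 'macaddress'
        def parse_lladdr_line(self, words, current_if, ips):
            current_if['macaddress'] = words[1]
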
- """ - platform = 'AIX' - - # AIX 'ifconfig -a' does not have three words in the interface line - def get_interfaces_info(self, ifconfig_path): - interfaces = {} - current_if = {} - ips = dict( - all_ipv4_addresses = [], - all_ipv6_addresses = [], - ) - rc, out, err = module.run_command([ifconfig_path, '-a']) - - for line in out.split('\n'): - - if line: - words = line.split() - - # only this condition differs from GenericBsdIfconfigNetwork - if re.match('^\w*\d*:', line): - current_if = self.parse_interface_line(words) - interfaces[ current_if['device'] ] = current_if - elif words[0].startswith('options='): - self.parse_options_line(words, current_if, ips) - elif words[0] == 'nd6': - self.parse_nd6_line(words, current_if, ips) - elif words[0] == 'ether': - self.parse_ether_line(words, current_if, ips) - elif words[0] == 'media:': - self.parse_media_line(words, current_if, ips) - elif words[0] == 'status:': - self.parse_status_line(words, current_if, ips) - elif words[0] == 'lladdr': - self.parse_lladdr_line(words, current_if, ips) - elif words[0] == 'inet': - self.parse_inet_line(words, current_if, ips) - elif words[0] == 'inet6': - self.parse_inet6_line(words, current_if, ips) - else: - self.parse_unknown_line(words, current_if, ips) - - return interfaces, ips - - # AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here - def parse_interface_line(self, words): - device = words[0][0:-1] - current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'} - current_if['flags'] = self.get_options(words[1]) - current_if['macaddress'] = 'unknown' # will be overwritten later - return current_if - -class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network): - """ - This is the OpenBSD Network Class. - It uses the GenericBsdIfconfigNetwork. - """ - platform = 'OpenBSD' - - # Return macaddress instead of lladdr - def parse_lladdr_line(self, words, current_if, ips): - current_if['macaddress'] = words[1] - -class SunOSNetwork(GenericBsdIfconfigNetwork, Network): - """ - This is the SunOS Network Class. - It uses the GenericBsdIfconfigNetwork. - - Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface - so these facts have been moved inside the 'ipv4' and 'ipv6' lists. - """ - platform = 'SunOS' - - # Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6. - # MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface. - # 'parse_interface_line()' checks for previously seen interfaces before defining - # 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa). 
- def get_interfaces_info(self, ifconfig_path): - interfaces = {} - current_if = {} - ips = dict( - all_ipv4_addresses = [], - all_ipv6_addresses = [], - ) - rc, out, err = module.run_command([ifconfig_path, '-a']) - - for line in out.split('\n'): - - if line: - words = line.split() - - if re.match('^\S', line) and len(words) > 3: - current_if = self.parse_interface_line(words, current_if, interfaces) - interfaces[ current_if['device'] ] = current_if - elif words[0].startswith('options='): - self.parse_options_line(words, current_if, ips) - elif words[0] == 'nd6': - self.parse_nd6_line(words, current_if, ips) - elif words[0] == 'ether': - self.parse_ether_line(words, current_if, ips) - elif words[0] == 'media:': - self.parse_media_line(words, current_if, ips) - elif words[0] == 'status:': - self.parse_status_line(words, current_if, ips) - elif words[0] == 'lladdr': - self.parse_lladdr_line(words, current_if, ips) - elif words[0] == 'inet': - self.parse_inet_line(words, current_if, ips) - elif words[0] == 'inet6': - self.parse_inet6_line(words, current_if, ips) - else: - self.parse_unknown_line(words, current_if, ips) - - # 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the - # ipv4/ipv6 lists which is ugly and hard to read. - # This quick hack merges the dictionaries. Purely cosmetic. - for iface in interfaces: - for v in 'ipv4', 'ipv6': - combined_facts = {} - for facts in interfaces[iface][v]: - combined_facts.update(facts) - if len(combined_facts.keys()) > 0: - interfaces[iface][v] = [combined_facts] - - return interfaces, ips - - def parse_interface_line(self, words, current_if, interfaces): - device = words[0][0:-1] - if device not in interfaces.keys(): - current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'} - else: - current_if = interfaces[device] - flags = self.get_options(words[1]) - if 'IPv4' in flags: - v = 'ipv4' - if 'IPv6' in flags: - v = 'ipv6' - current_if[v].append({'flags': flags, 'mtu': words[3]}) - current_if['macaddress'] = 'unknown' # will be overwritten later - return current_if - - # Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f - # Add leading zero to each octet where needed. - def parse_ether_line(self, words, current_if, ips): - macaddress = '' - for octet in words[1].split(':'): - octet = ('0' + octet)[-2:None] - macaddress += (octet + ':') - current_if['macaddress'] = macaddress[0:-1] - -class Virtual(Facts): - """ - This is a generic Virtual subclass of Facts. This should be further - subclassed to implement per platform. If you subclass this, - you should define: - - virtualization_type - - virtualization_role - - container (e.g. solaris zones, freebsd jails, linux containers) - - All subclasses MUST define platform. - """ - - def __new__(cls, *arguments, **keyword): - subclass = cls - for sc in Virtual.__subclasses__(): - if sc.platform == platform.system(): - subclass = sc - return super(cls, subclass).__new__(subclass, *arguments, **keyword) - - def __init__(self): - Facts.__init__(self) - - def populate(self): - return self.facts - -class LinuxVirtual(Virtual): - """ - This is a Linux-specific subclass of Virtual. 
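get_virtual_facts() below checks strong signals first (dedicated /proc
trees, cgroup paths) before falling back to weaker DMI strings and loaded
modules. A condensed sketch of the DMI step, assuming the sysfs file is
readable (get_file_content() returns None otherwise):

    # mapping taken from the checks below; not exhaustive
    DMI_PRODUCT_GUESSES = {'KVM': 'kvm', 'Bochs': 'kvm',
                           'VMware Virtual Platform': 'VMware'}
    product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
    guessed_type = DMI_PRODUCT_GUESSES.get(product_name)
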
It defines - - virtualization_type - - virtualization_role - """ - platform = 'Linux' - - def __init__(self): - Virtual.__init__(self) - - def populate(self): - self.get_virtual_facts() - return self.facts - - # For more information, check: http://people.redhat.com/~rjones/virt-what/ - def get_virtual_facts(self): - if os.path.exists("/proc/xen"): - self.facts['virtualization_type'] = 'xen' - self.facts['virtualization_role'] = 'guest' - try: - for line in open('/proc/xen/capabilities'): - if "control_d" in line: - self.facts['virtualization_role'] = 'host' - except IOError: - pass - return - - if os.path.exists('/proc/vz'): - self.facts['virtualization_type'] = 'openvz' - if os.path.exists('/proc/bc'): - self.facts['virtualization_role'] = 'host' - else: - self.facts['virtualization_role'] = 'guest' - return - - if os.path.exists('/proc/1/cgroup'): - for line in open('/proc/1/cgroup').readlines(): - if re.search('/lxc/', line): - self.facts['virtualization_type'] = 'lxc' - self.facts['virtualization_role'] = 'guest' - return - - product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name') - - if product_name in ['KVM', 'Bochs']: - self.facts['virtualization_type'] = 'kvm' - self.facts['virtualization_role'] = 'guest' - return - - if product_name == 'RHEV Hypervisor': - self.facts['virtualization_type'] = 'RHEV' - self.facts['virtualization_role'] = 'guest' - return - - if product_name == 'VMware Virtual Platform': - self.facts['virtualization_type'] = 'VMware' - self.facts['virtualization_role'] = 'guest' - return - - bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor') - - if bios_vendor == 'Xen': - self.facts['virtualization_type'] = 'xen' - self.facts['virtualization_role'] = 'guest' - return - - if bios_vendor == 'innotek GmbH': - self.facts['virtualization_type'] = 'virtualbox' - self.facts['virtualization_role'] = 'guest' - return - - sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor') - - # FIXME: This does also match hyperv - if sys_vendor == 'Microsoft Corporation': - self.facts['virtualization_type'] = 'VirtualPC' - self.facts['virtualization_role'] = 'guest' - return - - if sys_vendor == 'Parallels Software International Inc.': - self.facts['virtualization_type'] = 'parallels' - self.facts['virtualization_role'] = 'guest' - return - - if os.path.exists('/proc/self/status'): - for line in open('/proc/self/status').readlines(): - if re.match('^VxID: \d+', line): - self.facts['virtualization_type'] = 'linux_vserver' - if re.match('^VxID: 0', line): - self.facts['virtualization_role'] = 'host' - else: - self.facts['virtualization_role'] = 'guest' - return - - if os.path.exists('/proc/cpuinfo'): - for line in open('/proc/cpuinfo').readlines(): - if re.match('^model name.*QEMU Virtual CPU', line): - self.facts['virtualization_type'] = 'kvm' - elif re.match('^vendor_id.*User Mode Linux', line): - self.facts['virtualization_type'] = 'uml' - elif re.match('^model name.*UML', line): - self.facts['virtualization_type'] = 'uml' - elif re.match('^vendor_id.*PowerVM Lx86', line): - self.facts['virtualization_type'] = 'powervm_lx86' - elif re.match('^vendor_id.*IBM/S390', line): - self.facts['virtualization_type'] = 'ibm_systemz' - else: - continue - self.facts['virtualization_role'] = 'guest' - return - - # Beware that we can have both kvm and virtualbox running on a single system - if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK): - modules = [] - for line in open("/proc/modules").readlines(): - data = 
line.split(" ", 1) - modules.append(data[0]) - - if 'kvm' in modules: - self.facts['virtualization_type'] = 'kvm' - self.facts['virtualization_role'] = 'host' - return - - if 'vboxdrv' in modules: - self.facts['virtualization_type'] = 'virtualbox' - self.facts['virtualization_role'] = 'host' - return - - # If none of the above matches, return 'NA' for virtualization_type - # and virtualization_role. This allows for proper grouping. - self.facts['virtualization_type'] = 'NA' - self.facts['virtualization_role'] = 'NA' - return - - -class HPUXVirtual(Virtual): - """ - This is a HP-UX specific subclass of Virtual. It defines - - virtualization_type - - virtualization_role - """ - platform = 'HP-UX' - - def __init__(self): - Virtual.__init__(self) - - def populate(self): - self.get_virtual_facts() - return self.facts - - def get_virtual_facts(self): - if os.path.exists('/usr/sbin/vecheck'): - rc, out, err = module.run_command("/usr/sbin/vecheck") - if rc == 0: - self.facts['virtualization_type'] = 'guest' - self.facts['virtualization_role'] = 'HP vPar' - if os.path.exists('/opt/hpvm/bin/hpvminfo'): - rc, out, err = module.run_command("/opt/hpvm/bin/hpvminfo") - if rc == 0 and re.match('.*Running.*HPVM vPar.*', out): - self.facts['virtualization_type'] = 'guest' - self.facts['virtualization_role'] = 'HPVM vPar' - elif rc == 0 and re.match('.*Running.*HPVM guest.*', out): - self.facts['virtualization_type'] = 'guest' - self.facts['virtualization_role'] = 'HPVM IVM' - elif rc == 0 and re.match('.*Running.*HPVM host.*', out): - self.facts['virtualization_type'] = 'host' - self.facts['virtualization_role'] = 'HPVM' - if os.path.exists('/usr/sbin/parstatus'): - rc, out, err = module.run_command("/usr/sbin/parstatus") - if rc == 0: - self.facts['virtualization_type'] = 'guest' - self.facts['virtualization_role'] = 'HP nPar' - - -class SunOSVirtual(Virtual): - """ - This is a SunOS-specific subclass of Virtual. It defines - - virtualization_type - - virtualization_role - - container - """ - platform = 'SunOS' - - def __init__(self): - Virtual.__init__(self) - - def populate(self): - self.get_virtual_facts() - return self.facts - - def get_virtual_facts(self): - rc, out, err = module.run_command("/usr/sbin/prtdiag") - for line in out.split('\n'): - if 'VMware' in line: - self.facts['virtualization_type'] = 'vmware' - self.facts['virtualization_role'] = 'guest' - if 'Parallels' in line: - self.facts['virtualization_type'] = 'parallels' - self.facts['virtualization_role'] = 'guest' - if 'VirtualBox' in line: - self.facts['virtualization_type'] = 'virtualbox' - self.facts['virtualization_role'] = 'guest' - if 'HVM domU' in line: - self.facts['virtualization_type'] = 'xen' - self.facts['virtualization_role'] = 'guest' - # Check if it's a zone - if os.path.exists("/usr/bin/zonename"): - rc, out, err = module.run_command("/usr/bin/zonename") - if out.rstrip() != "global": - self.facts['container'] = 'zone' - # Check if it's a branded zone (i.e. Solaris 8/9 zone) - if os.path.isdir('/.SUNWnative'): - self.facts['container'] = 'zone' - # If it's a zone check if we can detect if our global zone is itself virtualized. - # Relies on the "guest tools" (e.g. 
vmware tools) to be installed - if 'container' in self.facts and self.facts['container'] == 'zone': - rc, out, err = module.run_command("/usr/sbin/modinfo") - for line in out.split('\n'): - if 'VMware' in line: - self.facts['virtualization_type'] = 'vmware' - self.facts['virtualization_role'] = 'guest' - if 'VirtualBox' in line: - self.facts['virtualization_type'] = 'virtualbox' - self.facts['virtualization_role'] = 'guest' - -def get_file_content(path, default=None): - data = default - if os.path.exists(path) and os.access(path, os.R_OK): - data = open(path).read().strip() - if len(data) == 0: - data = default - return data - -def ansible_facts(module): - facts = {} - facts.update(Facts().populate()) - facts.update(Hardware().populate()) - facts.update(Network(module).populate()) - facts.update(Virtual().populate()) - return facts - -# =========================================== def run_setup(module): @@ -2313,13 +82,11 @@ def run_setup(module): # Look for the path to the facter and ohai binary and set # the variable to that path. - facter_path = module.get_bin_path('facter') ohai_path = module.get_bin_path('ohai') # if facter is installed, and we can use --json because # ruby-json is ALSO installed, include facter data in the JSON - if facter_path is not None: rc, out, err = module.run_command(facter_path + " --json") facter = True @@ -2332,7 +99,6 @@ def run_setup(module): setup_options["facter_%s" % k] = v # ditto for ohai - if ohai_path is not None: rc, out, err = module.run_command(ohai_path) ohai = True @@ -2369,5 +135,9 @@ def main(): module.exit_json(**data) # import module snippets + from ansible.module_utils.basic import * + +from ansible.module_utils.facts import * + main() From f700da82490ad91778068414be7f575416d98336 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 16 Mar 2014 16:11:45 -0400 Subject: [PATCH 391/772] Bump version_added field on composer module. --- library/packaging/composer | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/composer b/library/packaging/composer index 9220ed16bcc..983a38dec64 100644 --- a/library/packaging/composer +++ b/library/packaging/composer @@ -24,7 +24,7 @@ DOCUMENTATION = ''' module: composer author: Dimitrios Tydeas Mengidis short_description: Dependency Manager for PHP -version_added: "1.4" +version_added: "1.6" description: - Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs and it will install them in your project for you options: From 7192343d6eec1ff1b1ca10ffc9aedaf2472fa717 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 16 Mar 2014 16:14:11 -0400 Subject: [PATCH 392/772] Changelog updates. 
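The run_setup() hunks above merge external discovery tools into the returned facts by prefixing every key they emit, so facter and ohai output can never collide with Ansible's own fact names. A minimal standalone sketch of that namespacing pattern follows; the helper name merge_external_facts and the sample JSON are illustrative, not part of this patch series:

    import json

    def merge_external_facts(setup_options, prefix, raw_json):
        """Merge a discovery tool's JSON output under '<prefix>_' keys (sketch)."""
        try:
            facts = json.loads(raw_json)
        except ValueError:
            # Tool printed something that is not JSON (e.g. facter when
            # ruby-json is missing); leave the existing facts untouched.
            return setup_options
        for key, value in facts.items():
            setup_options["%s_%s" % (prefix, key)] = value
        return setup_options

    # merge_external_facts({}, "facter", '{"osfamily": "Debian"}')
    # returns {'facter_osfamily': 'Debian'}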
--- CHANGELOG.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f0615b6dce..085e543de72 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,8 +10,10 @@ Major features/changes: New Modules: -* packaging: cpanm +* packaging: cpanm (Perl) * packaging: portage +* packaging: composer (PHP) +* notification: nexmo (SMS) * system: debconf * system: ufw * system: locale_gen @@ -22,11 +24,13 @@ New Modules: Other notable changes: +* example callback plugin added for hipchat * libvirt module now supports destroyed and paused as states * s3 module can specify metadata * security token additions to ec2 modules +* setup module code moved into module_utils/, facts now accessible by other modules * misc bugfixes and other parameters - + ## 1.5.3 "Love Walks In" - March 13, 2014 - Fix validate_certs and run_command errors from previous release From e7787607d0843f9268118656c7f6d403a70a59dc Mon Sep 17 00:00:00 2001 From: Daniel Jaouen Date: Wed, 19 Feb 2014 14:47:48 -0500 Subject: [PATCH 393/772] Add homebrew_tap module. --- library/packaging/homebrew_tap | 215 +++++++++++++++++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 library/packaging/homebrew_tap diff --git a/library/packaging/homebrew_tap b/library/packaging/homebrew_tap new file mode 100644 index 00000000000..03047ba79f9 --- /dev/null +++ b/library/packaging/homebrew_tap @@ -0,0 +1,215 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Daniel Jaouen +# Based on homebrew (Andrew Dunham ) +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import re + +DOCUMENTATION = ''' +--- +module: homebrew_tap +author: Daniel Jaouen +short_description: Tap a Homebrew repository. +description: + - Tap external Homebrew repositories. +version_added: "1.4" +options: + tap: + description: + - The repository to tap. + required: true + state: + description: + - state of the repository. 
+ choices: [ 'present', 'absent' ] + required: false + default: 'present' +requirements: [ homebrew ] +''' + +EXAMPLES = ''' +homebrew_tap: tap=homebrew/dupes state=present +homebrew_tap: tap=homebrew/dupes state=absent +homebrew_tap: tap=homebrew/dupes,homebrew/science state=present +''' + + +def a_valid_tap(tap): + '''Returns True if the tap is valid.''' + regex = re.compile(r'^(\S+)/(homebrew-)?(\w+)$') + return regex.match(tap) + + +def already_tapped(module, brew_path, tap): + '''Returns True if already tapped.''' + + rc, out, err = module.run_command([ + brew_path, + 'tap', + ]) + taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_] + return tap.lower() in taps + + +def add_tap(module, brew_path, tap): + '''Adds a single tap.''' + failed, changed, msg = False, False, '' + + if not a_valid_tap(tap): + failed = True + msg = 'not a valid tap: %s' % tap + + elif not already_tapped(module, brew_path, tap): + if module.check_mode: + module.exit_json(changed=True) + + rc, out, err = module.run_command([ + brew_path, + 'tap', + tap, + ]) + if already_tapped(module, brew_path, tap): + changed = True + msg = 'successfully tapped: %s' % tap + else: + failed = True + msg = 'failed to tap: %s' % tap + + else: + msg = 'already tapped: %s' % tap + + return (failed, changed, msg) + + +def add_taps(module, brew_path, taps): + '''Adds one or more taps.''' + failed, unchanged, added, msg = False, 0, 0, '' + + for tap in taps: + (failed, changed, msg) = add_tap(module, brew_path, tap) + if failed: + break + if changed: + added += 1 + else: + unchanged += 1 + + if failed: + msg = 'added: %d, unchanged: %d, error: ' + msg + msg = msg % (added, unchanged) + elif added: + changed = True + msg = 'added: %d, unchanged: %d' % (added, unchanged) + else: + msg = 'added: %d, unchanged: %d' % (added, unchanged) + + return (failed, changed, msg) + + +def remove_tap(module, brew_path, tap): + '''Removes a single tap.''' + failed, changed, msg = False, False, '' + + if not a_valid_tap(tap): + failed = True + msg = 'not a valid tap: %s' % tap + + elif already_tapped(module, brew_path, tap): + if module.check_mode: + module.exit_json(changed=True) + + rc, out, err = module.run_command([ + brew_path, + 'untap', + tap, + ]) + if not already_tapped(module, brew_path, tap): + changed = True + msg = 'successfully untapped: %s' % tap + else: + failed = True + msg = 'failed to untap: %s' % tap + + else: + msg = 'already untapped: %s' % tap + + return (failed, changed, msg) + + +def remove_taps(module, brew_path, taps): + '''Removes one or more taps.''' + failed, unchanged, removed, msg = False, 0, 0, '' + + for tap in taps: + (failed, changed, msg) = remove_tap(module, brew_path, tap) + if failed: + break + if changed: + removed += 1 + else: + unchanged += 1 + + if failed: + msg = 'removed: %d, unchanged: %d, error: ' + msg + msg = msg % (removed, unchanged) + elif removed: + changed = True + msg = 'removed: %d, unchanged: %d' % (removed, unchanged) + else: + msg = 'removed: %d, unchanged: %d' % (removed, unchanged) + + return (failed, changed, msg) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(aliases=['tap'], required=True), + state=dict(default='present', choices=['present', 'absent']), + ), + supports_check_mode=True, + ) + + brew_path = module.get_bin_path( + 'brew', + required=True, + opt_dirs=['/usr/local/bin'] + ) + + taps = module.params['name'].split(',') + + if module.params['state'] == 'present': + failed, changed, msg = add_taps(module, brew_path, taps) + + if failed: + 
+            module.fail_json(msg=msg)
+        else:
+            module.exit_json(changed=changed, msg=msg)
+
+    elif module.params['state'] == 'absent':
+        failed, changed, msg = remove_taps(module, brew_path, taps)
+
+        if failed:
+            module.fail_json(msg=msg)
+        else:
+            module.exit_json(changed=changed, msg=msg)
+
+# this is magic, see lib/ansible/module_common.py
+#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+main()

From 1a90890fd0d0999f04ab3324e8494060990e9dae Mon Sep 17 00:00:00 2001
From: Michael DeHaan
Date: Sun, 16 Mar 2014 16:16:55 -0400
Subject: [PATCH 394/772] Update tap version, changelog

---
 CHANGELOG.md                   | 1 +
 library/packaging/homebrew_tap | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 085e543de72..10c1f486258 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@ New Modules:
 * packaging: cpanm (Perl)
 * packaging: portage
 * packaging: composer (PHP)
+* packaging: homebrew_tap (OS X)
 * notification: nexmo (SMS)
 * system: debconf
 * system: ufw
diff --git a/library/packaging/homebrew_tap b/library/packaging/homebrew_tap
index 03047ba79f9..a79ba076a8a 100644
--- a/library/packaging/homebrew_tap
+++ b/library/packaging/homebrew_tap
@@ -28,7 +28,7 @@ author: Daniel Jaouen
 short_description: Tap a Homebrew repository.
 description:
     - Tap external Homebrew repositories.
-version_added: "1.4"
+version_added: "1.6"
 options:
     tap:
         description:

From 855ccb62c01b2c9bc4fc01975341daa690c1a51a Mon Sep 17 00:00:00 2001
From: Michael DeHaan
Date: Sun, 16 Mar 2014 16:18:27 -0400
Subject: [PATCH 395/772] Update cask version, changelog.

---
 CHANGELOG.md                    | 1 +
 library/packaging/homebrew_cask | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 10c1f486258..f9a077ee7e3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,7 @@ New Modules:
 * packaging: portage
 * packaging: composer (PHP)
 * packaging: homebrew_tap (OS X)
+* packaging: homebrew_cask (OS X)
 * notification: nexmo (SMS)
 * system: debconf
 * system: ufw
diff --git a/library/packaging/homebrew_cask b/library/packaging/homebrew_cask
index 9954da47a26..fa85931afc9 100644
--- a/library/packaging/homebrew_cask
+++ b/library/packaging/homebrew_cask
@@ -23,7 +23,7 @@ author: Daniel Jaouen
 short_description: Install/uninstall homebrew casks.
 description:
     - Manages Homebrew casks.
-version_added: "1.5"
+version_added: "1.6"
 options:
     name:
         description:

From bbf211f80f5660f00c57f86ef89b4a425e0d76cb Mon Sep 17 00:00:00 2001
From: Michael DeHaan
Date: Sun, 16 Mar 2014 16:23:46 -0400
Subject: [PATCH 396/772] Changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f9a077ee7e3..9bfa2cbb66f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,6 +27,7 @@ Other notable changes:
 
 * example callback plugin added for hipchat
+* added example inventory plugin for doing really trivial inventory from SSH config files
 * libvirt module now supports destroyed and paused as states
 * s3 module can specify metadata
 * security token additions to ec2 modules
 

From a1f6f600bb751eb325d719bd7b765f6329c95453 Mon Sep 17 00:00:00 2001
From: Michael DeHaan
Date: Sun, 16 Mar 2014 16:29:06 -0400
Subject: [PATCH 397/772] Changelog.
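The homebrew_tap module introduced above confirms each change by re-querying state (already_tapped() runs both before and after brew tap/untap) instead of trusting the command's exit code. A rough sketch of that verify-after-write pattern; the names ensure, in_desired_state, and apply_change are hypothetical helpers, not part of the module:

    def ensure(module, in_desired_state, apply_change, target):
        """Return (failed, changed) for one idempotent state change (sketch)."""
        if in_desired_state(target):
            return (False, False)          # already converged, nothing to do
        if module.check_mode:
            module.exit_json(changed=True)  # report, but do not touch the system
        apply_change(target)               # e.g. run `brew tap <target>`
        if in_desired_state(target):       # trust observed state, not the exit code
            return (False, True)
        return (True, False)               # command ran but state did not change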
--- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9bfa2cbb66f..7d9200d212b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ New Modules: Other notable changes: * example callback plugin added for hipchat +* added example inventory plugin for vcenter/vsphere * added example inventory plugin for doing really trivial inventory from SSH config files * libvirt module now supports destroyed and paused as states * s3 module can specify metadata From 5aba903dcfd074e057ec570ae12be324cf9bd360 Mon Sep 17 00:00:00 2001 From: Drew Stokes Date: Tue, 28 Jan 2014 15:49:18 -0800 Subject: [PATCH 398/772] add registry option to npm module --- library/packaging/npm | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/library/packaging/npm b/library/packaging/npm index d241f61deb2..d61aa673035 100644 --- a/library/packaging/npm +++ b/library/packaging/npm @@ -56,6 +56,10 @@ options: required: false choices: [ "yes", "no" ] default: no + registry: + description: + - The registry to install modules from. + required: false state: description: - The state of the node.js library @@ -77,6 +81,9 @@ description: Install "coffee-script" node.js package globally. description: Remove the globally package "coffee-script". - npm: name=coffee-script global=yes state=absent +description: Install "coffee-script" node.js package from custom registry. +- npm: name=coffee-script registry=http://registry.mysite.com + description: Install packages based on package.json. - npm: path=/app/location @@ -101,6 +108,7 @@ class Npm(object): self.name = kwargs['name'] self.version = kwargs['version'] self.path = kwargs['path'] + self.registry = kwargs['registry'] self.production = kwargs['production'] if kwargs['executable']: @@ -123,6 +131,9 @@ class Npm(object): cmd.append('--production') if self.name: cmd.append(self.name_version) + if self.registry: + cmd.append('--registry') + cmd.append(self.registry) #If path is specified, cd into that path and run the command. cwd = None @@ -182,6 +193,7 @@ def main(): version=dict(default=None), production=dict(default='no', type='bool'), executable=dict(default=None), + registry=dict(default=None), state=dict(default='present', choices=['present', 'absent', 'latest']) ) arg_spec['global'] = dict(default='no', type='bool') @@ -196,6 +208,7 @@ def main(): glbl = module.params['global'] production = module.params['production'] executable = module.params['executable'] + registry = module.params['registry'] state = module.params['state'] if not path and not glbl: @@ -204,7 +217,7 @@ def main(): module.fail_json(msg='uninstalling a package is only available for named packages') npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, \ - executable=executable) + executable=executable, registry=registry) changed = False if state == 'present': From 16ca4c2195868d6a9efc267eea4757af4f003c00 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 16 Mar 2014 16:35:43 -0400 Subject: [PATCH 399/772] Update version_added --- library/packaging/npm | 1 + 1 file changed, 1 insertion(+) diff --git a/library/packaging/npm b/library/packaging/npm index d61aa673035..1157d8f6636 100644 --- a/library/packaging/npm +++ b/library/packaging/npm @@ -60,6 +60,7 @@ options: description: - The registry to install modules from. 
required: false + version_added: "1.6" state: description: - The state of the node.js library From 42d6fc2b5411fc587beb827a33bd96e450f05255 Mon Sep 17 00:00:00 2001 From: Matt Hite Date: Fri, 6 Dec 2013 15:01:27 -0800 Subject: [PATCH 400/772] New bigip_facts module --- library/net_infrastructure/bigip_facts | 1670 ++++++++++++++++++++++++ 1 file changed, 1670 insertions(+) create mode 100644 library/net_infrastructure/bigip_facts diff --git a/library/net_infrastructure/bigip_facts b/library/net_infrastructure/bigip_facts new file mode 100644 index 00000000000..9be820dc764 --- /dev/null +++ b/library/net_infrastructure/bigip_facts @@ -0,0 +1,1670 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Matt Hite +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: bigip_facts +short_description: "Collect facts from F5 BIG-IP devices" +description: + - "Collect facts from F5 BIG-IP devices via iControl SOAP API" +version_added: "1.5" +author: Matt Hite +notes: + - "Requires BIG-IP software version >= 11.4" + - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" + - "Best run as a local_action in your playbook" + - "Tested with manager and above account privilege level" + +requirements: + - bigsuds +options: + server: + description: + - BIG-IP host + required: true + default: null + choices: [] + aliases: [] + user: + description: + - BIG-IP username + required: true + default: null + choices: [] + aliases: [] + password: + description: + - BIG-IP password + required: true + default: null + choices: [] + aliases: [] + session: + description: + - BIG-IP session support; may be useful to avoid concurrency + issues in certain circumstances. + required: false + default: true + choices: [] + aliases: [] + include: + description: + - Fact category or list of categories to collect + required: true + default: null + choices: ['address_class', 'certificate', 'client_ssl_profile', + 'device_group', 'interface', 'key', 'node', 'pool', 'rule', + 'self_ip', 'software', 'system_info', 'traffic_group', + 'trunk', 'virtual_address', 'virtual_server', 'vlan'] + aliases: [] + filter: + description: + - Shell-style glob matching string used to filter fact keys. Not + applicable for software and system_info fact categories. + required: false + default: null + choices: [] + aliases: [] +''' + +EXAMPLES = ''' + +## playbook task examples: + +--- +# file bigip-test.yml +# ... +- hosts: bigip-test + tasks: + - name: Collect BIG-IP facts + local_action: > + bigip_facts + server=lb.mydomain.com + user=admin + password=mysecret + include=interface,vlan + +''' + +try: + import bigsuds +except ImportError: + bigsuds_found = False +else: + bigsuds_found = True + +import fnmatch +import traceback +import re +from suds import MethodNotFound + +# =========================================== +# bigip_facts module specific support methods. +# + +class F5(object): + """F5 iControl class. 
+ + F5 BIG-IP iControl API class. + + Attributes: + api: iControl API instance. + """ + + def __init__(self, host, user, password, session=False): + self.api = bigsuds.BIGIP(hostname=host, username=user, password=password) + if session: + self.start_session() + + def start_session(self): + self.api = self.api.with_session_id() + + def get_api(self): + return self.api + + def set_recursive_query_state(self, state): + self.api.System.Session.set_recursive_query_state(state) + + def get_recursive_query_state(self): + return self.api.System.Session.get_recursive_query_state() + + def enable_recursive_query_state(self): + self.set_recursive_query_state('STATE_ENABLED') + + def disable_recursive_query_state(self): + self.set_recursive_query_state('STATE_DISABLED') + + def set_active_folder(self, folder): + self.api.System.Session.set_active_folder(folder=folder) + + def get_active_folder(self): + return self.api.System.Session.get_active_folder() + + +class Interfaces(object): + """Interfaces class. + + F5 BIG-IP interfaces class. + + Attributes: + api: iControl API instance. + interfaces: A list of BIG-IP interface names. + """ + + def __init__(self, api, regex=None): + self.api = api + self.interfaces = api.Networking.Interfaces.get_list() + if regex: + re_filter = re.compile(regex) + self.interfaces = filter(re_filter.search, self.interfaces) + + def get_list(self): + return self.interfaces + + def get_active_media(self): + return self.api.Networking.Interfaces.get_active_media(self.interfaces) + + def get_actual_flow_control(self): + return self.api.Networking.Interfaces.get_actual_flow_control(self.interfaces) + + def get_bundle_state(self): + return self.api.Networking.Interfaces.get_bundle_state(self.interfaces) + + def get_description(self): + return self.api.Networking.Interfaces.get_description(self.interfaces) + + def get_dual_media_state(self): + return self.api.Networking.Interfaces.get_dual_media_state(self.interfaces) + + def get_enabled_state(self): + return self.api.Networking.Interfaces.get_enabled_state(self.interfaces) + + def get_if_index(self): + return self.api.Networking.Interfaces.get_if_index(self.interfaces) + + def get_learning_mode(self): + return self.api.Networking.Interfaces.get_learning_mode(self.interfaces) + + def get_lldp_admin_status(self): + return self.api.Networking.Interfaces.get_lldp_admin_status(self.interfaces) + + def get_lldp_tlvmap(self): + return self.api.Networking.Interfaces.get_lldp_tlvmap(self.interfaces) + + def get_mac_address(self): + return self.api.Networking.Interfaces.get_mac_address(self.interfaces) + + def get_media(self): + return self.api.Networking.Interfaces.get_media(self.interfaces) + + def get_media_option(self): + return self.api.Networking.Interfaces.get_media_option(self.interfaces) + + def get_media_option_sfp(self): + return self.api.Networking.Interfaces.get_media_option_sfp(self.interfaces) + + def get_media_sfp(self): + return self.api.Networking.Interfaces.get_media_sfp(self.interfaces) + + def get_media_speed(self): + return self.api.Networking.Interfaces.get_media_speed(self.interfaces) + + def get_media_status(self): + return self.api.Networking.Interfaces.get_media_status(self.interfaces) + + def get_mtu(self): + return self.api.Networking.Interfaces.get_mtu(self.interfaces) + + def get_phy_master_slave_mode(self): + return self.api.Networking.Interfaces.get_phy_master_slave_mode(self.interfaces) + + def get_prefer_sfp_state(self): + return self.api.Networking.Interfaces.get_prefer_sfp_state(self.interfaces) + + def 
get_flow_control(self): + return self.api.Networking.Interfaces.get_requested_flow_control(self.interfaces) + + def get_sflow_poll_interval(self): + return self.api.Networking.Interfaces.get_sflow_poll_interval(self.interfaces) + + def get_sflow_poll_interval_global(self): + return self.api.Networking.Interfaces.get_sflow_poll_interval_global(self.interfaces) + + def get_sfp_media_state(self): + return self.api.Networking.Interfaces.get_sfp_media_state(self.interfaces) + + def get_stp_active_edge_port_state(self): + return self.api.Networking.Interfaces.get_stp_active_edge_port_state(self.interfaces) + + def get_stp_enabled_state(self): + return self.api.Networking.Interfaces.get_stp_enabled_state(self.interfaces) + + def get_stp_link_type(self): + return self.api.Networking.Interfaces.get_stp_link_type(self.interfaces) + + def get_stp_protocol_detection_reset_state(self): + return self.api.Networking.Interfaces.get_stp_protocol_detection_reset_state(self.interfaces) + + +class SelfIPs(object): + """Self IPs class. + + F5 BIG-IP Self IPs class. + + Attributes: + api: iControl API instance. + self_ips: List of self IPs. + """ + + def __init__(self, api, regex=None): + self.api = api + self.self_ips = api.Networking.SelfIPV2.get_list() + if regex: + re_filter = re.compile(regex) + self.self_ips = filter(re_filter.search, self.self_ips) + + def get_list(self): + return self.self_ips + + def get_address(self): + return self.api.Networking.SelfIPV2.get_address(self.self_ips) + + def get_allow_access_list(self): + return self.api.Networking.SelfIPV2.get_allow_access_list(self.self_ips) + + def get_description(self): + return self.api.Networking.SelfIPV2.get_description(self.self_ips) + + def get_enforced_firewall_policy(self): + return self.api.Networking.SelfIPV2.get_enforced_firewall_policy(self.self_ips) + + def get_floating_state(self): + return self.api.Networking.SelfIPV2.get_floating_state(self.self_ips) + + def get_fw_rule(self): + return self.api.Networking.SelfIPV2.get_fw_rule(self.self_ips) + + def get_netmask(self): + return self.api.Networking.SelfIPV2.get_netmask(self.self_ips) + + def get_staged_firewall_policy(self): + return self.api.Networking.SelfIPV2.get_staged_firewall_policy(self.self_ips) + + def get_traffic_group(self): + return self.api.Networking.SelfIPV2.get_traffic_group(self.self_ips) + + def get_vlan(self): + return self.api.Networking.SelfIPV2.get_vlan(self.self_ips) + + def get_is_traffic_group_inherited(self): + return self.api.Networking.SelfIPV2.is_traffic_group_inherited(self.self_ips) + + +class Trunks(object): + """Trunks class. + + F5 BIG-IP trunks class. + + Attributes: + api: iControl API instance. + trunks: List of trunks. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.trunks = api.Networking.Trunk.get_list() + if regex: + re_filter = re.compile(regex) + self.trunks = filter(re_filter.search, self.trunks) + + def get_list(self): + return self.trunks + + def get_active_lacp_state(self): + return self.api.Networking.Trunk.get_active_lacp_state(self.trunks) + + def get_configured_member_count(self): + return self.api.Networking.Trunk.get_configured_member_count(self.trunks) + + def get_description(self): + return self.api.Networking.Trunk.get_description(self.trunks) + + def get_distribution_hash_option(self): + return self.api.Networking.Trunk.get_distribution_hash_option(self.trunks) + + def get_interface(self): + return self.api.Networking.Trunk.get_interface(self.trunks) + + def get_lacp_enabled_state(self): + return self.api.Networking.Trunk.get_lacp_enabled_state(self.trunks) + + def get_lacp_timeout_option(self): + return self.api.Networking.Trunk.get_lacp_timeout_option(self.trunks) + + def get_link_selection_policy(self): + return self.api.Networking.Trunk.get_link_selection_policy(self.trunks) + + def get_media_speed(self): + return self.api.Networking.Trunk.get_media_speed(self.trunks) + + def get_media_status(self): + return self.api.Networking.Trunk.get_media_status(self.trunks) + + def get_operational_member_count(self): + return self.api.Networking.Trunk.get_operational_member_count(self.trunks) + + def get_stp_enabled_state(self): + return self.api.Networking.Trunk.get_stp_enabled_state(self.trunks) + + def get_stp_protocol_detection_reset_state(self): + return self.api.Networking.Trunk.get_stp_protocol_detection_reset_state(self.trunks) + + +class Vlans(object): + """Vlans class. + + F5 BIG-IP Vlans class. + + Attributes: + api: iControl API instance. + vlans: List of VLANs. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.vlans = api.Networking.VLAN.get_list() + if regex: + re_filter = re.compile(regex) + self.vlans = filter(re_filter.search, self.vlans) + + def get_list(self): + return self.vlans + + def get_auto_lasthop(self): + return self.api.Networking.VLAN.get_auto_lasthop(self.vlans) + + def get_cmp_hash_algorithm(self): + return self.api.Networking.VLAN.get_cmp_hash_algorithm(self.vlans) + + def get_description(self): + return self.api.Networking.VLAN.get_description(self.vlans) + + def get_dynamic_forwarding(self): + return self.api.Networking.VLAN.get_dynamic_forwarding(self.vlans) + + def get_failsafe_action(self): + return self.api.Networking.VLAN.get_failsafe_action(self.vlans) + + def get_failsafe_state(self): + return self.api.Networking.VLAN.get_failsafe_state(self.vlans) + + def get_failsafe_timeout(self): + return self.api.Networking.VLAN.get_failsafe_timeout(self.vlans) + + def get_if_index(self): + return self.api.Networking.VLAN.get_if_index(self.vlans) + + def get_learning_mode(self): + return self.api.Networking.VLAN.get_learning_mode(self.vlans) + + def get_mac_masquerade_address(self): + return self.api.Networking.VLAN.get_mac_masquerade_address(self.vlans) + + def get_member(self): + return self.api.Networking.VLAN.get_member(self.vlans) + + def get_mtu(self): + return self.api.Networking.VLAN.get_mtu(self.vlans) + + def get_sflow_poll_interval(self): + return self.api.Networking.VLAN.get_sflow_poll_interval(self.vlans) + + def get_sflow_poll_interval_global(self): + return self.api.Networking.VLAN.get_sflow_poll_interval_global(self.vlans) + + def get_sflow_sampling_rate(self): + return self.api.Networking.VLAN.get_sflow_sampling_rate(self.vlans) + + def get_sflow_sampling_rate_global(self): + return self.api.Networking.VLAN.get_sflow_sampling_rate_global(self.vlans) + + def get_source_check_state(self): + return self.api.Networking.VLAN.get_source_check_state(self.vlans) + + def get_true_mac_address(self): + return self.api.Networking.VLAN.get_true_mac_address(self.vlans) + + def get_vlan_id(self): + return self.api.Networking.VLAN.get_vlan_id(self.vlans) + + +class Software(object): + """Software class. + + F5 BIG-IP software class. + + Attributes: + api: iControl API instance. + """ + + def __init__(self, api): + self.api = api + + def get_all_software_status(self): + return self.api.System.SoftwareManagement.get_all_software_status() + + +class VirtualServers(object): + """Virtual servers class. + + F5 BIG-IP virtual servers class. + + Attributes: + api: iControl API instance. + virtual_servers: List of virtual servers. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.virtual_servers = api.LocalLB.VirtualServer.get_list() + if regex: + re_filter = re.compile(regex) + self.virtual_servers = filter(re_filter.search, self.virtual_servers) + + def get_list(self): + return self.virtual_servers + + def get_actual_hardware_acceleration(self): + return self.api.LocalLB.VirtualServer.get_actual_hardware_acceleration(self.virtual_servers) + + def get_authentication_profile(self): + return self.api.LocalLB.VirtualServer.get_authentication_profile(self.virtual_servers) + + def get_auto_lasthop(self): + return self.api.LocalLB.VirtualServer.get_auto_lasthop(self.virtual_servers) + + def get_bw_controller_policy(self): + return self.api.LocalLB.VirtualServer.get_bw_controller_policy(self.virtual_servers) + + def get_clone_pool(self): + return self.api.LocalLB.VirtualServer.get_clone_pool(self.virtual_servers) + + def get_cmp_enable_mode(self): + return self.api.LocalLB.VirtualServer.get_cmp_enable_mode(self.virtual_servers) + + def get_connection_limit(self): + return self.api.LocalLB.VirtualServer.get_connection_limit(self.virtual_servers) + + def get_connection_mirror_state(self): + return self.api.LocalLB.VirtualServer.get_connection_mirror_state(self.virtual_servers) + + def get_default_pool_name(self): + return self.api.LocalLB.VirtualServer.get_default_pool_name(self.virtual_servers) + + def get_description(self): + return self.api.LocalLB.VirtualServer.get_description(self.virtual_servers) + + def get_destination(self): + return self.api.LocalLB.VirtualServer.get_destination_v2(self.virtual_servers) + + def get_enabled_state(self): + return self.api.LocalLB.VirtualServer.get_enabled_state(self.virtual_servers) + + def get_enforced_firewall_policy(self): + return self.api.LocalLB.VirtualServer.get_enforced_firewall_policy(self.virtual_servers) + + def get_fallback_persistence_profile(self): + return self.api.LocalLB.VirtualServer.get_fallback_persistence_profile(self.virtual_servers) + + def get_fw_rule(self): + return self.api.LocalLB.VirtualServer.get_fw_rule(self.virtual_servers) + + def get_gtm_score(self): + return self.api.LocalLB.VirtualServer.get_gtm_score(self.virtual_servers) + + def get_last_hop_pool(self): + return self.api.LocalLB.VirtualServer.get_last_hop_pool(self.virtual_servers) + + def get_nat64_state(self): + return self.api.LocalLB.VirtualServer.get_nat64_state(self.virtual_servers) + + def get_object_status(self): + return self.api.LocalLB.VirtualServer.get_object_status(self.virtual_servers) + + def get_persistence_profile(self): + return self.api.LocalLB.VirtualServer.get_persistence_profile(self.virtual_servers) + + def get_profile(self): + return self.api.LocalLB.VirtualServer.get_profile(self.virtual_servers) + + def get_protocol(self): + return self.api.LocalLB.VirtualServer.get_protocol(self.virtual_servers) + + def get_rate_class(self): + return self.api.LocalLB.VirtualServer.get_rate_class(self.virtual_servers) + + def get_rate_limit(self): + return self.api.LocalLB.VirtualServer.get_rate_limit(self.virtual_servers) + + def get_rate_limit_destination_mask(self): + return self.api.LocalLB.VirtualServer.get_rate_limit_destination_mask(self.virtual_servers) + + def get_rate_limit_mode(self): + return self.api.LocalLB.VirtualServer.get_rate_limit_mode(self.virtual_servers) + + def get_rate_limit_source_mask(self): + return self.api.LocalLB.VirtualServer.get_rate_limit_source_mask(self.virtual_servers) + + def get_related_rule(self): + return 
self.api.LocalLB.VirtualServer.get_related_rule(self.virtual_servers) + + def get_rule(self): + return self.api.LocalLB.VirtualServer.get_rule(self.virtual_servers) + + def get_security_log_profile(self): + return self.api.LocalLB.VirtualServer.get_security_log_profile(self.virtual_servers) + + def get_snat_pool(self): + return self.api.LocalLB.VirtualServer.get_snat_pool(self.virtual_servers) + + def get_snat_type(self): + return self.api.LocalLB.VirtualServer.get_snat_type(self.virtual_servers) + + def get_source_address(self): + return self.api.LocalLB.VirtualServer.get_source_address(self.virtual_servers) + + def get_source_address_translation_lsn_pool(self): + return self.api.LocalLB.VirtualServer.get_source_address_translation_lsn_pool(self.virtual_servers) + + def get_source_address_translation_snat_pool(self): + return self.api.LocalLB.VirtualServer.get_source_address_translation_snat_pool(self.virtual_servers) + + def get_source_address_translation_type(self): + return self.api.LocalLB.VirtualServer.get_source_address_translation_type(self.virtual_servers) + + def get_source_port_behavior(self): + return self.api.LocalLB.VirtualServer.get_source_port_behavior(self.virtual_servers) + + def get_staged_firewall_policy(self): + return self.api.LocalLB.VirtualServer.get_staged_firewall_policy(self.virtual_servers) + + def get_translate_address_state(self): + return self.api.LocalLB.VirtualServer.get_translate_address_state(self.virtual_servers) + + def get_translate_port_state(self): + return self.api.LocalLB.VirtualServer.get_translate_port_state(self.virtual_servers) + + def get_type(self): + return self.api.LocalLB.VirtualServer.get_type(self.virtual_servers) + + def get_vlan(self): + return self.api.LocalLB.VirtualServer.get_vlan(self.virtual_servers) + + def get_wildmask(self): + return self.api.LocalLB.VirtualServer.get_wildmask(self.virtual_servers) + + +class Pools(object): + """Pools class. + + F5 BIG-IP pools class. + + Attributes: + api: iControl API instance. + pool_names: List of pool names. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.pool_names = api.LocalLB.Pool.get_list() + if regex: + re_filter = re.compile(regex) + self.pool_names = filter(re_filter.search, self.pool_names) + + def get_list(self): + return self.pool_names + + def get_action_on_service_down(self): + return self.api.LocalLB.Pool.get_action_on_service_down(self.pool_names) + + def get_active_member_count(self): + return self.api.LocalLB.Pool.get_active_member_count(self.pool_names) + + def get_aggregate_dynamic_ratio(self): + return self.api.LocalLB.Pool.get_aggregate_dynamic_ratio(self.pool_names) + + def get_allow_nat_state(self): + return self.api.LocalLB.Pool.get_allow_nat_state(self.pool_names) + + def get_allow_snat_state(self): + return self.api.LocalLB.Pool.get_allow_snat_state(self.pool_names) + + def get_client_ip_tos(self): + return self.api.LocalLB.Pool.get_client_ip_tos(self.pool_names) + + def get_client_link_qos(self): + return self.api.LocalLB.Pool.get_client_link_qos(self.pool_names) + + def get_description(self): + return self.api.LocalLB.Pool.get_description(self.pool_names) + + def get_gateway_failsafe_device(self): + return self.api.LocalLB.Pool.get_gateway_failsafe_device(self.pool_names) + + def get_ignore_persisted_weight_state(self): + return self.api.LocalLB.Pool.get_ignore_persisted_weight_state(self.pool_names) + + def get_lb_method(self): + return self.api.LocalLB.Pool.get_lb_method(self.pool_names) + + def get_member(self): + return self.api.LocalLB.Pool.get_member_v2(self.pool_names) + + def get_minimum_active_member(self): + return self.api.LocalLB.Pool.get_minimum_active_member(self.pool_names) + + def get_minimum_up_member(self): + return self.api.LocalLB.Pool.get_minimum_up_member(self.pool_names) + + def get_minimum_up_member_action(self): + return self.api.LocalLB.Pool.get_minimum_up_member_action(self.pool_names) + + def get_minimum_up_member_enabled_state(self): + return self.api.LocalLB.Pool.get_minimum_up_member_enabled_state(self.pool_names) + + def get_monitor_association(self): + return self.api.LocalLB.Pool.get_monitor_association(self.pool_names) + + def get_monitor_instance(self): + return self.api.LocalLB.Pool.get_monitor_instance(self.pool_names) + + def get_object_status(self): + return self.api.LocalLB.Pool.get_object_status(self.pool_names) + + def get_profile(self): + return self.api.LocalLB.Pool.get_profile(self.pool_names) + + def get_queue_depth_limit(self): + return self.api.LocalLB.Pool.get_queue_depth_limit(self.pool_names) + + def get_queue_on_connection_limit_state(self): + return self.api.LocalLB.Pool.get_queue_on_connection_limit_state(self.pool_names) + + def get_queue_time_limit(self): + return self.api.LocalLB.Pool.get_queue_time_limit(self.pool_names) + + def get_reselect_tries(self): + return self.api.LocalLB.Pool.get_reselect_tries(self.pool_names) + + def get_server_ip_tos(self): + return self.api.LocalLB.Pool.get_server_ip_tos(self.pool_names) + + def get_server_link_qos(self): + return self.api.LocalLB.Pool.get_server_link_qos(self.pool_names) + + def get_simple_timeout(self): + return self.api.LocalLB.Pool.get_simple_timeout(self.pool_names) + + def get_slow_ramp_time(self): + return self.api.LocalLB.Pool.get_slow_ramp_time(self.pool_names) + + +class Devices(object): + """Devices class. + + F5 BIG-IP devices class. + + Attributes: + api: iControl API instance. + devices: List of devices. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.devices = api.Management.Device.get_list() + if regex: + re_filter = re.compile(regex) + self.devices = filter(re_filter.search, self.devices) + + def get_list(self): + return self.devices + + def get_active_modules(self): + return self.api.Management.Device.get_active_modules(self.devices) + + def get_base_mac_address(self): + return self.api.Management.Device.get_base_mac_address(self.devices) + + def get_blade_addresses(self): + return self.api.Management.Device.get_blade_addresses(self.devices) + + def get_build(self): + return self.api.Management.Device.get_build(self.devices) + + def get_chassis_id(self): + return self.api.Management.Device.get_chassis_id(self.devices) + + def get_chassis_type(self): + return self.api.Management.Device.get_chassis_type(self.devices) + + def get_comment(self): + return self.api.Management.Device.get_comment(self.devices) + + def get_configsync_address(self): + return self.api.Management.Device.get_configsync_address(self.devices) + + def get_contact(self): + return self.api.Management.Device.get_contact(self.devices) + + def get_description(self): + return self.api.Management.Device.get_description(self.devices) + + def get_edition(self): + return self.api.Management.Device.get_edition(self.devices) + + def get_failover_state(self): + return self.api.Management.Device.get_failover_state(self.devices) + + def get_local_device(self): + return self.api.Management.Device.get_local_device() + + def get_hostname(self): + return self.api.Management.Device.get_hostname(self.devices) + + def get_inactive_modules(self): + return self.api.Management.Device.get_inactive_modules(self.devices) + + def get_location(self): + return self.api.Management.Device.get_location(self.devices) + + def get_management_address(self): + return self.api.Management.Device.get_management_address(self.devices) + + def get_marketing_name(self): + return self.api.Management.Device.get_marketing_name(self.devices) + + def get_multicast_address(self): + return self.api.Management.Device.get_multicast_address(self.devices) + + def get_optional_modules(self): + return self.api.Management.Device.get_optional_modules(self.devices) + + def get_platform_id(self): + return self.api.Management.Device.get_platform_id(self.devices) + + def get_primary_mirror_address(self): + return self.api.Management.Device.get_primary_mirror_address(self.devices) + + def get_product(self): + return self.api.Management.Device.get_product(self.devices) + + def get_secondary_mirror_address(self): + return self.api.Management.Device.get_secondary_mirror_address(self.devices) + + def get_software_version(self): + return self.api.Management.Device.get_software_version(self.devices) + + def get_timelimited_modules(self): + return self.api.Management.Device.get_timelimited_modules(self.devices) + + def get_timezone(self): + return self.api.Management.Device.get_timezone(self.devices) + + def get_unicast_addresses(self): + return self.api.Management.Device.get_unicast_addresses(self.devices) + + +class DeviceGroups(object): + """Device groups class. + + F5 BIG-IP device groups class. + + Attributes: + api: iControl API instance. + device_groups: List of device groups. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.device_groups = api.Management.DeviceGroup.get_list() + if regex: + re_filter = re.compile(regex) + self.device_groups = filter(re_filter.search, self.device_groups) + + def get_list(self): + return self.device_groups + + def get_all_preferred_active(self): + return self.api.Management.DeviceGroup.get_all_preferred_active(self.device_groups) + + def get_autosync_enabled_state(self): + return self.api.Management.DeviceGroup.get_autosync_enabled_state(self.device_groups) + + def get_description(self): + return self.api.Management.DeviceGroup.get_description(self.device_groups) + + def get_device(self): + return self.api.Management.DeviceGroup.get_device(self.device_groups) + + def get_full_load_on_sync_state(self): + return self.api.Management.DeviceGroup.get_full_load_on_sync_state(self.device_groups) + + def get_incremental_config_sync_size_maximum(self): + return self.api.Management.DeviceGroup.get_incremental_config_sync_size_maximum(self.device_groups) + + def get_network_failover_enabled_state(self): + return self.api.Management.DeviceGroup.get_network_failover_enabled_state(self.device_groups) + + def get_sync_status(self): + return self.api.Management.DeviceGroup.get_sync_status(self.device_groups) + + def get_type(self): + return self.api.Management.DeviceGroup.get_type(self.device_groups) + + +class TrafficGroups(object): + """Traffic groups class. + + F5 BIG-IP traffic groups class. + + Attributes: + api: iControl API instance. + traffic_groups: List of traffic groups. + """ + + def __init__(self, api, regex=None): + self.api = api + self.traffic_groups = api.Management.TrafficGroup.get_list() + if regex: + re_filter = re.compile(regex) + self.traffic_groups = filter(re_filter.search, self.traffic_groups) + + def get_list(self): + return self.traffic_groups + + def get_auto_failback_enabled_state(self): + return self.api.Management.TrafficGroup.get_auto_failback_enabled_state(self.traffic_groups) + + def get_auto_failback_time(self): + return self.api.Management.TrafficGroup.get_auto_failback_time(self.traffic_groups) + + def get_default_device(self): + return self.api.Management.TrafficGroup.get_default_device(self.traffic_groups) + + def get_description(self): + return self.api.Management.TrafficGroup.get_description(self.traffic_groups) + + def get_ha_load_factor(self): + return self.api.Management.TrafficGroup.get_ha_load_factor(self.traffic_groups) + + def get_ha_order(self): + return self.api.Management.TrafficGroup.get_ha_order(self.traffic_groups) + + def get_is_floating(self): + return self.api.Management.TrafficGroup.get_is_floating(self.traffic_groups) + + def get_mac_masquerade_address(self): + return self.api.Management.TrafficGroup.get_mac_masquerade_address(self.traffic_groups) + + def get_unit_id(self): + return self.api.Management.TrafficGroup.get_unit_id(self.traffic_groups) + + +class Rules(object): + """Rules class. + + F5 BIG-IP iRules class. + + Attributes: + api: iControl API instance. + rules: List of iRules. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.rules = api.LocalLB.Rule.get_list() + if regex: + re_filter = re.compile(regex) + self.traffic_groups = filter(re_filter.search, self.rules) + + def get_list(self): + return self.rules + + def get_description(self): + return self.api.LocalLB.Rule.get_description(rule_names=self.rules) + + def get_ignore_vertification(self): + return self.api.LocalLB.Rule.get_ignore_vertification(rule_names=self.rules) + + def get_verification_status(self): + return self.api.LocalLB.Rule.get_verification_status_v2(rule_names=self.rules) + + def get_definition(self): + return [x['rule_definition'] for x in self.api.LocalLB.Rule.query_rule(rule_names=self.rules)] + +class Nodes(object): + """Nodes class. + + F5 BIG-IP nodes class. + + Attributes: + api: iControl API instance. + nodes: List of nodes. + """ + + def __init__(self, api, regex=None): + self.api = api + self.nodes = api.LocalLB.NodeAddressV2.get_list() + if regex: + re_filter = re.compile(regex) + self.nodes = filter(re_filter.search, self.nodes) + + def get_list(self): + return self.nodes + + def get_address(self): + return self.api.LocalLB.NodeAddressV2.get_address(nodes=self.nodes) + + def get_connection_limit(self): + return self.api.LocalLB.NodeAddressV2.get_connection_limit(nodes=self.nodes) + + def get_description(self): + return self.api.LocalLB.NodeAddressV2.get_description(nodes=self.nodes) + + def get_dynamic_ratio(self): + return self.api.LocalLB.NodeAddressV2.get_dynamic_ratio_v2(nodes=self.nodes) + + def get_monitor_instance(self): + return self.api.LocalLB.NodeAddressV2.get_monitor_instance(nodes=self.nodes) + + def get_monitor_rule(self): + return self.api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=self.nodes) + + def get_monitor_status(self): + return self.api.LocalLB.NodeAddressV2.get_monitor_status(nodes=self.nodes) + + def get_object_status(self): + return self.api.LocalLB.NodeAddressV2.get_object_status(nodes=self.nodes) + + def get_rate_limit(self): + return self.api.LocalLB.NodeAddressV2.get_rate_limit(nodes=self.nodes) + + def get_ratio(self): + return self.api.LocalLB.NodeAddressV2.get_ratio(nodes=self.nodes) + + def get_session_status(self): + return self.api.LocalLB.NodeAddressV2.get_session_status(nodes=self.nodes) + + +class VirtualAddresses(object): + """Virtual addresses class. + + F5 BIG-IP virtual addresses class. + + Attributes: + api: iControl API instance. + virtual_addresses: List of virtual addresses. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.virtual_addresses = api.LocalLB.VirtualAddressV2.get_list() + if regex: + re_filter = re.compile(regex) + self.virtual_addresses = filter(re_filter.search, self.virtual_addresses) + + def get_list(self): + return self.virtual_addresses + + def get_address(self): + return self.api.LocalLB.VirtualAddressV2.get_address(self.virtual_addresses) + + def get_arp_state(self): + return self.api.LocalLB.VirtualAddressV2.get_arp_state(self.virtual_addresses) + + def get_auto_delete_state(self): + return self.api.LocalLB.VirtualAddressV2.get_auto_delete_state(self.virtual_addresses) + + def get_connection_limit(self): + return self.api.LocalLB.VirtualAddressV2.get_connection_limit(self.virtual_addresses) + + def get_description(self): + return self.api.LocalLB.VirtualAddressV2.get_description(self.virtual_addresses) + + def get_enabled_state(self): + return self.api.LocalLB.VirtualAddressV2.get_enabled_state(self.virtual_addresses) + + def get_icmp_echo_state(self): + return self.api.LocalLB.VirtualAddressV2.get_icmp_echo_state(self.virtual_addresses) + + def get_is_floating_state(self): + return self.api.LocalLB.VirtualAddressV2.get_is_floating_state(self.virtual_addresses) + + def get_netmask(self): + return self.api.LocalLB.VirtualAddressV2.get_netmask(self.virtual_addresses) + + def get_object_status(self): + return self.api.LocalLB.VirtualAddressV2.get_object_status(self.virtual_addresses) + + def get_route_advertisement_state(self): + return self.api.LocalLB.VirtualAddressV2.get_route_advertisement_state(self.virtual_addresses) + + def get_traffic_group(self): + return self.api.LocalLB.VirtualAddressV2.get_traffic_group(self.virtual_addresses) + + +class AddressClasses(object): + """Address group/class class. + + F5 BIG-IP address group/class class. + + Attributes: + api: iControl API instance. + address_classes: List of address classes. + """ + + def __init__(self, api, regex=None): + self.api = api + self.address_classes = api.LocalLB.Class.get_address_class_list() + if regex: + re_filter = re.compile(regex) + self.address_classes = filter(re_filter.search, self.address_classes) + + def get_list(self): + return self.address_classes + + def get_address_class(self): + key = self.api.LocalLB.Class.get_address_class(self.address_classes) + value = self.api.LocalLB.Class.get_address_class_member_data_value(key) + result = map(zip, [x['members'] for x in key], value) + return result + + def get_description(self): + return self.api.LocalLB.Class.get_description(self.address_classes) + + +class Certificates(object): + """Certificates class. + + F5 BIG-IP certificates class. + + Attributes: + api: iControl API instance. + certificates: List of certificate identifiers. + certificate_list: List of certificate information structures. + """ + + def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"): + self.api = api + self.certificate_list = api.Management.KeyCertificate.get_certificate_list(mode=mode) + self.certificates = [x['certificate']['cert_info']['id'] for x in self.certificate_list] + if regex: + re_filter = re.compile(regex) + self.certificates = filter(re_filter.search, self.certificates) + self.certificate_list = [x for x in self.certificate_list if x['certificate']['cert_info']['id'] in self.certificates] + + def get_list(self): + return self.certificates + + def get_certificate_list(self): + return self.certificate_list + + +class Keys(object): + """Keys class. + + F5 BIG-IP keys class. 
+ + Attributes: + api: iControl API instance. + keys: List of key identifiers. + key_list: List of key information structures. + """ + + def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"): + self.api = api + self.key_list = api.Management.KeyCertificate.get_key_list(mode=mode) + self.keys = [x['key_info']['id'] for x in self.key_list] + if regex: + re_filter = re.compile(regex) + self.keys = filter(re_filter.search, self.keys) + self.key_list = [x for x in self.key_list if x['key_info']['id'] in self.keys] + + def get_list(self): + return self.keys + + def get_key_list(self): + return self.key_list + + +class ProfileClientSSL(object): + """Client SSL profiles class. + + F5 BIG-IP client SSL profiles class. + + Attributes: + api: iControl API instance. + profiles: List of client SSL profiles. + """ + + def __init__(self, api, regex=None): + self.api = api + self.profiles = api.LocalLB.ProfileClientSSL.get_list() + if regex: + re_filter = re.compile(regex) + self.profiles = filter(re_filter.search, self.profiles) + + def get_list(self): + return self.profiles + + def get_alert_timeout(self): + return self.api.LocalLB.ProfileClientSSL.get_alert_timeout(self.profiles) + + def get_allow_nonssl_state(self): + return self.api.LocalLB.ProfileClientSSL.get_allow_nonssl_state(self.profiles) + + def get_authenticate_depth(self): + return self.api.LocalLB.ProfileClientSSL.get_authenticate_depth(self.profiles) + + def get_authenticate_once_state(self): + return self.api.LocalLB.ProfileClientSSL.get_authenticate_once_state(self.profiles) + + def get_ca_file(self): + return self.api.LocalLB.ProfileClientSSL.get_ca_file_v2(self.profiles) + + def get_cache_size(self): + return self.api.LocalLB.ProfileClientSSL.get_cache_size(self.profiles) + + def get_cache_timeout(self): + return self.api.LocalLB.ProfileClientSSL.get_cache_timeout(self.profiles) + + def get_certificate_file(self): + return self.api.LocalLB.ProfileClientSSL.get_certificate_file_v2(self.profiles) + + def get_chain_file(self): + return self.api.LocalLB.ProfileClientSSL.get_chain_file_v2(self.profiles) + + def get_cipher_list(self): + return self.api.LocalLB.ProfileClientSSL.get_cipher_list(self.profiles) + + def get_client_certificate_ca_file(self): + return self.api.LocalLB.ProfileClientSSL.get_client_certificate_ca_file_v2(self.profiles) + + def get_crl_file(self): + return self.api.LocalLB.ProfileClientSSL.get_crl_file_v2(self.profiles) + + def get_default_profile(self): + return self.api.LocalLB.ProfileClientSSL.get_default_profile(self.profiles) + + def get_description(self): + return self.api.LocalLB.ProfileClientSSL.get_description(self.profiles) + + def get_forward_proxy_ca_certificate_file(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_certificate_file(self.profiles) + + def get_forward_proxy_ca_key_file(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_key_file(self.profiles) + + def get_forward_proxy_ca_passphrase(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_passphrase(self.profiles) + + def get_forward_proxy_certificate_extension_include(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_extension_include(self.profiles) + + def get_forward_proxy_certificate_lifespan(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_lifespan(self.profiles) + + def get_forward_proxy_enabled_state(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_enabled_state(self.profiles) + + def 
get_forward_proxy_lookup_by_ipaddr_port_state(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_lookup_by_ipaddr_port_state(self.profiles) + + def get_handshake_timeout(self): + return self.api.LocalLB.ProfileClientSSL.get_handshake_timeout(self.profiles) + + def get_key_file(self): + return self.api.LocalLB.ProfileClientSSL.get_key_file_v2(self.profiles) + + def get_modssl_emulation_state(self): + return self.api.LocalLB.ProfileClientSSL.get_modssl_emulation_state(self.profiles) + + def get_passphrase(self): + return self.api.LocalLB.ProfileClientSSL.get_passphrase(self.profiles) + + def get_peer_certification_mode(self): + return self.api.LocalLB.ProfileClientSSL.get_peer_certification_mode(self.profiles) + + def get_profile_mode(self): + return self.api.LocalLB.ProfileClientSSL.get_profile_mode(self.profiles) + + def get_renegotiation_maximum_record_delay(self): + return self.api.LocalLB.ProfileClientSSL.get_renegotiation_maximum_record_delay(self.profiles) + + def get_renegotiation_period(self): + return self.api.LocalLB.ProfileClientSSL.get_renegotiation_period(self.profiles) + + def get_renegotiation_state(self): + return self.api.LocalLB.ProfileClientSSL.get_renegotiation_state(self.profiles) + + def get_renegotiation_throughput(self): + return self.api.LocalLB.ProfileClientSSL.get_renegotiation_throughput(self.profiles) + + def get_retain_certificate_state(self): + return self.api.LocalLB.ProfileClientSSL.get_retain_certificate_state(self.profiles) + + def get_secure_renegotiation_mode(self): + return self.api.LocalLB.ProfileClientSSL.get_secure_renegotiation_mode(self.profiles) + + def get_server_name(self): + return self.api.LocalLB.ProfileClientSSL.get_server_name(self.profiles) + + def get_session_ticket_state(self): + return self.api.LocalLB.ProfileClientSSL.get_session_ticket_state(self.profiles) + + def get_sni_default_state(self): + return self.api.LocalLB.ProfileClientSSL.get_sni_default_state(self.profiles) + + def get_sni_require_state(self): + return self.api.LocalLB.ProfileClientSSL.get_sni_require_state(self.profiles) + + def get_ssl_option(self): + return self.api.LocalLB.ProfileClientSSL.get_ssl_option(self.profiles) + + def get_strict_resume_state(self): + return self.api.LocalLB.ProfileClientSSL.get_strict_resume_state(self.profiles) + + def get_unclean_shutdown_state(self): + return self.api.LocalLB.ProfileClientSSL.get_unclean_shutdown_state(self.profiles) + + def get_is_base_profile(self): + return self.api.LocalLB.ProfileClientSSL.is_base_profile(self.profiles) + + def get_is_system_profile(self): + return self.api.LocalLB.ProfileClientSSL.is_system_profile(self.profiles) + + +class SystemInfo(object): + """System information class. + + F5 BIG-IP system information class. + + Attributes: + api: iControl API instance. 
+ """ + + def __init__(self, api): + self.api = api + + def get_base_mac_address(self): + return self.api.System.SystemInfo.get_base_mac_address() + + def get_blade_temperature(self): + return self.api.System.SystemInfo.get_blade_temperature() + + def get_chassis_slot_information(self): + return self.api.System.SystemInfo.get_chassis_slot_information() + + def get_globally_unique_identifier(self): + return self.api.System.SystemInfo.get_globally_unique_identifier() + + def get_group_id(self): + return self.api.System.SystemInfo.get_group_id() + + def get_hardware_information(self): + return self.api.System.SystemInfo.get_hardware_information() + + def get_marketing_name(self): + return self.api.System.SystemInfo.get_marketing_name() + + def get_product_information(self): + return self.api.System.SystemInfo.get_product_information() + + def get_pva_version(self): + return self.api.System.SystemInfo.get_pva_version() + + def get_system_id(self): + return self.api.System.SystemInfo.get_system_id() + + def get_system_information(self): + return self.api.System.SystemInfo.get_system_information() + + def get_time(self): + return self.api.System.SystemInfo.get_time() + + def get_time_zone(self): + return self.api.System.SystemInfo.get_time_zone() + + def get_uptime(self): + return self.api.System.SystemInfo.get_uptime() + + +def generate_dict(api_obj, fields): + result_dict = {} + lists = [] + supported_fields = [] + if api_obj.get_list(): + for field in fields: + try: + api_response = getattr(api_obj, "get_" + field)() + except MethodNotFound: + pass + else: + lists.append(api_response) + supported_fields.append(field) + for i, j in enumerate(api_obj.get_list()): + temp = {} + temp.update([(item[0], item[1][i]) for item in zip(supported_fields, lists)]) + result_dict[j] = temp + return result_dict + +def generate_simple_dict(api_obj, fields): + result_dict = {} + for field in fields: + try: + api_response = getattr(api_obj, "get_" + field)() + except MethodNotFound: + pass + else: + result_dict[field] = api_response + return result_dict + +def generate_interface_dict(f5, regex): + interfaces = Interfaces(f5.get_api(), regex) + fields = ['active_media', 'actual_flow_control', 'bundle_state', + 'description', 'dual_media_state', 'enabled_state', 'if_index', + 'learning_mode', 'lldp_admin_status', 'lldp_tlvmap', + 'mac_address', 'media', 'media_option', 'media_option_sfp', + 'media_sfp', 'media_speed', 'media_status', 'mtu', + 'phy_master_slave_mode', 'prefer_sfp_state', 'flow_control', + 'sflow_poll_interval', 'sflow_poll_interval_global', + 'sfp_media_state', 'stp_active_edge_port_state', + 'stp_enabled_state', 'stp_link_type', + 'stp_protocol_detection_reset_state'] + return generate_dict(interfaces, fields) + +def generate_self_ip_dict(f5, regex): + self_ips = SelfIPs(f5.get_api(), regex) + fields = ['address', 'allow_access_list', 'description', + 'enforced_firewall_policy', 'floating_state', 'fw_rule', + 'netmask', 'staged_firewall_policy', 'traffic_group', + 'vlan', 'is_traffic_group_inherited'] + return generate_dict(self_ips, fields) + +def generate_trunk_dict(f5, regex): + trunks = Trunks(f5.get_api(), regex) + fields = ['active_lacp_state', 'configured_member_count', 'description', + 'distribution_hash_option', 'interface', 'lacp_enabled_state', + 'lacp_timeout_option', 'link_selection_policy', 'media_speed', + 'media_status', 'operational_member_count', 'stp_enabled_state', + 'stp_protocol_detection_reset_state'] + return generate_dict(trunks, fields) + +def generate_vlan_dict(f5, 
regex): + vlans = Vlans(f5.get_api(), regex) + fields = ['auto_lasthop', 'cmp_hash_algorithm', 'description', + 'dynamic_forwarding', 'failsafe_action', 'failsafe_state', + 'failsafe_timeout', 'if_index', 'learning_mode', + 'mac_masquerade_address', 'member', 'mtu', + 'sflow_poll_interval', 'sflow_poll_interval_global', + 'sflow_sampling_rate', 'sflow_sampling_rate_global', + 'source_check_state', 'true_mac_address', 'vlan_id'] + return generate_dict(vlans, fields) + +def generate_vs_dict(f5, regex): + virtual_servers = VirtualServers(f5.get_api(), regex) + fields = ['actual_hardware_acceleration', 'authentication_profile', + 'auto_lasthop', 'bw_controller_policy', 'clone_pool', + 'cmp_enable_mode', 'connection_limit', 'connection_mirror_state', + 'default_pool_name', 'description', 'destination', + 'enabled_state', 'enforced_firewall_policy', + 'fallback_persistence_profile', 'fw_rule', 'gtm_score', + 'last_hop_pool', 'nat64_state', 'object_status', + 'persistence_profile', 'profile', 'protocol', + 'rate_class', 'rate_limit', 'rate_limit_destination_mask', + 'rate_limit_mode', 'rate_limit_source_mask', 'related_rule', + 'rule', 'security_log_profile', 'snat_pool', 'snat_type', + 'source_address', 'source_address_translation_lsn_pool', + 'source_address_translation_snat_pool', + 'source_address_translation_type', 'source_port_behavior', + 'staged_firewall_policy', 'translate_address_state', + 'translate_port_state', 'type', 'vlan', 'wildmask'] + return generate_dict(virtual_servers, fields) + +def generate_pool_dict(f5, regex): + pools = Pools(f5.get_api(), regex) + fields = ['action_on_service_down', 'active_member_count', + 'aggregate_dynamic_ratio', 'allow_nat_state', + 'allow_snat_state', 'client_ip_tos', 'client_link_qos', + 'description', 'gateway_failsafe_device', + 'ignore_persisted_weight_state', 'lb_method', 'member', + 'minimum_active_member', 'minimum_up_member', + 'minimum_up_member_action', 'minimum_up_member_enabled_state', + 'monitor_association', 'monitor_instance', 'object_status', + 'profile', 'queue_depth_limit', + 'queue_on_connection_limit_state', 'queue_time_limit', + 'reselect_tries', 'server_ip_tos', 'server_link_qos', + 'simple_timeout', 'slow_ramp_time'] + return generate_dict(pools, fields) + +def generate_device_dict(f5, regex): + devices = Devices(f5.get_api(), regex) + fields = ['active_modules', 'base_mac_address', 'blade_addresses', + 'build', 'chassis_id', 'chassis_type', 'comment', + 'configsync_address', 'contact', 'description', 'edition', + 'failover_state', 'hostname', 'inactive_modules', 'location', + 'management_address', 'marketing_name', 'multicast_address', + 'optional_modules', 'platform_id', 'primary_mirror_address', + 'product', 'secondary_mirror_address', 'software_version', + 'timelimited_modules', 'timezone', 'unicast_addresses'] + return generate_dict(devices, fields) + +def generate_device_group_dict(f5, regex): + device_groups = DeviceGroups(f5.get_api(), regex) + fields = ['all_preferred_active', 'autosync_enabled_state','description', + 'device', 'full_load_on_sync_state', + 'incremental_config_sync_size_maximum', + 'network_failover_enabled_state', 'sync_status', 'type'] + return generate_dict(device_groups, fields) + +def generate_traffic_group_dict(f5, regex): + traffic_groups = TrafficGroups(f5.get_api(), regex) + fields = ['auto_failback_enabled_state', 'auto_failback_time', + 'default_device', 'description', 'ha_load_factor', + 'ha_order', 'is_floating', 'mac_masquerade_address', + 'unit_id'] + return 
generate_dict(traffic_groups, fields) + +def generate_rule_dict(f5, regex): + rules = Rules(f5.get_api(), regex) + fields = ['definition', 'description', 'ignore_vertification', + 'verification_status'] + return generate_dict(rules, fields) + +def generate_node_dict(f5, regex): + nodes = Nodes(f5.get_api(), regex) + fields = ['address', 'connection_limit', 'description', 'dynamic_ratio', + 'monitor_instance', 'monitor_rule', 'monitor_status', + 'object_status', 'rate_limit', 'ratio', 'session_status'] + return generate_dict(nodes, fields) + +def generate_virtual_address_dict(f5, regex): + virtual_addresses = VirtualAddresses(f5.get_api(), regex) + fields = ['address', 'arp_state', 'auto_delete_state', 'connection_limit', + 'description', 'enabled_state', 'icmp_echo_state', + 'is_floating_state', 'netmask', 'object_status', + 'route_advertisement_state', 'traffic_group'] + return generate_dict(virtual_addresses, fields) + +def generate_address_class_dict(f5, regex): + address_classes = AddressClasses(f5.get_api(), regex) + fields = ['address_class', 'description'] + return generate_dict(address_classes, fields) + +def generate_certificate_dict(f5, regex): + certificates = Certificates(f5.get_api(), regex) + return dict(zip(certificates.get_list(), certificates.get_certificate_list())) + +def generate_key_dict(f5, regex): + keys = Keys(f5.get_api(), regex) + return dict(zip(keys.get_list(), keys.get_key_list())) + +def generate_client_ssl_profile_dict(f5, regex): + profiles = ProfileClientSSL(f5.get_api(), regex) + fields = ['alert_timeout', 'allow_nonssl_state', 'authenticate_depth', + 'authenticate_once_state', 'ca_file', 'cache_size', + 'cache_timeout', 'certificate_file', 'chain_file', + 'cipher_list', 'client_certificate_ca_file', 'crl_file', + 'default_profile', 'description', + 'forward_proxy_ca_certificate_file', 'forward_proxy_ca_key_file', + 'forward_proxy_ca_passphrase', + 'forward_proxy_certificate_extension_include', + 'forward_proxy_certificate_lifespan', + 'forward_proxy_enabled_state', + 'forward_proxy_lookup_by_ipaddr_port_state', 'handshake_timeout', + 'key_file', 'modssl_emulation_state', 'passphrase', + 'peer_certification_mode', 'profile_mode', + 'renegotiation_maximum_record_delay', 'renegotiation_period', + 'renegotiation_state', 'renegotiation_throughput', + 'retain_certificate_state', 'secure_renegotiation_mode', + 'server_name', 'session_ticket_state', 'sni_default_state', + 'sni_require_state', 'ssl_option', 'strict_resume_state', + 'unclean_shutdown_state', 'is_base_profile', 'is_system_profile'] + return generate_dict(profiles, fields) + +def generate_system_info_dict(f5): + system_info = SystemInfo(f5.get_api()) + fields = ['base_mac_address', + 'blade_temperature', 'chassis_slot_information', + 'globally_unique_identifier', 'group_id', + 'hardware_information', + 'marketing_name', + 'product_information', 'pva_version', 'system_id', + 'system_information', 'time', + 'time_zone', 'uptime'] + return generate_simple_dict(system_info, fields) + +def generate_software_list(f5): + software = Software(f5.get_api()) + software_list = software.get_all_software_status() + return software_list + + +def main(): + module = AnsibleModule( + argument_spec = dict( + server = dict(type='str', required=True), + user = dict(type='str', required=True), + password = dict(type='str', required=True), + session = dict(type='bool', default=False), + include = dict(type='list', required=True), + filter = dict(type='str', required=False), + ) + ) + + if not bigsuds_found: + 
        module.fail_json(msg="the python bigsuds module is required")
+
+    server = module.params['server']
+    user = module.params['user']
+    password = module.params['password']
+    session = module.params['session']
+    fact_filter = module.params['filter']
+    if fact_filter:
+        regex = fnmatch.translate(fact_filter)
+    else:
+        regex = None
+    include = map(lambda x: x.lower(), module.params['include'])
+    valid_includes = ('address_class', 'certificate', 'client_ssl_profile',
+                      'device', 'device_group', 'interface', 'key', 'node',
+                      'pool', 'rule', 'self_ip', 'software', 'system_info',
+                      'traffic_group', 'trunk', 'virtual_address',
+                      'virtual_server', 'vlan')
+    include_test = map(lambda x: x in valid_includes, include)
+    if not all(include_test):
+        module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include)))
+
+    try:
+        facts = {}
+
+        if len(include) > 0:
+            f5 = F5(server, user, password, session)
+            saved_active_folder = f5.get_active_folder()
+            saved_recursive_query_state = f5.get_recursive_query_state()
+            if saved_active_folder != "/":
+                f5.set_active_folder("/")
+            if saved_recursive_query_state != "STATE_ENABLED":
+                f5.enable_recursive_query_state()
+
+            if 'interface' in include:
+                facts['interface'] = generate_interface_dict(f5, regex)
+            if 'self_ip' in include:
+                facts['self_ip'] = generate_self_ip_dict(f5, regex)
+            if 'trunk' in include:
+                facts['trunk'] = generate_trunk_dict(f5, regex)
+            if 'vlan' in include:
+                facts['vlan'] = generate_vlan_dict(f5, regex)
+            if 'virtual_server' in include:
+                facts['virtual_server'] = generate_vs_dict(f5, regex)
+            if 'pool' in include:
+                facts['pool'] = generate_pool_dict(f5, regex)
+            if 'device' in include:
+                facts['device'] = generate_device_dict(f5, regex)
+            if 'device_group' in include:
+                facts['device_group'] = generate_device_group_dict(f5, regex)
+            if 'traffic_group' in include:
+                facts['traffic_group'] = generate_traffic_group_dict(f5, regex)
+            if 'rule' in include:
+                facts['rule'] = generate_rule_dict(f5, regex)
+            if 'node' in include:
+                facts['node'] = generate_node_dict(f5, regex)
+            if 'virtual_address' in include:
+                facts['virtual_address'] = generate_virtual_address_dict(f5, regex)
+            if 'address_class' in include:
+                facts['address_class'] = generate_address_class_dict(f5, regex)
+            if 'software' in include:
+                facts['software'] = generate_software_list(f5)
+            if 'certificate' in include:
+                facts['certificate'] = generate_certificate_dict(f5, regex)
+            if 'key' in include:
+                facts['key'] = generate_key_dict(f5, regex)
+            if 'client_ssl_profile' in include:
+                facts['client_ssl_profile'] = generate_client_ssl_profile_dict(f5, regex)
+            if 'system_info' in include:
+                facts['system_info'] = generate_system_info_dict(f5)
+
+            # restore saved state
+            if saved_active_folder and saved_active_folder != "/":
+                f5.set_active_folder(saved_active_folder)
+            if saved_recursive_query_state and \
+               saved_recursive_query_state != "STATE_ENABLED":
+                f5.set_recursive_query_state(saved_recursive_query_state)
+
+        result = {'ansible_facts': facts}
+
+    except Exception, e:
+        module.fail_json(msg="received exception: %s\ntraceback: %s" % (e, traceback.format_exc()))
+
+    module.exit_json(**result)
+
+# include magic from lib/ansible/module_common.py
+#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+main()

From 8420211aee2aee5ee3d9a5f9b58f0f1c456b3719 Mon Sep 17 00:00:00 2001
From: Matthew Makai
Date: Sun, 19 Jan 2014 14:29:31 -0500
Subject: [PATCH 401/772] new module to send text message notifications

---
 library/notification/text | 147 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 147 insertions(+)
 create mode 100644 library/notification/text

diff --git a/library/notification/text b/library/notification/text
new file mode 100644
index 00000000000..3c9a37fb13c
--- /dev/null
+++ b/library/notification/text
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Matt Makai
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+version_added: "1.2"
+module: text
+short_description: Sends a text message to a mobile phone.
+description:
+  - Sends a text message to a phone number through an SMS service. Currently
+    only Twilio is supported.
+notes:
+  - Like the other notification modules, this one requires an external
+    dependency to work. In this case, you'll need a Twilio account with
+    a purchased phone number to send the text message.
+options:
+  sms_service:
+    description:
+      the SMS service to use; currently only Twilio is supported
+    required: false
+    default: twilio
+  account_sid:
+    description:
+      user's account id for Twilio found on the account page
+    required: true
+  auth_token:
+    description: user's authentication token for Twilio found on the account page
+    required: true
+  msg:
+    description:
+      the body of the text message
+    required: true
+  to_number:
+    description:
+      what phone number to send the text message to, format +15551112222
+    required: true
+  from_number:
+    description:
+      what phone number to send the text message from, format +15551112222
+    required: true
+
+requirements: [ urllib, urllib2 ]
+author: Matt Makai
+'''
+
+EXAMPLES = '''
+# send a text message from the local server about the build status to (555) 303 5681
+# note: you have to have purchased the 'from_number' on your Twilio account
+- local_action: text msg="All servers with webserver role are now configured."
+    account_sid={{ twilio_account_sid }}
+    auth_token={{ twilio_auth_token }}
+    from_number=+15552014545 to_number=+15553035681
+
+# send a text message from a server to (555) 111 3232
+# note: you have to have purchased the 'from_number' on your Twilio account
+- text: msg="This server's configuration is now complete."
+ account_sid={{ twilio_account_sid }} + auth_token={{ twilio_auth_token }} + from_number=+15553258899 to_number=+15551113232 + +''' + +# ======================================= +# text module support methods +# +try: + import urllib, urllib2 +except ImportError: + module.fail_json(msg="urllib and urllib2 are required") + +import base64 + + +def post_text(module, sms_service, account_sid, auth_token, msg, + from_number, to_number): + if sms_service == 'twilio': + URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ + % (account_sid,) + AGENT = "Ansible/1.6" + + data = {'From':from_number, 'To':to_number, 'Body':msg} + encoded_data = urllib.urlencode(data) + request = urllib2.Request(URI) + base64string = base64.encodestring('%s:%s' % \ + (account_sid, auth_token)).replace('\n', '') + request.add_header('User-Agent', AGENT) + request.add_header('Content-type', 'application/x-www-form-urlencoded') + request.add_header('Accept', 'application/ansible') + request.add_header('Authorization', 'Basic %s' % base64string) + return urllib2.urlopen(request, encoded_data) + else: + raise Exception('unknown messaging service') + + +# ======================================= +# Main +# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + sms_service=dict(default='twilio', choices=['twilio', ]), + account_sid=dict(required=True), + auth_token=dict(required=True), + msg=dict(required=True), + from_number=dict(required=True), + to_number=dict(required=True), + ), + supports_check_mode=True + ) + + sms_service = module.params['sms_service'] + account_sid = module.params['account_sid'] + auth_token = module.params['auth_token'] + msg = module.params['msg'] + from_number = module.params['from_number'] + to_number = module.params['to_number'] + + try: + response = post_text(module, sms_service, account_sid, auth_token, + msg, from_number, to_number) + except Exception, e: + module.fail_json(msg="unable to send text message to %s" % to_number) + + module.exit_json(msg=msg, changed=False) + +# import module snippets +from ansible.module_utils.basic import * +main() From b41541c62a1d078e36e6c597708dd1b25cda0671 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 16 Mar 2014 16:48:03 -0400 Subject: [PATCH 402/772] Merge conflict. 
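
The diff below replaces in-place dict.update() calls with utils.combine_vars() so
that all host variable layering goes through a single merge path. As a minimal
sketch of the merge semantics involved -- assuming Ansible's default 'replace'
hash behaviour; the real utils.combine_vars() consults the configured
hash_behaviour and can deep-merge, and is the authoritative implementation:

    # Minimal sketch of a combine_vars-style merge under 'replace'
    # behaviour: keys from the later source win, and neither input
    # dict is mutated.
    def combine_vars(a, b):
        result = a.copy()
        result.update(b)
        return result

    merged = combine_vars({'port': 22, 'env': 'prod'}, {'env': 'staging'})
    assert merged == {'port': 22, 'env': 'staging'}

Routing every variable layer through the one function is what lets a deep-merge
setting apply consistently to plugin results, host variables, and parser
variables alike.
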
---
 lib/ansible/inventory/__init__.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py
index 171a4f2a04c..b6f644190f1 100644
--- a/lib/ansible/inventory/__init__.py
+++ b/lib/ansible/inventory/__init__.py
@@ -368,9 +368,9 @@ class Inventory(object):
         vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins ]
         for updated in vars_results:
             if updated is not None:
-                vars.update(updated)
+                vars = utils.combine_vars(vars, updated)
 
-        vars.update(host.get_variables())
+        vars = utils.combine_vars(vars, host.get_variables())
         if self.parser is not None:
             vars = utils.combine_vars(vars, self.parser.get_host_variables(host))
         return vars

From 2215111ec5c954248760fb0c454545101d8a936e Mon Sep 17 00:00:00 2001
From: Matthew Makai
Date: Thu, 23 Jan 2014 15:55:23 -0500
Subject: [PATCH 403/772] adding twilio module for sending text notifications
 in build process

---
 library/notification/{text => twilio} | 52 +++++++++++----------------
 1 file changed, 20 insertions(+), 32 deletions(-)
 rename library/notification/{text => twilio} (68%)

diff --git a/library/notification/text b/library/notification/twilio
similarity index 68%
rename from library/notification/text
rename to library/notification/twilio
index 3c9a37fb13c..70426ba4e36 100644
--- a/library/notification/text
+++ b/library/notification/twilio
@@ -20,22 +20,16 @@
 
 DOCUMENTATION = '''
 ---
-version_added: "1.2"
+version_added: "1.5"
 module: text
-short_description: Sends a text message to a mobile phone.
+short_description: Sends a text message to a mobile phone through Twilio.
 description:
-  - Sends a text message to a phone number through an SMS service. Currently
-    only Twilio is supported.
+  - Sends a text message to a phone number through the Twilio SMS service.
 notes:
   - Like the other notification modules, this one requires an external
     dependency to work. In this case, you'll need a Twilio account with
-    a purchased phone number to send the text message.
+    a purchased or verified phone number to send the text message.
options: - sms_service: - description: - the SMS service to use; currently only Twilio is supported - required: false - default: twilio account_sid: description: user's account id for Twilio found on the account page @@ -88,25 +82,21 @@ except ImportError: import base64 -def post_text(module, sms_service, account_sid, auth_token, msg, - from_number, to_number): - if sms_service == 'twilio': - URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ - % (account_sid,) - AGENT = "Ansible/1.6" +def post_text(module, account_sid, auth_token, msg, from_number, to_number): + URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ + % (account_sid,) + AGENT = "Ansible/1.5" - data = {'From':from_number, 'To':to_number, 'Body':msg} - encoded_data = urllib.urlencode(data) - request = urllib2.Request(URI) - base64string = base64.encodestring('%s:%s' % \ - (account_sid, auth_token)).replace('\n', '') - request.add_header('User-Agent', AGENT) - request.add_header('Content-type', 'application/x-www-form-urlencoded') - request.add_header('Accept', 'application/ansible') - request.add_header('Authorization', 'Basic %s' % base64string) - return urllib2.urlopen(request, encoded_data) - else: - raise Exception('unknown messaging service') + data = {'From':from_number, 'To':to_number, 'Body':msg} + encoded_data = urllib.urlencode(data) + request = urllib2.Request(URI) + base64string = base64.encodestring('%s:%s' % \ + (account_sid, auth_token)).replace('\n', '') + request.add_header('User-Agent', AGENT) + request.add_header('Content-type', 'application/x-www-form-urlencoded') + request.add_header('Accept', 'application/ansible') + request.add_header('Authorization', 'Basic %s' % base64string) + return urllib2.urlopen(request, encoded_data) # ======================================= @@ -117,7 +107,6 @@ def main(): module = AnsibleModule( argument_spec=dict( - sms_service=dict(default='twilio', choices=['twilio', ]), account_sid=dict(required=True), auth_token=dict(required=True), msg=dict(required=True), @@ -127,7 +116,6 @@ def main(): supports_check_mode=True ) - sms_service = module.params['sms_service'] account_sid = module.params['account_sid'] auth_token = module.params['auth_token'] msg = module.params['msg'] @@ -135,8 +123,8 @@ def main(): to_number = module.params['to_number'] try: - response = post_text(module, sms_service, account_sid, auth_token, - msg, from_number, to_number) + response = post_text(module, account_sid, auth_token, msg, + from_number, to_number) except Exception, e: module.fail_json(msg="unable to send text message to %s" % to_number) From 51f90c1712c8575c2cb4d5e70415b0728ca583ad Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 16 Mar 2014 16:45:38 -0400 Subject: [PATCH 404/772] Bump twilio module version_added, changelog. 
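
For reference while reading the rename above: the twilio module authenticates
each request with an HTTP Basic authorization header assembled by hand. A
stand-alone sketch of that construction, in the module's Python 2 idiom -- the
SID and token here are placeholders, not real credentials:

    # Sketch of the Authorization header value built in post_text();
    # account_sid and auth_token below are placeholder strings.
    import base64

    account_sid = 'ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'  # placeholder
    auth_token = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'     # placeholder

    base64string = base64.encodestring(
        '%s:%s' % (account_sid, auth_token)).replace('\n', '')
    auth_header = 'Basic %s' % base64string

base64.encodestring() wraps its output with a newline every 76 characters,
which is why the module strips newlines before placing the value in a header.
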
---
 CHANGELOG.md                | 1 +
 library/notification/twilio | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7d9200d212b..96a2350e8e5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ New Modules:
 * packaging: homebrew_tap (OS X)
 * packaging: homebrew_cask (OS X)
 * notification: nexmo (SMS)
+* notification: twilio (SMS)
 * system: debconf
 * system: ufw
 * system: locale_gen
diff --git a/library/notification/twilio b/library/notification/twilio
index 70426ba4e36..8b9be137747 100644
--- a/library/notification/twilio
+++ b/library/notification/twilio
@@ -20,7 +20,7 @@
 
 DOCUMENTATION = '''
 ---
-version_added: "1.5"
+version_added: "1.6"
 module: text
 short_description: Sends a text message to a mobile phone through Twilio.
 description:

From e4fcc434d14707b175010947e3be9605f4e80095 Mon Sep 17 00:00:00 2001
From: Michael DeHaan
Date: Sun, 16 Mar 2014 16:47:29 -0400
Subject: [PATCH 405/772] Version bump, bigip_facts + changelog

---
 CHANGELOG.md                           | 1 +
 library/net_infrastructure/bigip_facts | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 96a2350e8e5..175904cb9c9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,7 @@ New Modules:
 * system: debconf
 * system: ufw
 * system: locale_gen
+* net_infrastructure: bigip_facts
 * cloud: digital_ocean_domain
 * cloud: digital_ocean_sshkey
 * cloud: rax_identity
diff --git a/library/net_infrastructure/bigip_facts b/library/net_infrastructure/bigip_facts
index 9be820dc764..3a7a4533f69 100644
--- a/library/net_infrastructure/bigip_facts
+++ b/library/net_infrastructure/bigip_facts
@@ -24,7 +24,7 @@ module: bigip_facts
 short_description: "Collect facts from F5 BIG-IP devices"
 description:
    - "Collect facts from F5 BIG-IP devices via iControl SOAP API"
-version_added: "1.5"
+version_added: "1.6"
 author: Matt Hite
 notes:
    - "Requires BIG-IP software version >= 11.4"

From 1cae0a6801e0f9cfdffca020ee79298060e39718 Mon Sep 17 00:00:00 2001
From: Gabe Mulley
Date: Sat, 4 Jan 2014 11:33:43 -0500
Subject: [PATCH 406/772] add a module for update-alternatives

---
 library/system/alternatives | 137 ++++++++++++++++++++++++++++++++++++
 1 file changed, 137 insertions(+)
 create mode 100755 library/system/alternatives

diff --git a/library/system/alternatives b/library/system/alternatives
new file mode 100755
index 00000000000..3cbf4c83157
--- /dev/null
+++ b/library/system/alternatives
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to manage symbolic link alternatives.
+(c) 2014, Gabe Mulley
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: alternatives
+short_description: Manages symbolic links
+description:
+  - Manages symbolic links using the 'update-alternatives' tool provided on debian-like systems.
+  - Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
+version_added: "1.5" +options: + name: + description: + - The generic name of the link. + required: true + path: + description: + - The path to the real executable that the link should point to. + required: true + link: + description: + - The path to the symbolic link that should point to the real executable. + required: false +requirements: [ update-alternatives ] +''' + +EXAMPLES = ''' +- name: correct java version selected + alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java + +- name: alternatives link created + alternatives: name=hadoop-conf link=/etc/hadoop/conf path=/etc/hadoop/conf.ansible +''' + +UPDATE_ALTERNATIVES = '/usr/sbin/update-alternatives' +DEFAULT_LINK_PRIORITY = 50 + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + path = dict(required=True), + link = dict(required=False), + ) + ) + + params = module.params + name = params['name'] + path = params['path'] + link = params['link'] + + current_path = None + all_alternatives = [] + + (rc, query_output, query_error) = module.run_command( + [UPDATE_ALTERNATIVES, '--query', name] + ) + + # Gather the current setting and all alternatives from the query output. + # Query output should look something like this: + + # Name: java + # Link: /usr/bin/java + # Slaves: + # java.1.gz /usr/share/man/man1/java.1.gz + # Status: manual + # Best: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java + # Value: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java + + # Alternative: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java + # Priority: 1061 + # Slaves: + # java.1.gz /usr/lib/jvm/java-6-openjdk-amd64/jre/man/man1/java.1.gz + + # Alternative: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java + # Priority: 1071 + # Slaves: + # java.1.gz /usr/lib/jvm/java-7-openjdk-amd64/jre/man/man1/java.1.gz + + if rc == 0: + for line in query_output.splitlines(): + split_line = line.split(':') + if len(split_line) == 2: + key = split_line[0] + value = split_line[1].strip() + if key == 'Value': + current_path = value + elif key == 'Alternative': + all_alternatives.append(value) + + if current_path != path: + try: + # install the requested path if necessary + if path not in all_alternatives: + module.run_command( + [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)], + check_rc=True + ) + + # select the requested path + module.run_command( + [UPDATE_ALTERNATIVES, '--set', name, path], + check_rc=True + ) + + module.exit_json(changed=True) + except subprocess.CalledProcessError as cpe: + module.fail_json(msg=str(dir(cpe))) + else: + module.exit_json(changed=False) + + +# import module snippets +from ansible.module_utils.basic import * +main() From b471d84e522621c65a85b3f100563d33ae436fa7 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 16 Mar 2014 16:54:45 -0400 Subject: [PATCH 407/772] Version bump for alternatives module, changelog. 
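
The module being version-bumped below drives all of its logic off
`update-alternatives --query` output. A self-contained sketch of the same
'Key: value' parsing, run against a captured sample (the sample text is
illustrative; real output varies by host):

    # Self-contained illustration of the query parsing used by the
    # alternatives module; query_output stands in for the captured
    # stdout of `update-alternatives --query java`.
    query_output = '\n'.join([
        'Name: java',
        'Link: /usr/bin/java',
        'Status: manual',
        'Value: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java',
        '',
        'Alternative: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java',
        'Priority: 1061',
        '',
        'Alternative: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java',
        'Priority: 1071',
    ])

    current_path = None
    all_alternatives = []
    for line in query_output.splitlines():
        split_line = line.split(':')
        if len(split_line) == 2:
            key = split_line[0]
            value = split_line[1].strip()
            if key == 'Value':
                current_path = value
            elif key == 'Alternative':
                all_alternatives.append(value)

    assert current_path == '/usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java'
    assert len(all_alternatives) == 2

Lines without exactly one colon -- such as the slave file listings in real
query output -- fail the len(split_line) == 2 guard and are simply ignored,
which is why the parse only keys off 'Value' and 'Alternative'.
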
---
 CHANGELOG.md                | 1 +
 library/system/alternatives | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 175904cb9c9..bfde85c48e9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,7 @@ New Modules:
 * system: debconf
 * system: ufw
 * system: locale_gen
+* system: alternatives
 * net_infrastructure: bigip_facts
 * cloud: digital_ocean_domain
 * cloud: digital_ocean_sshkey
diff --git a/library/system/alternatives b/library/system/alternatives
index 3cbf4c83157..503f9745f12 100755
--- a/library/system/alternatives
+++ b/library/system/alternatives
@@ -24,11 +24,11 @@ along with Ansible. If not, see <http://www.gnu.org/licenses/>.
 DOCUMENTATION = '''
 ---
 module: alternatives
-short_description: Manages symbolic links
+short_description: Manages alternative programs for common commands
 description:
   - Manages symbolic links using the 'update-alternatives' tool provided on debian-like systems.
   - Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
-version_added: "1.5"
+version_added: "1.6"
 options:
   name:
     description:
      - The generic name of the link.

From fe696e47200ce068ebcc64c938e4fa4f0d1cb885 Mon Sep 17 00:00:00 2001
From: Michael DeHaan
Date: Sun, 16 Mar 2014 17:08:26 -0400
Subject: [PATCH 408/772] Merge commit.

---
 lib/ansible/color.py     | 3 +--
 lib/ansible/constants.py | 5 +----
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/lib/ansible/color.py b/lib/ansible/color.py
index e5f6f4d2bae..069684f16c0 100644
--- a/lib/ansible/color.py
+++ b/lib/ansible/color.py
@@ -15,7 +15,6 @@
 # You should have received a copy of the GNU General Public License
 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
 
-import os
 import sys
 
 import constants
@@ -37,7 +36,7 @@ else:
     # curses returns an error (e.g. could not find terminal)
     ANSIBLE_COLOR=False
 
-if os.getenv("ANSIBLE_FORCE_COLOR") is not None:
+if constants.ANSIBLE_FORCE_COLOR:
     ANSIBLE_COLOR=True
 
 # --- begin "pretty"
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 9d3f37c180d..f9cd208c4ad 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -143,10 +143,7 @@ DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', '
 DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '/usr/share/ansible_plugins/filter_plugins')
 DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', ''))
 
-# URL Arguments for generic module urllib2 use
-DEFAULT_HTTP_USER_AGENT = get_config(p, DEFAULTS, 'http_user_agent', 'ANSIBLE_HTTP_USER_AGENT', 'ansible-agent')
-DEFAULT_CA_FILE_PATH = shell_expand_path(get_config(p, DEFAULTS, 'ca_file_path', 'ANSIBLE_CA_FILE_PATH', ''))
-
+ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True)
 ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True)
 ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True)
 DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True)

From 47aff528b906657b3a4b2e631c461230552c0e02 Mon Sep 17 00:00:00 2001
From: Michael DeHaan
Date: Sun, 16 Mar 2014 17:10:37 -0400
Subject: [PATCH 409/772] Amend wait_for docs to reference version of feature
 addition.
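
Since the doc amendment below distinguishes the long-standing regex wait from
the 1.6 file wait, here are a few representative invocations in the style of
the module's EXAMPLES (the ports and paths are illustrative):

    - wait_for: port=8000 delay=10
    - wait_for: path=/var/log/example.log search_regex=started
    - wait_for: path=/var/lock/example.lock state=absent

The first blocks until the port accepts connections, the second until the
regex matches the file's contents, and the third (the 1.6 addition) until the
file disappears from the filesystem.
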
--- library/utilities/wait_for | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/library/utilities/wait_for b/library/utilities/wait_for index 88092d4e6b3..3a381f06944 100644 --- a/library/utilities/wait_for +++ b/library/utilities/wait_for @@ -33,9 +33,11 @@ description: are not immediately available after their init scripts return - which is true of certain Java application servers. It is also useful when starting guests with the M(virt) module and - needing to pause until they are ready. This module can + needing to pause until they are ready. + - This module can also be used to wait for a regex match a string to be present in a file. + - In 1.6 and later, this module can also be used to wait for a file to be available or absent on the - filesystem or with a regex match a string to be present in a file. + filesystem. version_added: "0.7" options: host: From 0981488df74b27beaa222e7cd1b1912a920db568 Mon Sep 17 00:00:00 2001 From: Jonathan Dray Date: Mon, 17 Mar 2014 02:24:12 +0100 Subject: [PATCH 410/772] fix: bug in apt when python-apt is not present apt-get was not called due to unsafe call --- library/packaging/apt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/apt b/library/packaging/apt index 5e041c769bd..7711da1b1d9 100644 --- a/library/packaging/apt +++ b/library/packaging/apt @@ -377,7 +377,7 @@ def main(): if not HAS_PYTHON_APT: try: - module.run_command('apt-get update && apt-get install python-apt -y -q') + module.run_command('apt-get update && apt-get install python-apt -y -q', use_unsafe_shell=True) global apt, apt_pkg import apt import apt_pkg From dae519b723e728d58c4371075190050f054c6f3e Mon Sep 17 00:00:00 2001 From: Zeekin Date: Thu, 6 Mar 2014 12:14:53 +1000 Subject: [PATCH 411/772] Improvements to ec2 autoscaling modules * Added desired_capacity and vpc_zone_identifier to ec2_asg * Use ec2_argument_spec() method and then remove unnecessary declarations from argument_spec * Remove AWS_REGIONS declaration * Rename block_device_mappings to volumes to be consistent with ec2 * Remove all pep8 warnings except line length and continuation indent * Use updated module_utils/ec2.py to add profile and security_token support * Remove mandatory arguments for delete to make launchconfig deletion work * Handle existing launch configurations better * Improve output information * Improve documentation --- library/cloud/ec2_asg | 143 +++++++++++++++++++++++++++--------------- library/cloud/ec2_lc | 118 ++++++++++++++++++++++++---------- 2 files changed, 177 insertions(+), 84 deletions(-) diff --git a/library/cloud/ec2_asg b/library/cloud/ec2_asg index 15df0ed41b9..0e43e6b2524 100644 --- a/library/cloud/ec2_asg +++ b/library/cloud/ec2_asg @@ -37,23 +37,27 @@ options: load_balancers: description: - List of ELB names to use for the group - required: true + required: false availability_zones: description: - List of availability zone names in which to create the group. - required: true + required: false launch_config_name: description: - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these. - required: true + required: false min_size: description: - Minimum number of instances in group - required: true + required: false max_size: description: - Maximum number of instances in group - required: true + required: false + desired_capacity: + description: + - Desired number of instances in group + required: false aws_secret_key: description: - AWS secret key. 
If not set then the value of the AWS_SECRET_KEY environment variable is used. @@ -71,16 +75,23 @@ options: - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: false aliases: ['aws_region', 'ec2_region'] + vpc_zone_identifier: + description: + - List of VPC subnets to use + required: false + default: None """ EXAMPLES = ''' -- ec2_asg: > +- ec2_asg: name: special load_balancers: 'lb1,lb2' availability_zones: 'eu-west-1a,eu-west-1b' launch_config_name: 'lc-1' min_size: 1 max_size: 10 + desired_capacity: 5 + vpc_zone_identifier: 'subnet-abcd1234,subnet-1a2b3c4d' ''' import sys @@ -97,40 +108,72 @@ except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] + +def enforce_required_arguments(module): + ''' As many arguments are not required for autoscale group deletion + they cannot be mandatory arguments for the module, so we enforce + them here ''' + missing_args = [] + for arg in ('min_size', 'max_size', 'launch_config_name', 'availability_zones'): + if module.params[arg] is None: + missing_args.append(arg) + if missing_args: + module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args)) def create_autoscaling_group(connection, module): + enforce_required_arguments(module) + group_name = module.params.get('name') load_balancers = module.params['load_balancers'] availability_zones = module.params['availability_zones'] launch_config_name = module.params.get('launch_config_name') - min_size = module.params.get('min_size') - max_size = module.params.get('max_size') - - launch_configs = connection.get_all_launch_configurations(name=[launch_config_name]) - - ag = AutoScalingGroup( - group_name=group_name, - load_balancers=load_balancers, - availability_zones=availability_zones, - launch_config=launch_configs[0], - min_size=min_size, - max_size=max_size, - connection=connection) - - try: - connection.create_auto_scaling_group(ag) - module.exit_json(changed=True) - except BotoServerError, e: - module.exit_json(changed=False, msg=str(e)) + min_size = module.params['min_size'] + max_size = module.params['max_size'] + desired_capacity = module.params.get('desired_capacity') + vpc_zone_identifier = module.params.get('vpc_zone_identifier') + + launch_configs = connection.get_all_launch_configurations(names=[launch_config_name]) + + as_groups = connection.get_all_groups(names=[group_name]) + + if not as_groups: + ag = AutoScalingGroup( + group_name=group_name, + load_balancers=load_balancers, + availability_zones=availability_zones, + launch_config=launch_configs[0], + min_size=min_size, + max_size=max_size, + desired_capacity=desired_capacity, + vpc_zone_identifier=vpc_zone_identifier, + connection=connection) + + try: + connection.create_auto_scaling_group(ag) + module.exit_json(changed=True) + except BotoServerError, e: + module.fail_json(msg=str(e)) + else: + as_group = as_groups[0] + changed = False + for attr in ('launch_config_name', 'max_size', 'min_size', 'desired_capacity', + 'vpc_zone_identifier', 'availability_zones'): + if getattr(as_group, attr) != module.params.get(attr): + changed = True + setattr(as_group, attr, module.params.get(attr)) + # handle loadbalancers separately because None != [] + load_balancers = module.params.get('load_balancers') or [] + if as_group.load_balancers != 
load_balancers: + changed = True + as_group.load_balancers = module.params.get('load_balancers') + + try: + if changed: + as_group.update() + module.exit_json(changed=changed) + except BotoServerError, e: + module.fail_json(msg=str(e)) def delete_autoscaling_group(connection, module): @@ -156,35 +199,33 @@ def delete_autoscaling_group(connection, module): def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True, type='str'), - load_balancers = dict(required=False, type='list'), - availability_zones = dict(required=True, type='list'), - launch_config_name = dict(required=True, type='str'), - min_size = dict(required=True, type='int'), - max_size = dict(required=True, type='int'), - state = dict(default='present', choices=['present', 'absent']), - region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), - ec2_url = dict(), - ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True), - ec2_access_key = dict(aliases=['aws_access_key', 'access_key']), + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name=dict(required=True, type='str'), + load_balancers=dict(type='list'), + availability_zones=dict(type='list'), + launch_config_name=dict(type='str'), + min_size=dict(type='int'), + max_size=dict(type='int'), + desired_capacity=dict(type='int'), + vpc_zone_identifier=dict(type='str'), + state=dict(default='present', choices=['present', 'absent']), ) ) - - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + module = AnsibleModule(argument_spec=argument_spec) state = module.params.get('state') + region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: - connection = boto.ec2.autoscale.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) + connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) + module.fail_json(msg=str(e)) if state == 'present': create_autoscaling_group(connection, module) elif state == 'absent': delete_autoscaling_group(connection, module) - main() diff --git a/library/cloud/ec2_lc b/library/cloud/ec2_lc index 0f2dc26a234..ff24924aaa6 100644 --- a/library/cloud/ec2_lc +++ b/library/cloud/ec2_lc @@ -37,15 +37,15 @@ options: image_id: description: - The AMI unique identifier to be used for the group - required: true + required: false key_name: description: - The SSH key name to be used for access to managed instances - required: true + required: false security_groups: description: - A list of security groups into which instances should be found - required: true + required: false aws_secret_key: description: - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. @@ -63,6 +63,18 @@ options: - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: false aliases: ['aws_region', 'ec2_region'] + volumes: + description: + - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. 
+ required: false + default: null + aliases: [] + user_data: + description: + - opaque blob of data which is made available to the ec2 instance + required: false + default: null + aliases: [] """ EXAMPLES = ''' @@ -81,6 +93,7 @@ from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * try: + from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping import boto.ec2.autoscale from boto.ec2.autoscale import LaunchConfiguration from boto.exception import BotoServerError @@ -88,14 +101,26 @@ except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] + +def create_block_device(module, volume): + # Not aware of a way to determine this programatically + # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/ + MAX_IOPS_TO_SIZE_RATIO = 30 + if 'snapshot' not in volume and 'ephemeral' not in volume: + if 'volume_size' not in volume: + module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume') + if 'snapshot' in volume: + if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume: + module.fail_json(msg='io1 volumes must have an iops value set') + if 'ephemeral' in volume: + if 'snapshot' in volume: + module.fail_json(msg='Cannot set both ephemeral and snapshot') + return BlockDeviceType(snapshot_id=volume.get('snapshot'), + ephemeral_name=volume.get('ephemeral'), + size=volume.get('volume_size'), + volume_type=volume.get('device_type'), + delete_on_termination=volume.get('delete_on_termination', False), + iops=volume.get('iops')) def create_launch_config(connection, module): @@ -103,23 +128,48 @@ def create_launch_config(connection, module): image_id = module.params.get('image_id') key_name = module.params.get('key_name') security_groups = module.params['security_groups'] + user_data = module.params.get('user_data') + volumes = module.params['volumes'] + instance_type = module.params.get('instance_type') + bdm = BlockDeviceMapping() + + if volumes: + for volume in volumes: + if 'device_name' not in volume: + module.fail_json(msg='Device name must be set for volume') + # Minimum volume size is 1GB. 
We'll use volume size explicitly set to 0 + # to be a signal not to create this volume + if 'volume_size' not in volume or int(volume['volume_size']) > 0: + bdm[volume['device_name']] = create_block_device(module, volume) lc = LaunchConfiguration( name=name, image_id=image_id, key_name=key_name, - security_groups=security_groups) - - try: - connection.create_launch_configuration(lc) - module.exit_json(changed=True) - except BotoServerError, e: - module.exit_json(changed=False, msg=str(e)) + security_groups=security_groups, + user_data=user_data, + block_device_mappings=[bdm], + instance_type=instance_type) + + launch_configs = connection.get_all_launch_configurations(names=[name]) + changed = False + if not launch_configs: + try: + connection.create_launch_configuration(lc) + launch_configs = connection.get_all_launch_configurations(names=[name]) + changed = True + except BotoServerError, e: + module.fail_json(msg=str(e)) + result = launch_configs[0] + + module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time), + image_id=result.image_id, arn=result.launch_configuration_arn, + security_groups=result.security_groups, instance_type=instance_type) def delete_launch_config(connection, module): name = module.params.get('name') - launch_configs = connection.get_all_launch_configurations(name=[name]) + launch_configs = connection.get_all_launch_configurations(names=[name]) if launch_configs: launch_configs[0].delete() module.exit_json(changed=True) @@ -128,26 +178,28 @@ def delete_launch_config(connection, module): def main(): - module = AnsibleModule( - argument_spec = dict( - name = dict(required=True, type='str'), - image_id = dict(required=True, type='str'), - key_name = dict(required=True, type='str'), - security_groups = dict(required=True, type='list'), - state = dict(default='present', choices=['present', 'absent']), - region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), - ec2_url = dict(), - ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True), - ec2_access_key = dict(aliases=['aws_access_key', 'access_key']), + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name=dict(required=True, type='str'), + image_id=dict(type='str'), + key_name=dict(type='str'), + security_groups=dict(type='list'), + user_data=dict(type='str'), + volumes=dict(type='list'), + instance_type=dict(type='str'), + state=dict(default='present', choices=['present', 'absent']), ) ) - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + module = AnsibleModule(argument_spec=argument_spec) + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: - connection = boto.ec2.autoscale.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) + connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) + module.fail_json(msg=str(e)) state = module.params.get('state') From 77229553a3e22fb2f404eb064397875b5268ad0c Mon Sep 17 00:00:00 2001 From: Greg Dallavalle Date: Sun, 16 Mar 2014 20:41:03 -0500 Subject: [PATCH 412/772] fetch_url: Avoid credential stripping for FTP-scheme URLs --- lib/ansible/module_utils/urls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 053dfc041c8..41b1135855f 100644 --- a/lib/ansible/module_utils/urls.py +++ 
b/lib/ansible/module_utils/urls.py @@ -204,7 +204,7 @@ def fetch_url(module, url, data=None, headers=None, method=None, ssl_handler = SSLValidationHandler(module, hostname, port) handlers.append(ssl_handler) - if '@' in parsed[1]: + if parsed[0] != 'ftp' and '@' in parsed[1]: credentials, netloc = parsed[1].split('@', 1) if ':' in credentials: username, password = credentials.split(':', 1) From 280498a7e595bc948bc528b9037f81c539adbcd8 Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Mon, 17 Mar 2014 10:51:04 +0100 Subject: [PATCH 413/772] Pass through vault_password when parsing host/group vars as directories. Fixes a bug where vault_password parameter was not passed through in _load_vars_from_folder() modified: lib/ansible/inventory/vars_plugins/group_vars.py --- lib/ansible/inventory/vars_plugins/group_vars.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/inventory/vars_plugins/group_vars.py b/lib/ansible/inventory/vars_plugins/group_vars.py index 6be1d1f6edd..93edceeecb5 100644 --- a/lib/ansible/inventory/vars_plugins/group_vars.py +++ b/lib/ansible/inventory/vars_plugins/group_vars.py @@ -86,7 +86,7 @@ def _load_vars_from_path(path, results, vault_password=None): if stat.S_ISDIR(pathstat.st_mode): # support organizing variables across multiple files in a directory - return True, _load_vars_from_folder(path, results) + return True, _load_vars_from_folder(path, results, vault_password=vault_password) # regular file elif stat.S_ISREG(pathstat.st_mode): @@ -105,7 +105,7 @@ def _load_vars_from_path(path, results, vault_password=None): raise errors.AnsibleError("Expected a variable file or directory " "but found a non-file object at path %s" % (path, )) -def _load_vars_from_folder(folder_path, results): +def _load_vars_from_folder(folder_path, results, vault_password=None): """ Load all variables within a folder recursively. """ @@ -126,7 +126,7 @@ def _load_vars_from_folder(folder_path, results): # do not parse hidden files or dirs, e.g. .svn/ paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')] for path in paths: - _found, results = _load_vars_from_path(path, results) + _found, results = _load_vars_from_path(path, results, vault_password=vault_password) return results From caa0b02962bb3b8680a431595f766a3a3d1f9a4e Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 17 Mar 2014 09:10:33 -0400 Subject: [PATCH 414/772] Update formatting in module example. --- library/packaging/rhn_register | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/rhn_register b/library/packaging/rhn_register index 28d91a6a027..85ccacdac2d 100644 --- a/library/packaging/rhn_register +++ b/library/packaging/rhn_register @@ -58,7 +58,7 @@ EXAMPLES = ''' # Register as user (joe_user) with password (somepass) against a satellite # server specified by (server_url). 
-- rhn_register: +- rhn_register: > state=present username=joe_user password=somepass From b14c658532a74f1fc5216cd921973b1100e86253 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 17 Mar 2014 10:38:22 -0400 Subject: [PATCH 415/772] Fix concatenation for with_file --- lib/ansible/utils/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 476622e6766..2543c5776ac 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1023,7 +1023,7 @@ def listify_lookup_plugin_terms(terms, basedir, inject): # not sure why the "/" is in above code :) try: new_terms = template.template(basedir, "{{ %s }}" % terms, inject) - if isinstance(new_terms, basestring) and "{{" in new_terms.find: + if isinstance(new_terms, basestring) and new_terms.find("{{") != -1: pass else: terms = new_terms From 4ef2603a6226e68db4ff50709abd5bb7cf92e064 Mon Sep 17 00:00:00 2001 From: James Laska Date: Mon, 17 Mar 2014 10:41:02 -0400 Subject: [PATCH 416/772] Add yum+apt integration tests for auto dependency installalation The `apt` and `yum` modules will automatically install python dependencies. This change updates the existing integration tests to test whether auto-install of dependencies is functioning properly. --- test/integration/roles/test_apt/tasks/apt.yml | 20 +++++++++++++++---- test/integration/roles/test_yum/tasks/yum.yml | 17 ++++++++++++++++ 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/test/integration/roles/test_apt/tasks/apt.yml b/test/integration/roles/test_apt/tasks/apt.yml index 151f5313595..be0facdf098 100644 --- a/test/integration/roles/test_apt/tasks/apt.yml +++ b/test/integration/roles/test_apt/tasks/apt.yml @@ -1,4 +1,19 @@ -# UNINSTALL +# UNINSTALL 'python-apt' +# The `apt` module has the smarts to auto-install `python-apt`. To test, we +# will first uninstall `python-apt`. +- name: check python-apt with dpkg + shell: dpkg -s python-apt + register: dpkg_result + ignore_errors: true + +- name: uninstall python-apt with apt + apt: pkg=python-apt state=absent purge=yes + register: apt_result + when: dpkg_result|success + +# UNINSTALL 'hello' +# With 'python-apt' uninstalled, the first call to 'apt' should install +# python-apt. - name: uninstall hello with apt apt: pkg=hello state=absent purge=yes register: apt_result @@ -8,9 +23,6 @@ failed_when: False register: dpkg_result -- debug: var=apt_result -- debug: var=dpkg_result - - name: verify uninstallation of hello assert: that: diff --git a/test/integration/roles/test_yum/tasks/yum.yml b/test/integration/roles/test_yum/tasks/yum.yml index 7c0b089ace5..316b8b3a77f 100644 --- a/test/integration/roles/test_yum/tasks/yum.yml +++ b/test/integration/roles/test_yum/tasks/yum.yml @@ -1,4 +1,21 @@ +# UNINSTALL 'yum-utils' +# The `yum` module has the smarts to auto-install `yum-utils`. To test, we +# will first uninstall `yum-utils`. +- name: check yum-utils with rpm + shell: rpm -q yum-utils + register: rpm_result + ignore_errors: true + +# Don't uninstall yum-utils with the `yum` module, it would be bad. The `yum` +# module does some `repoquery` magic after removing a package. It fails when you +# remove `yum-utils. +- name: uninstall yum-utils with shell + shell: yum -y remove yum-utils + when: rpm_result|success + # UNINSTALL +# With 'yum-utils' uninstalled, the first call to 'yum' should install +# yum-utils. 
- name: uninstall sos yum: name=sos state=removed register: yum_result From 12c812f030a6e5882544567990d81d0c1f882230 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 17 Mar 2014 10:00:51 -0500 Subject: [PATCH 417/772] Adding a man page for ansible-vault Fixes #6471 --- Makefile | 2 +- docs/man/man1/ansible-vault.1 | 103 ++++++++++++++++++ docs/man/man1/ansible-vault.1.asciidoc.in | 126 ++++++++++++++++++++++ 3 files changed, 230 insertions(+), 1 deletion(-) create mode 100644 docs/man/man1/ansible-vault.1 create mode 100644 docs/man/man1/ansible-vault.1.asciidoc.in diff --git a/Makefile b/Makefile index 41d80a13c3b..dc2a910630a 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,7 @@ OS = $(shell uname -s) # Manpages are currently built with asciidoc -- would like to move to markdown # This doesn't evaluate until it's called. The -D argument is the # directory of the target file ($@), kinda like `dirname`. -MANPAGES := docs/man/man1/ansible.1 docs/man/man1/ansible-playbook.1 docs/man/man1/ansible-pull.1 docs/man/man1/ansible-doc.1 docs/man/man1/ansible-galaxy.1 +MANPAGES := docs/man/man1/ansible.1 docs/man/man1/ansible-playbook.1 docs/man/man1/ansible-pull.1 docs/man/man1/ansible-doc.1 docs/man/man1/ansible-galaxy.1 docs/man/man1/ansible-vault.1 ifneq ($(shell which a2x 2>/dev/null),) ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $< ASCII2HTMLMAN = a2x -D docs/html/man/ -d manpage -f xhtml diff --git a/docs/man/man1/ansible-vault.1 b/docs/man/man1/ansible-vault.1 new file mode 100644 index 00000000000..cced9f1bcfd --- /dev/null +++ b/docs/man/man1/ansible-vault.1 @@ -0,0 +1,103 @@ +'\" t +.\" Title: ansible-vault +.\" Author: [see the "AUTHOR" section] +.\" Generator: DocBook XSL Stylesheets v1.78.1 +.\" Date: 03/17/2014 +.\" Manual: System administration commands +.\" Source: Ansible 1.6 +.\" Language: English +.\" +.TH "ANSIBLE\-VAULT" "1" "03/17/2014" "Ansible 1\&.6" "System administration commands" +.\" ----------------------------------------------------------------- +.\" * Define some portability stuff +.\" ----------------------------------------------------------------- +.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.\" http://bugs.debian.org/507673 +.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html +.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.ie \n(.g .ds Aq \(aq +.el .ds Aq ' +.\" ----------------------------------------------------------------- +.\" * set default formatting +.\" ----------------------------------------------------------------- +.\" disable hyphenation +.nh +.\" disable justification (adjust text to left margin only) +.ad l +.\" ----------------------------------------------------------------- +.\" * MAIN CONTENT STARTS HERE * +.\" ----------------------------------------------------------------- +.SH "NAME" +ansible-vault \- manage encrypted YAML data\&. +.SH "SYNOPSIS" +.sp +ansible\-vault [create|decrypt|edit|encrypt|rekey] [\-\-help] [options] file_name +.SH "DESCRIPTION" +.sp +\fBansible\-vault\fR can encrypt any structured data file used by Ansible\&. This can include \fBgroup_vars/\fR or \fBhost_vars/\fR inventory variables, variables loaded by \fBinclude_vars\fR or \fBvars_files\fR, or variable files passed on the ansible\-playbook command line with \fB\-e @file\&.yml\fR or \fB\-e @file\&.json\fR\&. Role variables and defaults are also included! +.sp +Because Ansible tasks, handlers, and so on are also data, these can also be encrypted with vault\&. 
If you\(cqd like to not betray what variables you are even using, you can go so far as to keep an individual task file entirely encrypted\&.
+.SH "COMMON OPTIONS"
+.sp
+The following options are available to all sub\-commands:
+.PP
+\fB\-\-vault\-password\-file=\fR\fIFILE\fR
+.RS 4
+A file containing the vault password to be used during the encryption/decryption steps\&. Be sure to keep this file secured if it is used\&.
+.RE
+.PP
+\fB\-h\fR, \fB\-\-help\fR
+.RS 4
+Show a help message related to the given sub\-command\&.
+.RE
+.PP
+\fB\-\-debug\fR
+.RS 4
+Enable debugging output for troubleshooting\&.
+.RE
+.SH "CREATE"
+.sp
+\fB$ ansible\-vault create [options] FILE\fR
+.sp
+The \fBcreate\fR sub\-command is used to initialize a new encrypted file\&.
+.sp
+First you will be prompted for a password\&. The password used with vault currently must be the same for all files you wish to use together at the same time\&.
+.sp
+After providing a password, the tool will launch whatever editor you have defined with $EDITOR, and defaults to vim\&. Once you are done with the editor session, the file will be saved as encrypted data\&.
+.sp
+The default cipher is AES (which is shared\-secret based)\&.
+.SH "EDIT"
+.sp
+\fB$ ansible\-vault edit [options] FILE\fR
+.sp
+The \fBedit\fR sub\-command is used to modify a file which was previously encrypted using ansible\-vault\&.
+.sp
+This command will decrypt the file to a temporary file and allow you to edit the file, saving it back when done and removing the temporary file\&.
+.SH "REKEY"
+.sp
+\fB$ ansible\-vault rekey [options] FILE_1 [FILE_2, \&..., FILE_N]\fR
+.sp
+The \fBrekey\fR command is used to change the password on vault\-encrypted files\&. This command can update multiple files at once, and will prompt for both the old and new passwords before modifying any data\&.
+.SH "ENCRYPT"
+.sp
+\fB$ ansible\-vault encrypt [options] FILE_1 [FILE_2, \&..., FILE_N]\fR
+.sp
+The \fBencrypt\fR sub\-command is used to encrypt pre\-existing data files\&. As with the \fBrekey\fR command, you can specify multiple files in one command\&.
+.SH "DECRYPT"
+.sp
+\fB$ ansible\-vault decrypt [options] FILE_1 [FILE_2, \&..., FILE_N]\fR
+.sp
+The \fBdecrypt\fR sub\-command is used to remove all encryption from data files\&. The files will be stored as plain\-text YAML once again, so be sure that you do not run this command on data files with active passwords or other sensitive data\&. In most cases, users will want to use the \fBedit\fR sub\-command to modify the files securely\&.
+.SH "AUTHOR"
+.sp
+Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&.
+.SH "COPYRIGHT"
+.sp
+Copyright \(co 2014, Michael DeHaan
+.sp
+Ansible is released under the terms of the GPLv3 License\&.
+.SH "SEE ALSO"
+.sp
+\fBansible\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1)
+.sp
+Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible
diff --git a/docs/man/man1/ansible-vault.1.asciidoc.in b/docs/man/man1/ansible-vault.1.asciidoc.in
new file mode 100644
index 00000000000..daccd8772f4
--- /dev/null
+++ b/docs/man/man1/ansible-vault.1.asciidoc.in
@@ -0,0 +1,126 @@
+ansible-vault(1)
+================
+:doctype: manpage
+:man source: Ansible
+:man version: %VERSION%
+:man manual: System administration commands
+
+NAME
+----
+ansible-vault - manage encrypted YAML data.
+
+
+SYNOPSIS
+--------
+ansible-vault [create|decrypt|edit|encrypt|rekey] [--help] [options] file_name
+
+
+DESCRIPTION
+-----------
+
+*ansible-vault* can encrypt any structured data file used by Ansible. This can include
+*group_vars/* or *host_vars/* inventory variables, variables loaded by *include_vars* or
+*vars_files*, or variable files passed on the ansible-playbook command line with
+*-e @file.yml* or *-e @file.json*. Role variables and defaults are also included!
+
+Because Ansible tasks, handlers, and so on are also data, these can also be encrypted with
+vault. If you’d like to not betray what variables you are even using, you can go so far as
+to keep an individual task file entirely encrypted.
+
+
+COMMON OPTIONS
+--------------
+
+The following options are available to all sub-commands:
+
+*--vault-password-file=*'FILE'::
+
+A file containing the vault password to be used during the encryption/decryption
+steps. Be sure to keep this file secured if it is used.
+
+*-h*, *--help*::
+
+Show a help message related to the given sub-command.
+
+*--debug*::
+
+Enable debugging output for troubleshooting.
+
+CREATE
+------
+
+*$ ansible-vault create [options] FILE*
+
+The *create* sub-command is used to initialize a new encrypted file.
+
+First you will be prompted for a password. The password used with vault currently
+must be the same for all files you wish to use together at the same time.
+
+After providing a password, the tool will launch whatever editor you have defined
+with $EDITOR, and defaults to vim. Once you are done with the editor session, the
+file will be saved as encrypted data.
+
+The default cipher is AES (which is shared-secret based).
+
+EDIT
+----
+
+*$ ansible-vault edit [options] FILE*
+
+The *edit* sub-command is used to modify a file which was previously encrypted
+using ansible-vault.
+
+This command will decrypt the file to a temporary file and allow you to edit the
+file, saving it back when done and removing the temporary file.
+
+REKEY
+-----
+
+*$ ansible-vault rekey [options] FILE_1 [FILE_2, ..., FILE_N]*
+
+The *rekey* command is used to change the password on vault-encrypted files.
+This command can update multiple files at once, and will prompt for both the
+old and new passwords before modifying any data.
+
+ENCRYPT
+-------
+
+*$ ansible-vault encrypt [options] FILE_1 [FILE_2, ..., FILE_N]*
+
+The *encrypt* sub-command is used to encrypt pre-existing data files. As with the
+*rekey* command, you can specify multiple files in one command.
+
+DECRYPT
+-------
+
+*$ ansible-vault decrypt [options] FILE_1 [FILE_2, ..., FILE_N]*
+
+The *decrypt* sub-command is used to remove all encryption from data files. The files
+will be stored as plain-text YAML once again, so be sure that you do not run this
+command on data files with active passwords or other sensitive data. In most cases,
+users will want to use the *edit* sub-command to modify the files securely.
+
+
+AUTHOR
+------
+
+Ansible was originally written by Michael DeHaan. See the AUTHORS file
+for a complete list of contributors.
+
+
+COPYRIGHT
+---------
+
+Copyright © 2014, Michael DeHaan
+
+Ansible is released under the terms of the GPLv3 License.
+
+
+SEE ALSO
+--------
+
+*ansible*(1), *ansible-pull*(1), *ansible-doc*(1)
+
+Extensive documentation is available in the documentation site:
+<http://docs.ansible.com>.
IRC and mailing list info can be found +in file CONTRIBUTING.md, available in: From a9aab23f1132d14d12abb1adf4e7148f254c630f Mon Sep 17 00:00:00 2001 From: patrickheeney Date: Mon, 17 Mar 2014 09:00:08 -0700 Subject: [PATCH 418/772] Fix for hostkey directory --- lib/ansible/module_utils/known_hosts.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 7ddaf69cebb..14e0deb8fe4 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -119,13 +119,21 @@ def add_host_key(module, fqdn, key_type="rsa"): result = False keyscan_cmd = module.get_bin_path('ssh-keyscan', True) - if not os.path.exists(os.path.expanduser("~/.ssh/")): - module.fail_json(msg="%s does not exist" % os.path.expanduser("~/.ssh/")) + if 'USER' in os.environ: + user_ssh_dir = os.path.expandvars("~${USER}/.ssh/") + user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") + else: + user_ssh_dir = "~/.ssh/" + user_host_file = "~/.ssh/known_hosts" + user_ssh_dir = os.path.expanduser(user_ssh_dir) + + if not os.path.exists(user_ssh_dir): + module.fail_json(msg="%s does not exist" % user_ssh_dir) this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn) rc, out, err = module.run_command(this_cmd) - module.append_to_file("~/.ssh/known_hosts", out) + module.append_to_file(user_host_file, out) return rc, out, err From ebb6b8442ba901bb7650419d0fc01c9f2ebc8be2 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 17 Mar 2014 12:14:29 -0400 Subject: [PATCH 419/772] Catch unicode unescape failures in copy action plugin --- lib/ansible/runner/action_plugins/copy.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py index d395d1df6f5..f8063862cc4 100644 --- a/lib/ansible/runner/action_plugins/copy.py +++ b/lib/ansible/runner/action_plugins/copy.py @@ -58,7 +58,11 @@ class ActionModule(object): # now we need to unescape it so that the newlines are evaluated properly # when writing the file to disk if content: - content = content.decode('unicode-escape') + if isinstance(content, unicode): + try: + content = content.decode('unicode-escape') + except UnicodeDecodeError: + pass if (source is None and content is None and not 'first_available_file' in inject) or dest is None: result=dict(failed=True, msg="src (or content) and dest are required") From 7edee91abac3aed03ca2b2238d14c9447936b310 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 17 Mar 2014 11:28:01 -0500 Subject: [PATCH 420/772] Fixing a bug in role tag handling where only the first tag was used Fixes #6517 --- lib/ansible/playbook/play.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 3ff855c1cb9..fc5b4997f01 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -228,24 +228,28 @@ class Play(object): if meta_data: allow_dupes = utils.boolean(meta_data.get('allow_duplicates','')) - if "tags" in passed_vars: - if not self._is_valid_tag(passed_vars["tags"]): - # one of the tags specified for this role was in the - # skip list, or we're limiting the tags and it didn't - # match one, so we just skip it completely - continue - # if any tags were specified as role/dep variables, merge # them into the passed_vars so they're passed on to any # further dependencies too, and so we 
only have one place # (passed_vars) to look for tags going forward def __merge_tags(var_obj): old_tags = passed_vars.get('tags', []) - new_tags = var_obj.get('tags', []) - if isinstance(new_tags, basestring): - new_tags = [new_tags, ] + if isinstance(var_obj, dict): + new_tags = var_obj.get('tags', []) + if isinstance(new_tags, basestring): + new_tags = [new_tags, ] + else: + new_tags = [] return list(set(old_tags).union(set(new_tags))) + if "tags" in passed_vars: + dep_tags = __merge_tags(dep) + if not self._is_valid_tag(dep_tags): + # one of the tags specified for this role was in the + # skip list, or we're limiting the tags and it didn't + # match one, so we just skip it completely + continue + passed_vars['tags'] = __merge_tags(role_vars) passed_vars['tags'] = __merge_tags(dep_vars) From 7af8a33def34519de5ca390c47bdbd432d7b7ee2 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 17 Mar 2014 11:31:04 -0500 Subject: [PATCH 421/772] remove dependency on pycurl from apt_repository by using module_utils/urls.py instead --- library/packaging/apt_repository | 50 +++++++------------------------- 1 file changed, 11 insertions(+), 39 deletions(-) diff --git a/library/packaging/apt_repository b/library/packaging/apt_repository index 9e759d0d0ed..9ba2345e825 100644 --- a/library/packaging/apt_repository +++ b/library/packaging/apt_repository @@ -31,7 +31,6 @@ notes: - This module works on Debian and Ubuntu and requires C(python-apt). - This module supports Debian Squeeze (version 6) as well as its successors. - This module treats Debian and Ubuntu distributions separately. So PPA could be installed only on Ubuntu machines. - Adding PPA repositories requires C(python-pycurl). options: repo: required: true @@ -52,7 +51,7 @@ options: choices: [ "yes", "no" ] author: Alexander Saltanov version_added: "0.7" -requirements: [ python-apt, python-pycurl ] +requirements: [ python-apt ] ''' EXAMPLES = ''' @@ -71,10 +70,6 @@ apt_repository: repo='ppa:nginx/stable' ''' import glob -try: - import json -except ImportError: - import simplejson as json import os import re import tempfile @@ -88,23 +83,10 @@ try: except ImportError: HAVE_PYTHON_APT = False -try: - import pycurl - HAVE_PYCURL = True -except ImportError: - HAVE_PYCURL = False VALID_SOURCE_TYPES = ('deb', 'deb-src') -class CurlCallback: - def __init__(self): - self.contents = '' - - def body_callback(self, buf): - self.contents = self.contents + buf - - class InvalidSource(Exception): pass @@ -291,31 +273,19 @@ class SourcesList(object): class UbuntuSourcesList(SourcesList): - LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s' + LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s' - def __init__(self, add_ppa_signing_keys_callback=None): + def __init__(self, module, add_ppa_signing_keys_callback=None): + self.module = module self.add_ppa_signing_keys_callback = add_ppa_signing_keys_callback super(UbuntuSourcesList, self).__init__() def _get_ppa_info(self, owner_name, ppa_name): - # we can not use urllib2 here as it does not do cert verification - if not HAVE_PYCURL: - module.fail_json(msg='Could not import python modules: pycurl. 
Please install python-pycurl package.') lp_api = self.LP_API % (owner_name, ppa_name) - return self._get_ppa_info_curl(lp_api) - - def _get_ppa_info_curl(self, lp_api): - callback = CurlCallback() - curl = pycurl.Curl() - curl.setopt(pycurl.SSL_VERIFYPEER, 1) - curl.setopt(pycurl.SSL_VERIFYHOST, 2) - curl.setopt(pycurl.WRITEFUNCTION, callback.body_callback) - curl.setopt(pycurl.URL, str(lp_api)) - curl.setopt(pycurl.HTTPHEADER, ["Accept: application/json"]) - curl.perform() - curl.close() - lp_page = callback.contents - return json.loads(lp_page) + + headers = dict(Accept='application/json') + response, info = fetch_url(self.module, lp_api, headers=headers) + return json.load(response) def _expand_ppa(self, path): ppa = path.split(':')[1] @@ -380,7 +350,8 @@ def main(): sourceslist = None if isinstance(distro, aptsources.distro.UbuntuDistribution): - sourceslist = UbuntuSourcesList(add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module)) + sourceslist = UbuntuSourcesList(module, + add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module)) elif isinstance(distro, aptsources.distro.DebianDistribution) or \ isinstance(distro, aptsources.distro.Distribution): sourceslist = SourcesList() @@ -413,5 +384,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * main() From aa87afaa8713283867668f3d383a3d7142607828 Mon Sep 17 00:00:00 2001 From: John Barker Date: Mon, 17 Mar 2014 18:52:44 +0000 Subject: [PATCH 422/772] Add space after full stop in ansible-galaxy help --- bin/ansible-galaxy | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy index 7b346ac6e44..0a6938ccce4 100755 --- a/bin/ansible-galaxy +++ b/bin/ansible-galaxy @@ -170,7 +170,7 @@ def build_option_parser(action): parser.set_usage("usage: %prog init [options] role_name") parser.add_option( '-p', '--init-path', dest='init_path', default="./", - help='The path in which the skeleton role will be created.' + help='The path in which the skeleton role will be created. ' 'The default is the current working directory.') elif action == "install": parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | tar_file(s)]") @@ -181,7 +181,7 @@ def build_option_parser(action): '-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies') parser.add_option( - '-r', '--role-file', dest='role_file', + '-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported') elif action == "remove": parser.set_usage("usage: %prog remove role1 role2 ...") @@ -192,7 +192,7 @@ def build_option_parser(action): if action != "init": parser.add_option( '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, - help='The path to the directory containing your roles.' + help='The path to the directory containing your roles. 
'
            'The default is the roles_path configured in your '
            'ansible.cfg file (/etc/ansible/roles if not configured)')

From 70f4e0807c28628786601e76ca9ccb27d6807284 Mon Sep 17 00:00:00 2001
From: James Tanner
Date: Mon, 17 Mar 2014 15:03:05 -0400
Subject: [PATCH 423/772] Fixes #6467 ansible command should not assume sudo if ask_sudo_pass or sudo_user

---
 bin/ansible | 2 --
 1 file changed, 2 deletions(-)

diff --git a/bin/ansible b/bin/ansible
index 0189355ddbf..86a91d0b492 100755
--- a/bin/ansible
+++ b/bin/ansible
@@ -160,8 +160,6 @@ class Cli(object):
 
         if options.su_user or options.ask_su_pass:
             options.su = True
-        elif options.sudo_user or options.ask_sudo_pass:
-            options.sudo = True
         options.sudo_user = options.sudo_user or C.DEFAULT_SUDO_USER
         options.su_user = options.su_user or C.DEFAULT_SU_USER
         if options.tree:

From 20262b6c1561dd9e0990cce3e272f07f42d0a893 Mon Sep 17 00:00:00 2001
From: James Tanner
Date: Mon, 17 Mar 2014 15:53:48 -0400
Subject: [PATCH 424/772] Remove unused variable in _executor_internal

---
 lib/ansible/runner/__init__.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index fb2c1ce0372..8aaf0f8c79d 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -612,7 +612,6 @@ class Runner(object):
         if self.background > 0:
             raise errors.AnsibleError("lookup plugins (with_*) cannot be used with async tasks")
 
-        aggregrate = {}
         all_comm_ok = True
         all_changed = False
         all_failed = False

From ad70e9bcd633e9c088d912d81ef97d56c696e62a Mon Sep 17 00:00:00 2001
From: James Tanner
Date: Mon, 17 Mar 2014 16:54:25 -0400
Subject: [PATCH 425/772] Fixes #6227 skip non-unicode strings and catch decode errors silently in template_from_string

---
 lib/ansible/utils/template.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py
index fc4ff9fd204..0b86954e882 100644
--- a/lib/ansible/utils/template.py
+++ b/lib/ansible/utils/template.py
@@ -310,7 +310,13 @@ def template_from_string(basedir, data, vars, fail_on_undefined=False):
             if os.path.exists(filesdir):
                 basedir = filesdir
 
-        data = data.decode('utf-8')
+        # 6227
+        if isinstance(data, unicode):
+            try:
+                data = data.decode('utf-8')
+            except UnicodeEncodeError, e:
+                pass
+
         try:
             t = environment.from_string(data)
         except Exception, e:

From 94e3350b38973d92bebc9262219efcec5b28d24e Mon Sep 17 00:00:00 2001
From: Michael DeHaan
Date: Mon, 17 Mar 2014 17:14:32 -0400
Subject: [PATCH 426/772] Catch an unquoted line error.
Fixes #6532 --- lib/ansible/runner/__init__.py | 1 - lib/ansible/runner/action_plugins/assemble.py | 2 ++ lib/ansible/utils/__init__.py | 10 ++++++++-- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 8aaf0f8c79d..3865b9c0b88 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -28,7 +28,6 @@ import collections import socket import base64 import sys -import shlex import pipes import jinja2 import subprocess diff --git a/lib/ansible/runner/action_plugins/assemble.py b/lib/ansible/runner/action_plugins/assemble.py index c73964cda68..2fd76391769 100644 --- a/lib/ansible/runner/action_plugins/assemble.py +++ b/lib/ansible/runner/action_plugins/assemble.py @@ -58,6 +58,7 @@ class ActionModule(object): options = {} if complex_args: options.update(complex_args) + options.update(utils.parse_kv(module_args)) src = options.get('src', None) @@ -65,6 +66,7 @@ class ActionModule(object): delimiter = options.get('delimiter', None) remote_src = utils.boolean(options.get('remote_src', 'yes')) + if src is None or dest is None: result = dict(failed=True, msg="src and dest are required") return ReturnData(conn=conn, comm_ok=False, result=result) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 2543c5776ac..2d3fb24db30 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -539,8 +539,14 @@ def parse_kv(args): if args is not None: # attempting to split a unicode here does bad things args = args.encode('utf-8') - vargs = [x.decode('utf-8') for x in shlex.split(args, posix=True)] - #vargs = shlex.split(str(args), posix=True) + try: + vargs = shlex.split(args, posix=True) + except ValueError, ve: + if 'no closing quotation' in str(ve).lower(): + raise errors.AnsibleError("error parsing argument string, try quoting the entire line.") + else: + raise + vargs = [x.decode('utf-8') for x in vargs] for x in vargs: if "=" in x: k, v = x.split("=",1) From f5289deee7bcab63883aee556af28cbbf13d403d Mon Sep 17 00:00:00 2001 From: vyrak bunleang Date: Mon, 17 Mar 2014 16:56:54 -0600 Subject: [PATCH 427/772] prevent state from changing from head to installed allows for --HEAD to be included in brew install command --- library/packaging/homebrew | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/library/packaging/homebrew b/library/packaging/homebrew index a74091542e2..12fbf89c0f4 100644 --- a/library/packaging/homebrew +++ b/library/packaging/homebrew @@ -756,8 +756,10 @@ def main(): path = ['/usr/local/bin'] state = p['state'] - if state in ('present', 'installed', 'head'): + if state in ('present', 'installed'): state = 'installed' + if state in ('head'): + state = 'head' if state in ('latest', 'upgraded'): state = 'upgraded' if state == 'linked': From 5e8641b8346ede1ee68941113e57d8c6397f5479 Mon Sep 17 00:00:00 2001 From: Zeekin Date: Tue, 18 Mar 2014 10:32:55 +1000 Subject: [PATCH 428/772] Added AWS modules ec2_scaling_policy and ec2_metricalarm for configuring scaling policies for autoscaling groups, and metric alarms. 
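
As a rough sketch of how the two new modules can be combined (illustrative
only; the ARN, account id and names below are placeholders), a policy is
created first and then referenced from an alarm's alarm_actions:

    - ec2_scaling_policy:
        state: present
        region: ap-southeast-2
        name: "scaledown-policy"
        asg_name: "slave-pool"
        adjustment_type: "ChangeInCapacity"
        scaling_adjustment: -1
        cooldown: 300

    - ec2_metricalarm:
        state: present
        region: ap-southeast-2
        name: "cpu-low"
        metric: "CPUUtilization"
        namespace: "AWS/EC2"
        statistic: Average
        comparison: "<="
        threshold: 5.0
        period: 300
        evaluation_periods: 3
        unit: "Percent"
        dimensions: {'AutoScalingGroupName': 'slave-pool'}
        alarm_actions: ["arn:aws:autoscaling:ap-southeast-2:123456789012:scalingPolicy:00000000-0000-0000-0000-000000000000:autoScalingGroupName/slave-pool:policyName/scaledown-policy"]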
---
 library/cloud/ec2_metricalarm    | 268 +++++++++++++++++++++++++++++++
 library/cloud/ec2_scaling_policy | 181 +++++++++++++++++++++
 2 files changed, 449 insertions(+)
 create mode 100644 library/cloud/ec2_metricalarm
 create mode 100755 library/cloud/ec2_scaling_policy

diff --git a/library/cloud/ec2_metricalarm b/library/cloud/ec2_metricalarm
new file mode 100644
index 00000000000..d1f3f8151fa
--- /dev/null
+++ b/library/cloud/ec2_metricalarm
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+
+---
+module: ec2_metricalarm
+short_description: Create/update or delete AWS Cloudwatch 'metric alarms'
+description:
+ - Can create or delete AWS metric alarms
+ - Metrics you wish to alarm on must already exist
+version_added: "1.6"
+requirements: [ "boto" ]
+author: Zacharie Eakin
+options:
+    state:
+        description:
+          - register or deregister the alarm
+        required: true
+        choices: ['present', 'absent']
+    name:
+        description:
+          - Unique name for the alarm
+        required: true
+    metric:
+        description:
+          - Name of the monitored metric (e.g. CPUUtilization)
+          - Metric must already exist
+        required: false
+    namespace:
+        description:
+          - Name of the appropriate namespace, which determines the category it will appear under in cloudwatch
+        required: false
+        options: ['AWS/AutoScaling','AWS/Billing','AWS/DynamoDB','AWS/ElastiCache','AWS/EBS','AWS/EC2','AWS/ELB','AWS/ElasticMapReduce','AWS/OpsWorks','AWS/Redshift','AWS/RDS','AWS/Route53','AWS/SNS','AWS/SQS','AWS/StorageGateway']
+    statistic:
+        description:
+          - Operation applied to the metric
+          - Works in conjunction with period and evaluation_periods to determine the comparison value
+        required: false
+        options: ['SampleCount','Average','Sum','Minimum','Maximum']
+    comparison:
+        description:
+          - Determines how the threshold value is compared
+        required: false
+        options: ['<=','<','>','>=']
+    threshold:
+        description:
+          - Sets the min/max bound for triggering the alarm
+        required: false
+    period:
+        description:
+          - The time (in seconds) between metric evaluations
+        required: false
+    evaluation_periods:
+        description:
+          - The number of times in which the metric is evaluated before final calculation
+        required: false
+    unit:
+        description:
+          - The threshold's unit of measurement
+        required: false
+        options: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None']
+    description:
+        description:
+          - A longer description of the alarm
+        required: false
+    dimensions:
+        description:
+          - Describes to what the alarm is applied
+        required: false
+    alarm_actions:
+        description:
+          - A list of the names of action(s) taken when the alarm is in the
'alarm' status + required: false + insufficient_data_actions: + description: + - A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status + required: false + ok_actions: + description: + - A list of the names of action(s) to take when the alarm is in the 'ok' status + required: false + +--- +""" + +EXAMPLES = ''' + - name: create alarm + ec2_metricalarm: + state: present + region: ap-southeast-2 + name: "cpu-low" + metric: "CPUUtilization" + namespace: "AWS/EC2" + statistic: Average + comparison: "<=" + threshold: 5.0 + period: 300 + evaluation_periods: 3 + unit: "Percent" + description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes " + dimensions: {'InstanceId':'i-XXX'} + alarm_actions: ["action1","action2"] + + +''' + +import sys + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +try: + import boto.ec2.cloudwatch + from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm + from boto.exception import BotoServerError +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + + +def create_metric_alarm(connection, module): + + name = module.params.get('name') + metric = module.params.get('metric') + namespace = module.params.get('namespace') + statistic = module.params.get('statistic') + comparison = module.params.get('comparison') + threshold = module.params.get('threshold') + period = module.params.get('period') + evaluation_periods = module.params.get('evaluation_periods') + unit = module.params.get('unit') + description = module.params.get('description') + dimensions = module.params.get('dimensions') + alarm_actions = module.params.get('alarm_actions') + insufficient_data_actions = module.params.get('insufficient_data_actions') + ok_actions = module.params.get('ok_actions') + + alarms = connection.describe_alarms(alarm_names=[name]) + + if not alarms: + + alm = MetricAlarm( + name=name, + metric=metric, + namespace=namespace, + statistic=statistic, + comparison=comparison, + threshold=threshold, + period=period, + evaluation_periods=evaluation_periods, + unit=unit, + description=description, + dimensions=dimensions, + alarm_actions=alarm_actions, + insufficient_data_actions=insufficient_data_actions, + ok_actions=ok_actions + ) + try: + connection.create_alarm(alm) + module.exit_json(changed=True) + except BotoServerError, e: + module.fail_json(msg=str(e)) + + else: + alarm = alarms[0] + changed = False + + for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'): + if getattr(alarm, attr) != module.params.get(attr): + changed = True + setattr(alarm, attr, module.params.get(attr)) + #this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm + comparison = alarm.comparison + comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'} + alarm.comparison = comparisons[comparison] + + dim1 = module.params.get('dimensions') + dim2 = alarm.dimensions + + for keys in dim1: + if not isinstance(dim1[keys], list): + dim1[keys] = [dim1[keys]] + if dim1[keys] != dim2[keys]: + changed=True + setattr(alarm, 'dimensions', dim1) + + for attr in ('alarm_actions','insufficient_data_actions','ok_actions'): + action = module.params.get(attr) or [] + if getattr(alarm, attr) != action: + changed = True + setattr(alarm, attr, 
module.params.get(attr))
+
+    try:
+        if changed:
+            connection.create_alarm(alarm)
+        module.exit_json(changed=changed)
+    except BotoServerError, e:
+        module.fail_json(msg=str(e))
+
+
+def delete_metric_alarm(connection, module):
+    name = module.params.get('name')
+
+    alarms = connection.describe_alarms(alarm_names=[name])
+
+    if alarms:
+        try:
+            connection.delete_alarms([name])
+            module.exit_json(changed=True)
+        except BotoServerError, e:
+            module.fail_json(msg=str(e))
+    else:
+        module.exit_json(changed=False)
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            name=dict(required=True, type='str'),
+            metric=dict(type='str'),
+            namespace=dict(type='str', choices=['AWS/AutoScaling', 'AWS/Billing', 'AWS/DynamoDB', 'AWS/ElastiCache', 'AWS/EBS', 'AWS/EC2',
+                'AWS/ELB', 'AWS/ElasticMapReduce', 'AWS/OpsWorks', 'AWS/Redshift', 'AWS/RDS', 'AWS/Route53', 'AWS/SNS', 'AWS/SQS', 'AWS/StorageGateway']),
+            statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
+            comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
+            threshold=dict(type='float'),
+            period=dict(type='int'),
+            unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
+            evaluation_periods=dict(type='int'),
+            description=dict(type='str'),
+            dimensions=dict(type='dict'),
+            alarm_actions=dict(type='list'),
+            insufficient_data_actions=dict(type='list'),
+            ok_actions=dict(type='list'),
+            state=dict(default='present', choices=['present', 'absent']),
+            region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    state = module.params.get('state')
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+    try:
+        connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
+    except boto.exception.NoAuthHandlerFound, e:
+        module.fail_json(msg=str(e))
+
+    if state == 'present':
+        create_metric_alarm(connection, module)
+    elif state == 'absent':
+        delete_metric_alarm(connection, module)
+
+main()
diff --git a/library/cloud/ec2_scaling_policy b/library/cloud/ec2_scaling_policy
new file mode 100755
index 00000000000..b2395cd0a3c
--- /dev/null
+++ b/library/cloud/ec2_scaling_policy
@@ -0,0 +1,181 @@
+#!/usr/bin/python
+
+DOCUMENTATION = """
+---
+module: ec2_scaling_policy
+short_description: Create or delete AWS scaling policies for Autoscaling groups
+description:
+  - Can create or delete scaling policies for autoscaling groups
+  - Referenced autoscaling groups must already exist
+version_added: "1.6"
+requirements: [ "boto" ]
+author: Zacharie Eakin
+options:
+    state:
+        description:
+          - register or deregister the policy
+        required: true
+        choices: ['present', 'absent']
+    name:
+        description:
+          - Unique name for the scaling policy
+        required: true
+    asg_name:
+        description:
+          - Name of the associated autoscaling group
+        required: true
+    adjustment_type:
+        description:
+          - The type of change in capacity of the autoscaling group
+        required: false
+        choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']
+    scaling_adjustment:
+        description:
+          - The amount by which the autoscaling group is
adjusted by the policy
+        required: false
+    min_adjustment_step:
+        description:
+          - Minimum amount of adjustment when policy is triggered
+        required: false
+    cooldown:
+        description:
+          - The minimum period of time between which autoscaling actions can take place
+        required: false
+"""

+EXAMPLES = '''
+- ec2_scaling_policy:
+    state: present
+    region: US-XXX
+    name: "scaledown-policy"
+    adjustment_type: "ChangeInCapacity"
+    asg_name: "slave-pool"
+    scaling_adjustment: -1
+    min_adjustment_step: 1
+    cooldown: 300
+'''
+
+
+import sys
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+try:
+    import boto.ec2.autoscale
+    from boto.ec2.autoscale import ScalingPolicy
+    from boto.exception import BotoServerError
+
+except ImportError:
+    print "failed=True msg='boto required for this module'"
+    sys.exit(1)
+
+
+def create_scaling_policy(connection, module):
+    sp_name = module.params.get('name')
+    adjustment_type = module.params.get('adjustment_type')
+    asg_name = module.params.get('asg_name')
+    scaling_adjustment = module.params.get('scaling_adjustment')
+    min_adjustment_step = module.params.get('min_adjustment_step')
+    cooldown = module.params.get('cooldown')
+
+    scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
+
+    if not scalingPolicies:
+        sp = ScalingPolicy(
+            name=sp_name,
+            adjustment_type=adjustment_type,
+            as_name=asg_name,
+            scaling_adjustment=scaling_adjustment,
+            min_adjustment_step=min_adjustment_step,
+            cooldown=cooldown)
+
+        try:
+            connection.create_scaling_policy(sp)
+            module.exit_json(changed=True)
+        except BotoServerError, e:
+            module.fail_json(msg=str(e))
+    else:
+        policy = scalingPolicies[0]
+        changed = False
+
+        #min_adjustment_step attribute is only relevant if the adjustment_type
+        #is set to percentage change in capacity, so it is a special case
+        if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity':
+            if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
+                changed = True
+
+        #set the min adjustment step in case the user decided to change their adjustment type to percentage
+        setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
+
+        #check the remaining attributes
+        for attr in ('adjustment_type','scaling_adjustment','cooldown'):
+            if getattr(policy, attr) != module.params.get(attr):
+                changed = True
+                setattr(policy, attr, module.params.get(attr))
+
+        try:
+            if changed:
+                connection.create_scaling_policy(policy)
+                policy = connection.get_all_policies(policy_names=[sp_name])[0]
+                module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
+            module.exit_json(changed=changed)
+        except BotoServerError, e:
+            module.fail_json(msg=str(e))
+
+
+def delete_scaling_policy(connection, module):
+    sp_name = module.params.get('name')
+    asg_name = module.params.get('asg_name')
+
+    scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
+
+    if scalingPolicies:
+        try:
+            connection.delete_policy(sp_name, asg_name)
+            module.exit_json(changed=True)
+        except BotoServerError, e:
+            module.exit_json(changed=False, msg=str(e))
+    else:
+        module.exit_json(changed=False)
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            name = dict(required=True, type='str'),
+            adjustment_type =
dict(type='str', choices=['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']), + asg_name = dict(required=True, type='str'), + scaling_adjustment = dict(type='int'), + min_adjustment_step = dict(type='int'), + cooldown = dict(type='int'), + region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), + state=dict(default='present', choices=['present', 'absent']), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + state = module.params.get('state') + + try: + connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + + if state == 'present': + create_scaling_policy(connection, module) + elif state == 'absent': + delete_scaling_policy(connection, module) + + +main() + + + + + + From 7c5ad1fa230ff1f64aaf56442926c16ce16ccc5b Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 17 Mar 2014 22:21:30 -0400 Subject: [PATCH 429/772] Fixes #6539 Workaround py26 vs py27 difflib results --- test/units/TestUtils.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index a56a79e4ef2..97553271000 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -662,8 +662,19 @@ class TestUtils(unittest.TestCase): before='fooo', after='foo' ) + standard_expected = """--- before: foo +++ after: bar @@ -1 +1 @@ -fooo+foo""" - self.assertEqual(ansible.utils.get_diff(standard), standard_expected) + + # workaround py26 and py27 difflib differences + standard_expected = """-fooo+foo""" + diff = ansible.utils.get_diff(standard) + diff = diff.split('\n') + del diff[0] + del diff[0] + del diff[0] + diff = '\n'.join(diff) + self.assertEqual(diff, unicode(standard_expected)) + From b4cd4ad15bef8bdd83b5925a9c1bbc2bc54e1fdd Mon Sep 17 00:00:00 2001 From: Ramon de la Fuente Date: Tue, 18 Mar 2014 04:46:08 +0100 Subject: [PATCH 430/772] A module for notifications through the Slack.com platform This module can send messages through Slack.com, and supports all options available in the "incoming webhook integration". - text - channel - username - icon_url / icon_emoji - link_names - parse --- library/notification/slack | 174 +++++++++++++++++++++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100644 library/notification/slack diff --git a/library/notification/slack b/library/notification/slack new file mode 100644 index 00000000000..e3bdffd216f --- /dev/null +++ b/library/notification/slack @@ -0,0 +1,174 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Ramon de la Fuente +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
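+
+# (Orientation note, a sketch rather than a spec -- the authoritative
+# construction lives in build_payload_for_slack() below.) The incoming
+# webhook endpoint receives a form-encoded body of the shape
+#
+#   payload={"text": "...", "channel": "#...", "username": "...", ...}
+#
+# carrying whichever of the optional fields were supplied to the module.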
+ +DOCUMENTATION = """ +module: slack +short_description: Send notifications to U(Slack.com) +description: + - The M(slack) module sends notifications to U(Slack.com) via the incoming WebHook + See the Slack documentation U() +version_added: 1.6 +author: Ramon de la Fuente +options: + domain: + description: + - Slack (sub)domain for your environment without protocol. + (i.e. C(future500.slack.com)) + required: true + token: + description: + - Slack integration token + required: true + msg: + description: + - Message to send. + required: true + channel: + description: + - Channel to send the message to. If absent, the message goes to the channel selected for the I(token). + required: false + username: + description: + - This is the sender of the message. + required: false + default: ansible + icon_url: + description: + - Url for the message sender's icon (default C(http://www.ansibleworks.com/favicon.ico)) + required: false + icon_emoji: + description: + - Emoji for the message sender. See Slack documentation for options. + (if I(icon_emoji) is set, I(icon_url) will not be used) + required: false + link_names: + description: + - Automatically create links for channels and usernames in I(msg). + required: false + default: 1 + choices: + - 1 + - 0 + parse: + description: + - Setting for the message parser at Slack + required: false + choices: + - 'full' + - 'none' + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: + - 'yes' + - 'no' +""" + +EXAMPLES = """ +- name: Send notification message via Slack + local_action: + module: slack + domain: future500.slack.com + token: thetokengeneratedbyslack + msg: "{{ inventory_hostname }} completed" + +- name: Send notification message via Slack all options + local_action: + module: slack + domain: future500.slack.com + token: thetokengeneratedbyslack + msg: "{{ inventory_hostname }} completed" + channel: "#ansible" + username: "Ansible on {{ inventory_hostname }}" + icon_url: "http://www.example.com/some-image-file.png" + link_names: 0 + parse: 'none' + +""" + + +SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' + +def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse): + payload = dict(text=text) + + if channel is not None: + payload['channel'] = channel if (channel[0] == '#') else '#'+channel + if username is not None: + payload['username'] = username + if icon_emoji is not None: + payload['icon_emoji'] = icon_emoji + else: + payload['icon_url'] = icon_url + if link_names is not None: + payload['link_names'] = link_names + if parse is not None: + payload['parse'] = parse + + payload="payload=" + module.jsonify(payload) + return payload + +def do_notify_slack(module, domain, token, payload): + slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, token) + + response, info = fetch_url(module, slack_incoming_webhook, data=payload) + if info['status'] != 200: + obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, '[obscured]') + module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg'])) + +def main(): + module = AnsibleModule( + argument_spec = dict( + domain = dict(type='str', required=True), + token = dict(type='str', required=True), + msg = dict(type='str', required=True), + channel = dict(type='str', default=None), + username = dict(type='str', 
default='Ansible'), + icon_url = dict(type='str', default='http://www.ansibleworks.com/favicon.ico'), + icon_emoji = dict(type='str', default=None), + link_names = dict(type='int', default=1, choices=[0,1]), + parse = dict(type='str', default=None, choices=['none', 'full']), + + validate_certs = dict(default='yes', type='bool'), + ) + ) + + domain = module.params['domain'] + token = module.params['token'] + text = module.params['msg'] + channel = module.params['channel'] + username = module.params['username'] + icon_url = module.params['icon_url'] + icon_emoji = module.params['icon_emoji'] + link_names = module.params['link_names'] + parse = module.params['parse'] + + payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse) + do_notify_slack(module, domain, token, payload) + + module.exit_json(msg="OK") + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main() \ No newline at end of file From f51872885c3986f25d01e10073071165586baa86 Mon Sep 17 00:00:00 2001 From: Ramon de la Fuente Date: Tue, 18 Mar 2014 04:59:15 +0100 Subject: [PATCH 431/772] mandatory silly mistake --- library/notification/slack | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/library/notification/slack b/library/notification/slack index e3bdffd216f..df069375a83 100644 --- a/library/notification/slack +++ b/library/notification/slack @@ -20,10 +20,9 @@ DOCUMENTATION = """ module: slack -short_description: Send notifications to U(Slack.com) +short_description: Send Slack notifications description: - - The M(slack) module sends notifications to U(Slack.com) via the incoming WebHook - See the Slack documentation U() + - The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration version_added: 1.6 author: Ramon de la Fuente options: From 84908a57fce3e6c1919ab7b85839f40d7f7d9585 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 17 Mar 2014 23:00:50 -0500 Subject: [PATCH 432/772] Don't filter role deps on tags Also, fixed a bug where the tags were being merged into the wrong data structure (passed_vars) in _build_role_dependencies() Fixes #6439 Fixes #5709 --- lib/ansible/playbook/play.py | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index fc5b4997f01..5d2f9e0a731 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -229,11 +229,11 @@ class Play(object): allow_dupes = utils.boolean(meta_data.get('allow_duplicates','')) # if any tags were specified as role/dep variables, merge - # them into the passed_vars so they're passed on to any + # them into the current dep_vars so they're passed on to any # further dependencies too, and so we only have one place - # (passed_vars) to look for tags going forward + # (dep_vars) to look for tags going forward def __merge_tags(var_obj): - old_tags = passed_vars.get('tags', []) + old_tags = dep_vars.get('tags', []) if isinstance(var_obj, dict): new_tags = var_obj.get('tags', []) if isinstance(new_tags, basestring): @@ -242,16 +242,8 @@ class Play(object): new_tags = [] return list(set(old_tags).union(set(new_tags))) - if "tags" in passed_vars: - dep_tags = __merge_tags(dep) - if not self._is_valid_tag(dep_tags): - # one of the tags specified for this role was in the - # skip list, or we're limiting the tags and it didn't - # match one, so we just skip it completely - continue - - 
passed_vars['tags'] = __merge_tags(role_vars) - passed_vars['tags'] = __merge_tags(dep_vars) + dep_vars['tags'] = __merge_tags(role_vars) + dep_vars['tags'] = __merge_tags(passed_vars) # if tags are set from this role, merge them # into the tags list for the dependent role From 79b3efc9893e7ff7a387f6b21c140ee6bec1fa4d Mon Sep 17 00:00:00 2001 From: Matt Bray Date: Tue, 18 Mar 2014 11:57:01 +0000 Subject: [PATCH 433/772] docker: add tty and stdin_open options Useful for development environments. Setting these options to true allows you to `docker attach` to a docker container started with ansible. --- library/cloud/docker | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/library/cloud/docker b/library/cloud/docker index a1e9a5074c8..0248f5992af 100644 --- a/library/cloud/docker +++ b/library/cloud/docker @@ -169,6 +169,20 @@ options: default: null aliases: [] version_added: "1.5" + stdin_open: + description: + - Keep stdin open + required: false + default: false + aliases: [] + version_added: "1.6" + tty: + description: + - Allocate a pseudo-tty + required: false + default: false + aliases: [] + version_added: "1.6" author: Cove Schneider, Joshua Conner, Pavel Antonov requirements: [ "docker-py >= 0.3.0" ] ''' @@ -529,6 +543,8 @@ class DockerManager: 'hostname': self.module.params.get('hostname'), 'detach': self.module.params.get('detach'), 'name': self.module.params.get('name'), + 'stdin_open': self.module.params.get('stdin_open'), + 'tty': self.module.params.get('tty'), } def do_create(count, params): @@ -636,7 +652,9 @@ def main(): debug = dict(default=False, type='bool'), privileged = dict(default=False, type='bool'), lxc_conf = dict(default=None), - name = dict(default=None) + name = dict(default=None), + stdin_open = dict(default=False, type='bool'), + tty = dict(default=False, type='bool'), ) ) From db3d2e1042c2ac60a899ccc745f579c364ff578a Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 18 Mar 2014 09:03:55 -0400 Subject: [PATCH 434/772] Fixes #6542 rewrite test string for osx compatibility --- test/units/TestModuleUtilsBasic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/units/TestModuleUtilsBasic.py b/test/units/TestModuleUtilsBasic.py index 3d85b613525..d1ee95a1a0e 100644 --- a/test/units/TestModuleUtilsBasic.py +++ b/test/units/TestModuleUtilsBasic.py @@ -95,9 +95,9 @@ class TestModuleUtilsBasic(unittest.TestCase): # test run_command with a command pipe (with both use_unsafe_shell=True|False) def test_run_command_string_unsafe_with_pipe(self): - (rc, out, err) = self.module.run_command('echo -n "foo bar" | cat', use_unsafe_shell=True) + (rc, out, err) = self.module.run_command('echo "foo bar" | cat', use_unsafe_shell=True) self.assertEqual(rc, 0) - self.assertEqual(out, 'foo bar') + self.assertEqual(out, 'foo bar\n') # test run_command with a shell redirect in (with both use_unsafe_shell=True|False) def test_run_command_string_unsafe_with_redirect_in(self): From cf0a0ca3d269e259065893d93d87e8763076f796 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 18 Mar 2014 09:30:21 -0400 Subject: [PATCH 435/772] Fixes #6519 Properly handle state=absent in sysctl module --- library/system/sysctl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/library/system/sysctl b/library/system/sysctl index 1b29fed8f1e..ab1da5e0959 100644 --- a/library/system/sysctl +++ b/library/system/sysctl @@ -144,9 +144,13 @@ class SysctlModule(object): if self.file_values[thisname] is None and self.args['state'] == "present": 
self.changed = True self.write_file = True + elif self.file_values[thisname] is None and self.args['state'] == "absent": + self.changed = False elif self.file_values[thisname] != self.args['value']: self.changed = True self.write_file = True + + # use the sysctl command or not? if self.args['sysctl_set']: if self.proc_value is None: self.changed = True From 750d9e2d59da531ac522e77dbb1f94ae2c8b5a5a Mon Sep 17 00:00:00 2001 From: James Laska Date: Tue, 18 Mar 2014 10:15:02 -0400 Subject: [PATCH 436/772] Update credentials.yml documentation and handling Changes include: * Update Makefile to use credentials.yml when it exists * Add details on the use of the credentials.yml file to README.md. * Update credentials.template comments --- test/integration/Makefile | 30 ++++++++++++++++--------- test/integration/README.md | 32 ++++++++++++++++++++++----- test/integration/credentials.template | 6 ++--- 3 files changed, 48 insertions(+), 20 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 94d97b46a40..ad5e62a91d7 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -6,20 +6,28 @@ ifndef CLOUD_RESOURCE_PREFIX CLOUD_RESOURCE_PREFIX := $(shell python -c "import string,random; print 'ansible-testing-' + ''.join(random.choice(string.ascii_letters + string.digits) for _ in xrange(8));") endif +CREDENTIALS_FILE = credentials.yml +# If credentials.yml exists, use it +ifneq ("$(wildcard $(CREDENTIALS_FILE))","") +CREDENTIALS_ARG = -e @$(CREDENTIALS_FILE) +else +CREDENTIALS_ARG = +endif + all: non_destructive destructive check_mode test_hash non_destructive: - ansible-playbook non_destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) -v $(TEST_FLAGS) + ansible-playbook non_destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) destructive: - ansible-playbook destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) -v $(TEST_FLAGS) + ansible-playbook destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) check_mode: - ansible-playbook check_mode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v --check $(TEST_FLAGS) + ansible-playbook check_mode.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v --check $(TEST_FLAGS) test_hash: - ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_hash.yml -i $(INVENTORY) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' - ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_hash.yml -i $(INVENTORY) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' + ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' + ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' cloud: amazon rackspace @@ -32,18 +40,18 @@ rackspace_cleanup: @echo "FIXME - cleanup_rax.py not yet implemented" @# python cleanup_rax.py -y --match="^$(CLOUD_RESOURCE_PREFIX)" -credentials.yml: - @echo "No credentials.yml file found. A file named 'credentials.yml' is needed to provide credentials needed to run cloud tests. See sample 'credentials.template' file." +$(CREDENTIALS_FILE): + @echo "No credentials file found. A file named '$(CREDENTIALS_FILE)' is needed to provide credentials needed to run cloud tests. See sample 'credentials.template' file." 
@exit 1 -amazon: credentials.yml - ansible-playbook amazon.yml -i $(INVENTORY) -e @$(VARS_FILE) -e @credentials.yml -e "resource_prefix=$(CLOUD_RESOURCE_PREFIX)" -v $(TEST_FLAGS) ; \ +amazon: $(CREDENTIALS_FILE) + ansible-playbook amazon.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -e "resource_prefix=$(CLOUD_RESOURCE_PREFIX)" -v $(TEST_FLAGS) ; \ RC=$$? ; \ CLOUD_RESOURCE_PREFIX="$(CLOUD_RESOURCE_PREFIX)" make amazon_cleanup ; \ exit $$RC; -rackspace: credentials.yml - ansible-playbook rackspace.yml -i $(INVENTORY) -e @$(VARS_FILE) -e @credentials.yml -e "resource_prefix=$(CLOUD_RESOURCE_PREFIX)" -v $(TEST_FLAGS) ; \ +rackspace: $(CREDENTIALS_FILE) + ansible-playbook rackspace.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -e "resource_prefix=$(CLOUD_RESOURCE_PREFIX)" -v $(TEST_FLAGS) ; \ RC=$$? ; \ CLOUD_RESOURCE_PREFIX="$(CLOUD_RESOURCE_PREFIX)" make rackspace_cleanup ; \ exit $$RC; diff --git a/test/integration/README.md b/test/integration/README.md index 1bdc099cd1d..e05f843ac2f 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -5,15 +5,17 @@ The ansible integration system. Tests for playbooks, by playbooks. -Some tests may require cloud credentials. +Some tests may require credentials. Credentials may be specified with `credentials.yml`. Tests should be run as root. Configuration ============= -Making your own version of integration_config.yml can allow for setting some tunable parameters to help run -the tests better in your environment. +Making your own version of `integration_config.yml` can allow for setting some +tunable parameters to help run the tests better in your environment. Some +tests (e.g. cloud) will only run when access credentials are provided. For +more information about supported credentials, refer to `credentials.template`. Prerequisites ============= @@ -41,12 +43,30 @@ Destructive Tests These tests are allowed to install and remove some trivial packages. You will likely want to devote these to a virtual environment. They won't reformat your filesystem, however :) - + make destructive Cloud Tests =========== -Details pending, but these require cloud credentials. These are not 'tests run in the cloud' so much as tests -that leverage the cloud modules and are organized by cloud provider. +Cloud tests exercise capabilities of cloud modules (e.g. ec2_key). These are +not 'tests run in the cloud' so much as tests that leverage the cloud modules +and are organized by cloud provider. + +In order to run cloud tests, you must provide access credentials in a file +named `credentials.yml`. A sample credentials file named +`credentials.template` is available for syntax help. + + +Provide cloud credentials: + cp credentials.template credentials.yml + ${EDITOR:-vi} credentials.yml + +Run the tests: + make cloud +*WARNING* running cloud integration tests will create and destroy cloud +resources. Running these tests may result in additional fees associated with +your cloud account. Care is taken to ensure that created resources are +removed. However, it is advisable to inspect your AWS console to ensure no +unexpected resources are running. 
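+
+For orientation, a filled-in `credentials.yml` could look like the
+following sketch (the values are placeholders, not working credentials):
+
+    ec2_access_key: AKIAIOSFODNN7EXAMPLE
+    ec2_secret_key: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+    github_ssh_private_key: "{{ lookup('env','HOME') }}/.ssh/id_rsa"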
diff --git a/test/integration/credentials.template b/test/integration/credentials.template index 0ca34aff7c6..f21100405fc 100644 --- a/test/integration/credentials.template +++ b/test/integration/credentials.template @@ -1,7 +1,7 @@ --- # AWS Credentials -ec2_access_key: FIXME -ec2_secret_key: FIXME +ec2_access_key: +ec2_secret_key: -# GITHUB Credentials +# GITHUB SSH private key - a path to a SSH private key for use with github.com github_ssh_private_key: "{{ lookup('env','HOME') }}/.ssh/id_rsa" From 3f5440f7dfe098f5eb3c2e3da5a5d23a8cc6b7b7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 18 Mar 2014 10:17:44 -0500 Subject: [PATCH 437/772] Make modules set a valid working directory Fixes #6546 --- lib/ansible/module_utils/basic.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 2f0c0f61aca..2d85245cc20 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -217,6 +217,9 @@ class AnsibleModule(object): if not self.no_log: self._log_invocation() + # finally, make sure we're in a sane working dir + self._set_cwd() + def load_file_common_arguments(self, params): ''' many modules deal with files, this encapsulates common @@ -815,6 +818,26 @@ class AnsibleModule(object): syslog.openlog(str(module), 0, syslog.LOG_USER) syslog.syslog(syslog.LOG_NOTICE, unicode(msg).encode('utf8')) + def _set_cwd(self): + try: + cwd = os.getcwd() + if not os.access(cwd, os.F_OK|os.R_OK): + raise + return cwd + except: + # we don't have access to the cwd, probably because of sudo. + # Try and move to a neutral location to prevent errors + for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]: + try: + if os.access(cwd, os.F_OK|os.R_OK): + os.chdir(cwd) + return cwd + except: + pass + # we won't error here, as it may *not* be a problem, + # and we don't want to break modules unnecessarily + return None + def get_bin_path(self, arg, required=False, opt_dirs=[]): ''' find system executable in PATH. From 49807877fa4e69f6047efa4f7885a3d454a9d6eb Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 18 Mar 2014 12:21:26 -0400 Subject: [PATCH 438/772] Fixes #6550 Use shell for pipe lookup subprocess.popen calls --- lib/ansible/runner/lookup_plugins/pipe.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/lookup_plugins/pipe.py b/lib/ansible/runner/lookup_plugins/pipe.py index 62ec7e129ed..0cd9e1cda5d 100644 --- a/lib/ansible/runner/lookup_plugins/pipe.py +++ b/lib/ansible/runner/lookup_plugins/pipe.py @@ -32,7 +32,18 @@ class LookupModule(object): ret = [] for term in terms: - p = subprocess.Popen(term, cwd=self.basedir, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE) + ''' + http://docs.python.org/2/library/subprocess.html#popen-constructor + + The shell argument (which defaults to False) specifies whether to use the + shell as the program to execute. 
If shell is True, it is recommended to pass + args as a string rather than as a sequence + + https://github.com/ansible/ansible/issues/6550 + ''' + term = str(term) + + p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE) (stdout, stderr) = p.communicate() if p.returncode == 0: ret.append(stdout.decode("utf-8").rstrip()) From a57581e1e780e4d860a744535ddc4c2ca10dacdc Mon Sep 17 00:00:00 2001 From: andreaskern Date: Tue, 18 Mar 2014 17:24:56 +0100 Subject: [PATCH 439/772] add param to note example for ssh-keyscan hostnames in the known hosts file are typically stored as Hashed values, calling 'ssh-keyscan' with '-H' changes to output to the Hashed format so that the known_hosts file looks more consistent --- library/source_control/git | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/source_control/git b/library/source_control/git index f4ee4d522b7..e61c7728156 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -123,7 +123,7 @@ notes: - "If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling - the git module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts." + the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts." ''' EXAMPLES = ''' From 4045b793be55d40b686b1d5926cd26bdc50ba540 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 18 Mar 2014 12:29:54 -0400 Subject: [PATCH 440/772] Addresses #6550 add integration tests for pipe lookup --- test/integration/roles/test_lookups/tasks/main.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/test/integration/roles/test_lookups/tasks/main.yml b/test/integration/roles/test_lookups/tasks/main.yml index d54b769ecb9..0340a12c74e 100644 --- a/test/integration/roles/test_lookups/tasks/main.yml +++ b/test/integration/roles/test_lookups/tasks/main.yml @@ -82,3 +82,17 @@ assert: that: - "test_val == known_var_value.stdout" + + +# PIPE LOOKUP + +# https://github.com/ansible/ansible/issues/6550 +- name: confirm pipe lookup works with a single positional arg + debug: msg="{{ lookup('pipe', 'ls') }}" + +# https://github.com/ansible/ansible/issues/6550 +- name: confirm pipe lookup works with multiple positional args + debug: msg="{{ lookup('pipe', 'ls /tmp /') }}" + + + From 0556c53f7878c9ac2a59100c2752785482e806aa Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 18 Mar 2014 12:22:58 -0500 Subject: [PATCH 441/772] Execute commands through accelerate with shell when needed This fixes an issue related to the new run_command changes, whereby Popen fails to run a command when an executable is specified + shell=False and there are multiple positional arguments (see Popen docs for details) --- library/utilities/accelerate | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/library/utilities/accelerate b/library/utilities/accelerate index 6508f1433ea..a61e54e374d 100644 --- a/library/utilities/accelerate +++ b/library/utilities/accelerate @@ -329,11 +329,15 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): return dict(failed=True, msg='internal error: cmd is required') if 'tmp_path' not in data: return dict(failed=True, msg='internal error: tmp_path is required') - if 'executable' not in data: - return 
dict(failed=True, msg='internal error: executable is required') vvvv("executing: %s" % data['cmd']) - rc, stdout, stderr = self.server.module.run_command(data['cmd'], executable=data['executable'], close_fds=True) + + use_unsafe_shell = False + executable = data.get('executable') + if executable: + use_unsafe_shell = True + + rc, stdout, stderr = self.server.module.run_command(data['cmd'], executable=executable, use_unsafe_shell=use_unsafe_shell) if stdout is None: stdout = '' if stderr is None: From c9fcbf7bdd46b493a1349662f92be63b66412007 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 18 Mar 2014 11:14:56 -0500 Subject: [PATCH 442/772] Change print message to vvv for missing known hosts in ssh.py --- lib/ansible/runner/connection_plugins/ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index cc548a1c9b2..bcf90cbc7d3 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -157,7 +157,7 @@ class Connection(object): return False if (hfiles_not_found == len(host_file_list)): - print "previous known host file not found" + vvv("EXEC previous known host file not found for %s" % host) return True def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=False): From 8a4dca4b86684f1c816275689d0af584acb53ac4 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 18 Mar 2014 13:30:13 -0400 Subject: [PATCH 443/772] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bfde85c48e9..73dfeb73206 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ New Modules: * packaging: homebrew_cask (OS X) * notification: nexmo (SMS) * notification: twilio (SMS) +* notification: slack (Slack.com) * system: debconf * system: ufw * system: locale_gen From eb7717aa02f2b94a4b330a153dc4df1788967e2e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 18 Mar 2014 12:30:54 -0500 Subject: [PATCH 444/772] Make sure all tags are in a list before merging for role deps Fixes #6557 --- lib/ansible/playbook/play.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 5d2f9e0a731..9195c5f2b66 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -234,6 +234,8 @@ class Play(object): # (dep_vars) to look for tags going forward def __merge_tags(var_obj): old_tags = dep_vars.get('tags', []) + if isinstance(old_tags, basestring): + old_tags = [old_tags, ] if isinstance(var_obj, dict): new_tags = var_obj.get('tags', []) if isinstance(new_tags, basestring): From f594e8b6f0b1b32f60cf5884d1e0d31d2a3d0fe9 Mon Sep 17 00:00:00 2001 From: patrickheeney Date: Tue, 18 Mar 2014 11:17:20 -0700 Subject: [PATCH 445/772] Fix domain name in slack module --- library/notification/slack | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/notification/slack b/library/notification/slack index df069375a83..176d6b338fb 100644 --- a/library/notification/slack +++ b/library/notification/slack @@ -50,7 +50,7 @@ options: default: ansible icon_url: description: - - Url for the message sender's icon (default C(http://www.ansibleworks.com/favicon.ico)) + - Url for the message sender's icon (default C(http://www.ansible.com/favicon.ico)) required: false icon_emoji: description: @@ -143,7 +143,7 @@ def main(): msg = 
dict(type='str', required=True), channel = dict(type='str', default=None), username = dict(type='str', default='Ansible'), - icon_url = dict(type='str', default='http://www.ansibleworks.com/favicon.ico'), + icon_url = dict(type='str', default='http://www.ansible.com/favicon.ico'), icon_emoji = dict(type='str', default=None), link_names = dict(type='int', default=1, choices=[0,1]), parse = dict(type='str', default=None, choices=['none', 'full']), From 815115f79249515cb28e98e0c0854e9d388562b4 Mon Sep 17 00:00:00 2001 From: joehack3r Date: Tue, 18 Mar 2014 15:49:27 -0500 Subject: [PATCH 446/772] Update example to include delete on termination --- library/cloud/ec2 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/library/cloud/ec2 b/library/cloud/ec2 index fe215499221..6a352f3f4d0 100644 --- a/library/cloud/ec2 +++ b/library/cloud/ec2 @@ -286,7 +286,7 @@ EXAMPLES = ''' db: postgres monitoring: yes -# Single instance with additional IOPS volume from snapshot +# Single instance with additional IOPS volume from snapshot and volume delete on termination local_action: module: ec2 key_name: mykey @@ -301,6 +301,7 @@ local_action: device_type: io1 iops: 1000 volume_size: 100 + delete_on_termination: true monitoring: yes # Multiple groups example From 17fcc89b3baa66e9286f60eec7a583ea2736e148 Mon Sep 17 00:00:00 2001 From: Fabian Freyer Date: Tue, 18 Mar 2014 21:55:05 +0100 Subject: [PATCH 447/772] Fixed bug that occurred when pkgsite not specified --- library/packaging/pkgng | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/library/packaging/pkgng b/library/packaging/pkgng index 5bf8fb650f0..67d10c2635b 100644 --- a/library/packaging/pkgng +++ b/library/packaging/pkgng @@ -142,9 +142,11 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite): # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions # in /usr/local/etc/pkg/repos old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4]) - - if old_pkgng and (pkgsite != ""): - pkgsite = "PACKAGESITE=%s" % (pkgsite) + if pkgsite != "": + if old_pkgng: + pkgsite = "PACKAGESITE=%s" % (pkgsite) + else: + pkgsite = "-r %s" % (pkgsite) if not module.check_mode and cached == "no": if old_pkgng: @@ -162,7 +164,7 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite): if old_pkgng: rc, out, err = module.run_command("%s %s install -g -U -y %s" % (pkgsite, pkgng_path, package)) else: - rc, out, err = module.run_command("%s install -r %s -g -U -y %s" % (pkgng_path, pkgsite, package)) + rc, out, err = module.run_command("%s install %s -g -U -y %s" % (pkgng_path, pkgsite, package)) if not module.check_mode and not query_package(module, pkgng_path, package): module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err) From 3b5aa8bd30a3fa84eca6141a434274b1455c591b Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 18 Mar 2014 17:16:44 -0500 Subject: [PATCH 448/772] Provide a dummy ca to allow OS X to do it's OpenSSL keychain magic --- lib/ansible/module_utils/urls.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 41b1135855f..2eb26bfd6a2 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -52,6 +52,31 @@ except: import tempfile + +# This is a dummy cacert provided for Mac OS since you need at least 1 +# ca cert, regardless of validity, for Python on Mac OS to use the +# keychain functionality in OpenSSL for 
validating SSL certificates. +# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher +DUMMY_CA_CERT = """-----BEGIN CERTIFICATE----- +MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV +BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt +MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy +MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD +VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD +gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9 +gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1 +4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj +gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA +FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE +CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z +aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA +MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH +qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV +zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg= +-----END CERTIFICATE----- +""" + + class RequestWithMethod(urllib2.Request): ''' Workaround for using DELETE/PUT/etc with urllib2 @@ -112,6 +137,9 @@ class SSLValidationHandler(urllib2.BaseHandler): tmp_fd, tmp_path = tempfile.mkstemp() + # Write the dummy ca cert + os.write(tmp_fd, DUMMY_CA_CERT) + # for all of the paths, find any .crt or .pem files # and compile them into single temp file for use # in the ssl check to speed up the test From 324a943e1278c63a16b8424b3e7f2a0872197efe Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 18 Mar 2014 21:21:18 -0400 Subject: [PATCH 449/772] Fixes #6548 correct issues from rhn_register refactoring --- library/packaging/rhn_register | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/library/packaging/rhn_register b/library/packaging/rhn_register index 85ccacdac2d..552dfcc580a 100644 --- a/library/packaging/rhn_register +++ b/library/packaging/rhn_register @@ -84,10 +84,14 @@ try: except ImportError, e: module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?\n%s" % e) +# INSERT REDHAT SNIPPETS +from ansible.module_utils.redhat import * +# INSERT COMMON SNIPPETS +from ansible.module_utils.basic import * class Rhn(RegistrationBase): - def __init__(self, module, username=None, password=None): + def __init__(self, username=None, password=None): RegistrationBase.__init__(self, username, password) self.config = self.load_config() @@ -193,21 +197,26 @@ class Rhn(RegistrationBase): Register system to RHN. If enable_eus=True, extended update support will be requested. 
''' - register_cmd = "/usr/sbin/rhnreg_ks --username '%s' --password '%s' --force" % (self.username, self.password) + register_cmd = "/usr/sbin/rhnreg_ks --username='%s' --password='%s' --force" % (self.username, self.password) + if self.module.params.get('server_url', None): + register_cmd += " --serverUrl=%s" % self.module.params.get('server_url') if enable_eus: register_cmd += " --use-eus-channel" if activationkey is not None: register_cmd += " --activationkey '%s'" % activationkey # FIXME - support --profilename # FIXME - support --systemorgid - rc, stdout, stderr = self.module.run_command(register_command, check_rc=True) + rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True) def api(self, method, *args): ''' Convenience RPC wrapper ''' if not hasattr(self, 'server') or self.server is None: - url = "https://xmlrpc.%s/rpc/api" % self.hostname + if self.hostname != 'rhn.redhat.com': + url = "https://%s/rpc/api" % self.hostname + else: + url = "https://xmlrpc.%s/rpc/api" % self.hostname self.server = xmlrpclib.Server(url, verbose=0) self.session = self.server.auth.login(self.username, self.password) @@ -270,6 +279,7 @@ def main(): rhn.configure(module.params['server_url']) activationkey = module.params['activationkey'] channels = module.params['channels'] + rhn.module = module # Ensure system is registered if state == 'present': @@ -288,10 +298,10 @@ def main(): rhn.enable() rhn.register(module.params['enable_eus'] == True, activationkey) rhn.subscribe(channels) - except CommandException, e: + except Exception, e: module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, e)) - else: - module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname) + + module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname) # Ensure system is *not* registered if state == 'absent': @@ -300,14 +310,10 @@ def main(): else: try: rhn.unregister() - except CommandException, e: + except Exception, e: module.fail_json(msg="Failed to unregister: %s" % e) - else: - module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname) + module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.redhat import * main() From f6a75ebaf13a7cc63b9c94ed4fe78b108f4a8cf1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 18 Mar 2014 22:29:22 -0400 Subject: [PATCH 450/772] fixed issue with directory permissions on copy #6552 --- library/files/file | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/files/file b/library/files/file index 176bd416984..b1c57ff4d97 100644 --- a/library/files/file +++ b/library/files/file @@ -203,7 +203,7 @@ def main(): src = os.path.expanduser(src) # original_basename is used by other modules that depend on file. 
- if os.path.isdir(path) and state not in ["link", "absent", "directory"]: + if os.path.isdir(path) and state not in ["link", "absent"]: if params['original_basename']: basename = params['original_basename'] else: From b4053b67d183d9980240b13788c609e816f44958 Mon Sep 17 00:00:00 2001 From: Mahemoff Date: Wed, 19 Mar 2014 09:55:49 +0000 Subject: [PATCH 451/772] Make hipchat "notify" flag optional (default off) --- plugins/callbacks/hipchat.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/callbacks/hipchat.py b/plugins/callbacks/hipchat.py index ee4aca6aa33..a7bfdb6ec1b 100644 --- a/plugins/callbacks/hipchat.py +++ b/plugins/callbacks/hipchat.py @@ -36,6 +36,7 @@ class CallbackModule(object): HIPCHAT_TOKEN (required): HipChat API token HIPCHAT_ROOM (optional): HipChat room to post in. Default: ansible HIPCHAT_FROM (optional): Name to post as. Default: ansible + HIPCHAT_NOTIFY (optional): Add notify flag to important messages ("true" or "false"). Default: false Requires: prettytable @@ -52,6 +53,7 @@ class CallbackModule(object): self.token = os.getenv('HIPCHAT_TOKEN') self.room = os.getenv('HIPCHAT_ROOM', 'ansible') self.from_name = os.getenv('HIPCHAT_FROM', 'ansible') + self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') == 'true') if self.token is None: self.disabled = True @@ -71,7 +73,7 @@ class CallbackModule(object): params['message'] = msg params['message_format'] = msg_format params['color'] = color - params['notify'] = int(notify) + params['notify'] = int(self.allow_notify and notify) url = ('%s?auth_token=%s' % (self.msg_uri, self.token)) try: From 1d3d73a0b643a630a751ad0acc59b0f6a430b95b Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 19 Mar 2014 09:01:13 -0500 Subject: [PATCH 452/772] Only write the DUMMY_CA_CERT on OS X --- lib/ansible/module_utils/urls.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 2eb26bfd6a2..e02f171aee4 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -137,8 +137,9 @@ class SSLValidationHandler(urllib2.BaseHandler): tmp_fd, tmp_path = tempfile.mkstemp() - # Write the dummy ca cert - os.write(tmp_fd, DUMMY_CA_CERT) + # Write the dummy ca cert if we are running on Mac OS X + if platform == 'Darwin': + os.write(tmp_fd, DUMMY_CA_CERT) # for all of the paths, find any .crt or .pem files # and compile them into single temp file for use From 67517e96d39eb2b694e3c05c0937d9c0414d7e54 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 19 Mar 2014 10:30:10 -0400 Subject: [PATCH 453/772] Fixes #6567 put the git wrapper script in the module temp dir --- lib/ansible/module_utils/basic.py | 2 ++ library/source_control/git | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 2d85245cc20..43d1f3bccac 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1142,3 +1142,5 @@ class AnsibleModule(object): break return '%.2f %s' % (float(size)/ limit, suffix) +def get_module_path(): + return os.path.dirname(os.path.realpath(__file__)) diff --git a/library/source_control/git b/library/source_control/git index e61c7728156..7b6073a2ee4 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -143,7 +143,8 @@ import re import tempfile def write_ssh_wrapper(): - fd, wrapper_path = tempfile.mkstemp() + module_dir = get_module_path() + fd, wrapper_path = 
tempfile.mkstemp(prefix=module_dir + '/') fh = os.fdopen(fd, 'w+b') template = """#!/bin/sh if [ -z "$GIT_SSH_OPTS" ]; then From 1727fddf504e5efcdcc9137e036175bd98b8ac21 Mon Sep 17 00:00:00 2001 From: Thomas Omans Date: Wed, 19 Mar 2014 10:09:41 -0700 Subject: [PATCH 454/772] Allow vault encrypted vars as extra_vars Forward the vault_password to the yaml file utility. --- bin/ansible-playbook | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ansible-playbook b/bin/ansible-playbook index be178a6565c..e55e8b0a3fd 100755 --- a/bin/ansible-playbook +++ b/bin/ansible-playbook @@ -137,7 +137,7 @@ def main(args): for extra_vars_opt in options.extra_vars: if extra_vars_opt.startswith("@"): # Argument is a YAML file (JSON is a subset of YAML) - extra_vars = utils.combine_vars(extra_vars, utils.parse_yaml_from_file(extra_vars_opt[1:])) + extra_vars = utils.combine_vars(extra_vars, utils.parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass)) elif extra_vars_opt and extra_vars_opt[0] in '[{': # Arguments as YAML extra_vars = utils.combine_vars(extra_vars, utils.parse_yaml(extra_vars_opt)) From 989913bc21f3e1aec7fb1076c527d19cc3244c54 Mon Sep 17 00:00:00 2001 From: Mahemoff Date: Wed, 19 Mar 2014 18:08:30 +0000 Subject: [PATCH 455/772] Change hipchat notification default to true --- plugins/callbacks/hipchat.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/callbacks/hipchat.py b/plugins/callbacks/hipchat.py index a7bfdb6ec1b..09e8342a448 100644 --- a/plugins/callbacks/hipchat.py +++ b/plugins/callbacks/hipchat.py @@ -36,7 +36,7 @@ class CallbackModule(object): HIPCHAT_TOKEN (required): HipChat API token HIPCHAT_ROOM (optional): HipChat room to post in. Default: ansible HIPCHAT_FROM (optional): Name to post as. Default: ansible - HIPCHAT_NOTIFY (optional): Add notify flag to important messages ("true" or "false"). Default: false + HIPCHAT_NOTIFY (optional): Add notify flag to important messages ("true" or "false"). Default: true Requires: prettytable @@ -53,7 +53,8 @@ class CallbackModule(object): self.token = os.getenv('HIPCHAT_TOKEN') self.room = os.getenv('HIPCHAT_ROOM', 'ansible') self.from_name = os.getenv('HIPCHAT_FROM', 'ansible') - self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') == 'true') + self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') != 'false') + print(self.allow_notify) if self.token is None: self.disabled = True From 1640788f3046be3272bacfc289e91b433797f3a4 Mon Sep 17 00:00:00 2001 From: Mahemoff Date: Wed, 19 Mar 2014 18:15:20 +0000 Subject: [PATCH 456/772] Remove debugging line (hipchat notification) --- plugins/callbacks/hipchat.py | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/callbacks/hipchat.py b/plugins/callbacks/hipchat.py index 09e8342a448..a5acf9194ea 100644 --- a/plugins/callbacks/hipchat.py +++ b/plugins/callbacks/hipchat.py @@ -54,7 +54,6 @@ class CallbackModule(object): self.room = os.getenv('HIPCHAT_ROOM', 'ansible') self.from_name = os.getenv('HIPCHAT_FROM', 'ansible') self.allow_notify = (os.getenv('HIPCHAT_NOTIFY') != 'false') - print(self.allow_notify) if self.token is None: self.disabled = True From 409044155d2b6df49f63cc24d347e8cd715fe6a5 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 19 Mar 2014 14:36:52 -0400 Subject: [PATCH 457/772] Fixes #6579 allow for vault passwords ending with newline chars Also add a unit test for vaulteditor to verify 1.0 passwords with newline chars. 
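The failure mode behind this fix is easy to reproduce: a password file written with a bare echo ends in a newline, and a vault created while that newline was still part of the password can no longer be opened once the password is stripped on read. Roughly, as a sketch (assuming an importable ansible tree; the VaultLib round-trip mirrors the unit test added below):

    # Sketch of the trailing-newline mismatch; VaultLib as in
    # lib/ansible/utils/vault.py.
    from ansible import errors
    from ansible.utils.vault import VaultLib

    vaulted = VaultLib("secret\n").encrypt("foo")  # newline was part of the password

    try:
        VaultLib("secret").decrypt(vaulted)        # password stripped on read
    except errors.AnsibleError:
        print "stripped password no longer matches the vault"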
--- bin/ansible | 3 --- bin/ansible-playbook | 3 --- bin/ansible-vault | 2 -- test/units/TestVaultEditor.py | 26 +++++++++++++++++++ ...oo-ansible-1.0-ansible-newline-ansible.yml | 4 +++ 5 files changed, 30 insertions(+), 8 deletions(-) create mode 100644 test/units/vault_test_data/foo-ansible-1.0-ansible-newline-ansible.yml diff --git a/bin/ansible b/bin/ansible index 86a91d0b492..b403a7fc869 100755 --- a/bin/ansible +++ b/bin/ansible @@ -133,9 +133,6 @@ class Cli(object): except (OSError, IOError), e: raise errors.AnsibleError("Could not read %s: %s" % (this_path, e)) - # get rid of newline chars - tmp_vault_pass = tmp_vault_pass.strip() - if not options.ask_vault_pass: vault_pass = tmp_vault_pass diff --git a/bin/ansible-playbook b/bin/ansible-playbook index be178a6565c..344590341e6 100755 --- a/bin/ansible-playbook +++ b/bin/ansible-playbook @@ -127,9 +127,6 @@ def main(args): except (OSError, IOError), e: raise errors.AnsibleError("Could not read %s: %s" % (this_path, e)) - # get rid of newline chars - tmp_vault_pass = tmp_vault_pass.strip() - if not options.ask_vault_pass: vault_pass = tmp_vault_pass diff --git a/bin/ansible-vault b/bin/ansible-vault index 2c8094d13b1..9be2a172fb9 100755 --- a/bin/ansible-vault +++ b/bin/ansible-vault @@ -105,8 +105,6 @@ def _read_password(filename): f = open(filename, "rb") data = f.read() f.close - # get rid of newline chars - data = data.strip() return data def execute_create(args, options, parser): diff --git a/test/units/TestVaultEditor.py b/test/units/TestVaultEditor.py index 4d3f99e89a9..cf7515370ab 100644 --- a/test/units/TestVaultEditor.py +++ b/test/units/TestVaultEditor.py @@ -75,6 +75,32 @@ class TestVaultEditor(TestCase): assert error_hit == False, "error decrypting 1.0 file" assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + def test_decrypt_1_0_newline(self): + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + raise SkipTest + dirpath = tempfile.mkdtemp() + filename = os.path.join(dirpath, "foo-ansible-1.0-ansible-newline-ansible.yml") + shutil.rmtree(dirpath) + shutil.copytree("vault_test_data", dirpath) + ve = VaultEditor(None, "ansible\nansible\n", filename) + + # make sure the password functions for the cipher + error_hit = False + try: + ve.decrypt_file() + except errors.AnsibleError, e: + error_hit = True + + # verify decrypted content + f = open(filename, "rb") + fdata = f.read() + f.close() + + shutil.rmtree(dirpath) + assert error_hit == False, "error decrypting 1.0 file with newline in password" + #assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + + def test_decrypt_1_1(self): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest diff --git a/test/units/vault_test_data/foo-ansible-1.0-ansible-newline-ansible.yml b/test/units/vault_test_data/foo-ansible-1.0-ansible-newline-ansible.yml new file mode 100644 index 00000000000..dd4e6e746b0 --- /dev/null +++ b/test/units/vault_test_data/foo-ansible-1.0-ansible-newline-ansible.yml @@ -0,0 +1,4 @@ +$ANSIBLE_VAULT;1.0;AES +53616c7465645f5ff0442ae8b08e2ff316d0d6512013185df7aded44f3c0eeef1b7544d078be1fe7 +ed88d0fedcb11928df45558f4b7f80fce627fbb08c5288885ab053f4129175779a8f24f5c1113731 +7d22cee14284670953c140612edf62f92485123fc4f15099ffe776e906e08145 From 2aaecc5afae1ed7138d82fc225554880afb0a96f Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 19 Mar 2014 13:49:31 -0500 Subject: [PATCH 458/772] Fix logic in process_common_errors for unbalanced quotes --- lib/ansible/utils/__init__.py | 2 +- 
test/units/TestUtils.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 2d3fb24db30..d3c9d2e15ca 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -424,7 +424,7 @@ Or: match = True elif middle.startswith('"') and not middle.endswith('"'): match = True - if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count("'") > 2: + if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2: unbalanced = True if match: msg = msg + """ diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py index 97553271000..85564c96cc7 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -301,8 +301,7 @@ class TestUtils(unittest.TestCase): self.assertTrue('same kind of quote' in ansible.utils.process_common_errors('', "foo: '{{bar}}'baz", 6)) # unbalanced - # The first test fails and is commented out for now, logic is wrong and the test fails - #self.assertTrue('We could be wrong' in ansible.utils.process_common_errors('', 'foo: "bad" "wolf"', 6)) + self.assertTrue('We could be wrong' in ansible.utils.process_common_errors('', 'foo: "bad" "wolf"', 6)) self.assertTrue('We could be wrong' in ansible.utils.process_common_errors('', "foo: 'bad' 'wolf'", 6)) From a9000e7f3aa7256a39d6695a25c04b2228a24c10 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 19 Mar 2014 14:09:03 -0500 Subject: [PATCH 459/772] Making the error messages for bad key/dir permissions more understandable Fixes #6254 --- lib/ansible/utils/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 2d3fb24db30..ddcf2598398 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -99,7 +99,7 @@ def key_for_hostname(hostname): raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.') if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8): - raise errors.AnsibleError('Incorrect permissions on ACCELERATE_KEYS_DIR (%s)' % (C.ACCELERATE_KEYS_DIR,)) + raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))) key_path = os.path.join(key_path, hostname) @@ -113,7 +113,7 @@ def key_for_hostname(hostname): return key else: if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8): - raise errors.AnsibleError('Incorrect permissions on ACCELERATE_KEYS_FILE (%s)' % (key_path,)) + raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' 
% (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path)) fh = open(key_path) key = AesKey.Read(fh.read()) fh.close() From 5f428041db1085d877b701ba7e98f2bb08bb3ad5 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 19 Mar 2014 15:42:40 -0400 Subject: [PATCH 460/772] Fixes #6582 Re-add regex to ignore untracked files that were never tracked --- library/source_control/git | 1 + 1 file changed, 1 insertion(+) diff --git a/library/source_control/git b/library/source_control/git index 7b6073a2ee4..29dd8489f68 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -223,6 +223,7 @@ def has_local_mods(module, git_path, dest, bare): cmd = "%s status -s" % (git_path) rc, stdout, stderr = module.run_command(cmd, cwd=dest) lines = stdout.splitlines() + lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) return len(lines) > 0 From 8c2e1e2baa6e58d49ffd766120fdb6d42dd2ac2f Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 19 Mar 2014 15:56:14 -0400 Subject: [PATCH 461/772] Addresses #6579 Disallow vault passwords with newline characters by stripping them in utils --- bin/ansible-vault | 1 + lib/ansible/utils/__init__.py | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/bin/ansible-vault b/bin/ansible-vault index 9be2a172fb9..0784c9cec81 100755 --- a/bin/ansible-vault +++ b/bin/ansible-vault @@ -105,6 +105,7 @@ def _read_password(filename): f = open(filename, "rb") data = f.read() f.close + data = data.strip() return data def execute_create(args, options, parser): diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index ddcf2598398..e53697cc4c4 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -810,6 +810,10 @@ def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_ if new_vault_pass != new_vault_pass2: raise errors.AnsibleError("Passwords do not match") + # enforce no newline chars at the end of passwords + vault_pass = vault_pass.strip() + new_vault_pass = new_vault_pass.strip() + return vault_pass, new_vault_pass def ask_passwords(ask_pass=False, ask_sudo_pass=False, ask_su_pass=False, ask_vault_pass=False): From e71857fbdf465f19403741cce3dbed4c7edd7818 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 19 Mar 2014 16:08:35 -0400 Subject: [PATCH 462/772] Addresses #6579 Only strip vault passwords if given --- lib/ansible/utils/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index e53697cc4c4..f269b5c41ec 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -811,8 +811,10 @@ def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_ raise errors.AnsibleError("Passwords do not match") # enforce no newline chars at the end of passwords - vault_pass = vault_pass.strip() - new_vault_pass = new_vault_pass.strip() + if vault_pass: + vault_pass = vault_pass.strip() + if new_vault_pass: + new_vault_pass = new_vault_pass.strip() return vault_pass, new_vault_pass From b8f627d1d543eb3acb181e0ff7c7a50bc6717402 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 19 Mar 2014 16:31:03 -0400 Subject: [PATCH 463/772] Prevent rewriting the encrypted file if decryption fails --- lib/ansible/utils/vault.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 62b082a9af4..4931871cd54 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -113,7 
+113,6 @@ class VaultLib(object): # clean out header data = self._split_header(data) - # create the cipher object if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST: cipher = globals()['Vault' + self.cipher_name] @@ -123,6 +122,8 @@ class VaultLib(object): # try to unencrypt data data = this_cipher.decrypt(data, self.password) + if not data: + raise errors.AnsibleError("Decryption failed") return data @@ -209,7 +210,10 @@ class VaultEditor(object): this_vault = VaultLib(self.password) if this_vault.is_encrypted(tmpdata): dec_data = this_vault.decrypt(tmpdata) - self.write_data(dec_data, self.filename) + if not dec_data: + raise errors.AnsibleError("Decryption failed") + else: + self.write_data(dec_data, self.filename) else: raise errors.AnsibleError("%s is not encrypted" % self.filename) From bf251e3dbf81dc92c111cc01dbb9c19783c3b51a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 7 Mar 2014 14:35:18 -0600 Subject: [PATCH 464/772] Correct overly broad import from chube in linode inventory script Fixes #4875 --- plugins/inventory/linode.py | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 0cc825aa847..e68bf5d8b31 100755 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -71,21 +71,39 @@ just adapted that for Linode. ###################################################################### # Standard imports +import os import re import sys import argparse from time import time + try: import json except ImportError: import simplejson as json -# chube imports 'yaml', which is also the name of an inventory plugin, -# so we remove the plugins dir from sys.path before importing it. -old_path = sys.path -sys.path = [d for d in sys.path if "ansible/plugins" not in d] -from chube import * -sys.path = old_path +try: + from chube import load_chube_config + from chube import api as chube_api + from chube.datacenter import Datacenter + from chube.linode_obj import Linode +except: + try: + # remove local paths and other stuff that may + # cause an import conflict, as chube is sensitive + # to name collisions on importing + old_path = sys.path + sys.path = [d for d in sys.path if d not in ('', os.getcwd(), os.path.dirname(os.path.realpath(__file__)))] + + from chube import load_chube_config + from chube import api as chube_api + from chube.datacenter import Datacenter + from chube.linode_obj import Linode + + sys.path = old_path + except Exception, e: + raise Exception("could not import chube") + load_chube_config() # Imports for ansible @@ -166,7 +184,7 @@ class LinodeInventory(object): try: for node in Linode.search(status=Linode.STATUS_RUNNING): self.add_node(node) - except api.linode_api.ApiError, e: + except chube_api.linode_api.ApiError, e: print "Looks like Linode's API is down:" print print e @@ -176,7 +194,7 @@ class LinodeInventory(object): """Gets details about a specific node.""" try: return Linode.find(api_id=linode_id) - except api.linode_api.ApiError, e: + except chube_api.linode_api.ApiError, e: print "Looks like Linode's API is down:" print print e From 54635cb59f5c12453648d68dfee4a9da2d380452 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 19 Mar 2014 17:21:41 -0400 Subject: [PATCH 465/772] Add note about IRC library default change. 
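Stepping back to the chube import fix above, the sys.path juggling is a reusable pattern: chube itself imports a module named yaml, which the inventory plugins directory shadows with its own yaml plugin, so local paths are dropped for the duration of the import. A generic sketch of that pattern (the helper name is mine, not from the patch):

    import os
    import sys

    def import_unshadowed(name, anchor_file):
        # Import `name` while hiding paths that could shadow its own
        # dependencies (the empty entry, the cwd, and our own directory).
        saved = sys.path
        sys.path = [d for d in sys.path
                    if d not in ('', os.getcwd(),
                                 os.path.dirname(os.path.realpath(anchor_file)))]
        try:
            return __import__(name)
        finally:
            sys.path = saved

    # e.g.: chube = import_unshadowed('chube', __file__)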
--- library/notification/irc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/notification/irc b/library/notification/irc index 32141abe54f..bba7319a083 100644 --- a/library/notification/irc +++ b/library/notification/irc @@ -49,9 +49,9 @@ options: default: null color: description: - - Text color for the message. Default is black. + - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). required: false - default: none + default: "none" choices: [ "none", "yellow", "red", "green", "blue", "black" ] channel: description: From b1fe7ee24afe8e563e9c84088b85fec2681db7cb Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 19 Mar 2014 17:23:30 -0400 Subject: [PATCH 466/772] Add notes about new notification module. --- CHANGELOG.md | 1 + library/notification/typetalk | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 73dfeb73206..3990c71cb9b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ New Modules: * notification: nexmo (SMS) * notification: twilio (SMS) * notification: slack (Slack.com) +* notification: typetalk (Typetalk.in) * system: debconf * system: ufw * system: locale_gen diff --git a/library/notification/typetalk b/library/notification/typetalk index 56d64d15329..b987acbe837 100644 --- a/library/notification/typetalk +++ b/library/notification/typetalk @@ -4,7 +4,7 @@ DOCUMENTATION = ''' --- module: typetalk -version_added: "1.5" +version_added: "1.6" short_description: Send a message to typetalk description: - Send a message to typetalk using typetalk API ( http://developers.typetalk.in/ ) From a5bdf621cfa556082f1a01d05debfd38559d1a67 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 19 Mar 2014 17:26:59 -0400 Subject: [PATCH 467/772] add notes about new AWS modules. --- CHANGELOG.md | 2 ++ library/cloud/{ec2_metricalarm => ec2_metric_alarm} | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) rename library/cloud/{ec2_metricalarm => ec2_metric_alarm} (99%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3990c71cb9b..8ace4321ec6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,8 @@ New Modules: * cloud: digital_ocean_sshkey * cloud: rax_identity * cloud: ec2_asg (configure autoscaling groups) +* cloud: ec2_scaling_policy +* cloud: ec2_metricalarm Other notable changes: diff --git a/library/cloud/ec2_metricalarm b/library/cloud/ec2_metric_alarm similarity index 99% rename from library/cloud/ec2_metricalarm rename to library/cloud/ec2_metric_alarm index d1f3f8151fa..35faa640ffb 100644 --- a/library/cloud/ec2_metricalarm +++ b/library/cloud/ec2_metric_alarm @@ -17,7 +17,7 @@ DOCUMENTATION = """ --- -module: ec2_metricalarm +module: ec2_metric_alarm short_description: Create/update or delete AWS Cloudwatch 'metric alarms' description: - Can create or delete AWS metric alarms @@ -99,7 +99,7 @@ options: EXAMPLES = ''' - name: create alarm - ec2_metricalarm: + ec2_metric_alarm: state: present region: ap-southeast-2 name: "cpu-low" From f26ebff4388eba27c6747c4b3218966bd217dfd4 Mon Sep 17 00:00:00 2001 From: "nate@bx.psu.edu" Date: Wed, 26 Feb 2014 15:53:42 -0600 Subject: [PATCH 468/772] Create a capabilities module for managing Linux kernel capabilities(7) with setcap(8). 
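Before reading the module below, the shape of a capability spec helps: a name, one operator out of =, + and -, and flag letters, for example cap_net_bind_service+ep. A stand-alone equivalent of the parsing the module performs (a simplified sketch; the module's _parse_cap below additionally enforces when an operator is required):

    OPS = ('=', '-', '+')

    def parse_cap(cap):
        # position of the first operator character, if any
        positions = [cap.find(op) for op in OPS if cap.find(op) != -1]
        if not positions:
            return (cap, None, None)  # bare name, e.g. for state=absent
        opind = min(positions)
        return (cap[:opind], cap[opind], cap[opind + 1:])

    assert parse_cap('cap_sys_chroot+ep') == ('cap_sys_chroot', '+', 'ep')
    assert parse_cap('cap_net_raw') == ('cap_net_raw', None, None)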
---
 library/system/capabilities | 187 ++++++++++++++++++++++++++++++++++++
 1 file changed, 187 insertions(+)
 create mode 100644 library/system/capabilities

diff --git a/library/system/capabilities b/library/system/capabilities
new file mode 100644
index 00000000000..bd3832e7109
--- /dev/null
+++ b/library/system/capabilities
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Nate Coraor
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+DOCUMENTATION = '''
+---
+module: capabilities
+short_description: Manage Linux capabilities
+description:
+    - This module manipulates files' privileges using the Linux capabilities(7) system.
+version_added: "1.0"
+options:
+    path:
+        description:
+            - Specifies the path to the file to be managed.
+        required: true
+        default: null
+    capability:
+        description:
+            - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
+        required: true
+        default: null
+        aliases: [ 'cap' ]
+    state:
+        description:
+            - Whether the entry should be present or absent in the file's capabilities.
+        choices: [ "present", "absent" ]
+        default: present
+notes:
+    - The capabilities system will automatically transform operators and flags
+      into the effective set, so (for example, cap_foo=ep will probably become
+      cap_foo+ep). This module does not attempt to determine the final operator
+      and flags to compare, so you will want to ensure that your capabilities
+      argument matches the final capabilities.
+requirements: []
+author: Nate Coraor
+'''
+
+EXAMPLES = '''
+# Set cap_sys_chroot+ep on /foo
+- capabilities: path=/foo capability=cap_sys_chroot+ep state=present
+
+# Remove cap_net_bind_service from /bar
+- capabilities: path=/bar capability=cap_net_bind_service state=absent
+'''
+
+
+OPS = ( '=', '-', '+' )
+
+# ==============================================================
+
+import os
+import tempfile
+import re
+
+class CapabilitiesModule(object):
+
+    platform = 'Linux'
+    distribution = None
+
+    def __init__(self, module):
+        self.module = module
+        self.path = module.params['path'].strip()
+        self.capability = module.params['capability'].strip().lower()
+        self.state = module.params['state']
+        self.getcap_cmd = module.get_bin_path('getcap', required=True)
+        self.setcap_cmd = module.get_bin_path('setcap', required=True)
+        self.capability_tup = self._parse_cap(self.capability, op_required=self.state=='present')
+
+        self.changed = False
+
+        self.run()
+
+    def run(self):
+
+        current = self.getcap(self.path)
+        caps = [ cap[0] for cap in current ]
+
+        if self.state == 'present' and self.capability_tup not in current:
+            # need to add capability
+            if self.module.check_mode:
+                self.module.exit_json(changed=True, msg='capabilities changed')
+            else:
+                # remove from current cap list if it's already set (but op/flags differ)
+                current = filter(lambda x: x[0] != self.capability_tup[0], current)
+                # add new cap with correct op/flags
+                current.append( self.capability_tup )
+                self.module.exit_json(changed=True, msg='capabilities changed', stdout=self.setcap(self.path, current))
+        if self.state == 'absent' and self.capability_tup[0] in caps:
+            # need to remove capability
+            if self.module.check_mode:
+                self.module.exit_json(changed=True, msg='capabilities changed')
+            else:
+                # remove from current cap list and then set current list
+                current = filter(lambda x: x[0] != self.capability_tup[0], current)
+                self.module.exit_json(changed=True, msg='capabilities changed', stdout=self.setcap(self.path, current))
+
+    def getcap(self, path):
+        rval = []
+        cmd = "%s -v %s" % (self.getcap_cmd, path)
+        rc, stdout, stderr = self.module.run_command(cmd)
+        # If file xattrs are set but no caps are set the output will be:
+        # '/foo ='
+        # If file xattrs are unset the output will be:
+        # '/foo'
+        # If the file does not exist the output will be (with rc == 0...):
+        # '/foo (No such file or directory)'
+        if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):
+            self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout, stderr=stderr)
+        caps = stdout.split(' =')[1].strip().split()
+        for cap in caps:
+            cap = cap.lower()
+            # getcap condenses capabilities with the same op/flags into a
+            # comma-separated list, so we have to parse that
+            if ',' in cap:
+                cap_group = cap.split(',')
+                cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
+                for subcap in cap_group:
+                    rval.append( ( subcap, op, flags ) )
+            else:
+                rval.append(self._parse_cap(cap))
+        return rval
+
+    def setcap(self, path, caps):
+        caps = ' '.join([ ''.join(cap) for cap in caps ])
+        cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
+        rc, stdout, stderr = self.module.run_command(cmd)
+        if rc != 0:
+            self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
+        else:
+            return stdout
+
+    def _parse_cap(self, cap, op_required=True):
+        opind = -1
+        try:
+            i = 0
+            while opind == -1:
+                opind = cap.find(OPS[i])
+                i += 1
+        except:
+            if op_required:
+                self.module.fail_json(msg="Couldn't find 
operator (one of: %s)" % str(OPS)) + else: + return (cap, None, None) + op = cap[opind] + cap, flags = cap.split(op) + return (cap, op, flags) + +# ============================================================== +# main + +def main(): + + # defining module + module = AnsibleModule( + argument_spec = dict( + path = dict(aliases=['key'], required=True), + capability = dict(aliases=['cap'], required=True), + state = dict(default='present', choices=['present', 'absent']), + ), + supports_check_mode=True + ) + + CapabilitiesModule(module) + + sys.exit(0) + +# import module snippets +from ansible.module_utils.basic import * +main() From 97db1676e075e759706946be377399835d9e2845 Mon Sep 17 00:00:00 2001 From: "nate@bx.psu.edu" Date: Wed, 26 Feb 2014 16:23:40 -0600 Subject: [PATCH 469/772] Bugfix for xattrless files and the capabilities module. --- library/system/capabilities | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/library/system/capabilities b/library/system/capabilities index bd3832e7109..1d5762d1b2d 100644 --- a/library/system/capabilities +++ b/library/system/capabilities @@ -84,8 +84,6 @@ class CapabilitiesModule(object): self.setcap_cmd = module.get_bin_path('setcap', required=True) self.capability_tup = self._parse_cap(self.capability, op_required=self.state=='present') - self.changed = False - self.run() def run(self): @@ -102,15 +100,16 @@ class CapabilitiesModule(object): current = filter(lambda x: x[0] != self.capability_tup[0], current) # add new cap with correct op/flags current.append( self.capability_tup ) - self.module.exit_json(changed=True, msg='capabilities changed', stdout=self.setcap(self.path, current)) - if self.state == 'absent' and self.capability_tup[0] in caps: + self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) + elif self.state == 'absent' and self.capability_tup[0] in caps: # need to remove capability if self.module.check_mode: self.module.exit_json(changed=True, msg='capabilities changed') else: # remove from current cap list and then set current list current = filter(lambda x: x[0] != self.capability_tup[0], current) - self.module.exit_json(changed=True, msg='capabilities changed', stdout=self.setcap(self.path, current)) + self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) + self.module.exit_json(changed=False, state=self.state) def getcap(self, path): rval = [] @@ -123,19 +122,20 @@ class CapabilitiesModule(object): # If the file does not eixst the output will be (with rc == 0...): # '/foo (No such file or directory)' if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1): - self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout, stderr=stderr) - caps = stdout.split(' =')[1].strip().split() - for cap in caps: - cap = cap.lower() - # getcap condenses capabilities with the same op/flags into a - # comma-separated list, so we have to parse that - if ',' in cap: - cap_group = cap.split(',') - cap_group[-1], op, flags = self._parse_cap(cap_group[-1]) - for subcap in cap_group: - rval.append( ( subcap, op, flags ) ) - else: - rval.append(self._parse_cap(cap)) + self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr) + if stdout.strip() != path: + caps = stdout.split(' =')[1].strip().split() + for cap in caps: + cap = cap.lower() + # getcap condenses capabilities with the same 
op/flags into a + # comma-separated list, so we have to parse that + if ',' in cap: + cap_group = cap.split(',') + cap_group[-1], op, flags = self._parse_cap(cap_group[-1]) + for subcap in cap_group: + rval.append( ( subcap, op, flags ) ) + else: + rval.append(self._parse_cap(cap)) return rval def setcap(self, path, caps): From a8514dacc3432eca4cfd9093874001677ac00ac7 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 19 Mar 2014 17:37:50 -0400 Subject: [PATCH 470/772] Add capabilities module. --- CHANGELOG.md | 1 + library/system/capabilities | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8ace4321ec6..c88def6ca67 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ New Modules: * system: ufw * system: locale_gen * system: alternatives +* system: capabilities * net_infrastructure: bigip_facts * cloud: digital_ocean_domain * cloud: digital_ocean_sshkey diff --git a/library/system/capabilities b/library/system/capabilities index 1d5762d1b2d..872473001c2 100644 --- a/library/system/capabilities +++ b/library/system/capabilities @@ -25,7 +25,7 @@ module: capabilities short_description: Manage Linux capabilities description: - This module manipulates files' priviliges using the Linux capabilities(7) system. -version_added: "1.0" +version_added: "1.6" options: path: description: From 4dfa40f18e8c4b815a43593baa9bc98d0d61a09a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 15 Mar 2014 16:19:28 -0400 Subject: [PATCH 471/772] added gathering control to ansible, defaults to 'smart' --- examples/ansible.cfg | 5 +++++ lib/ansible/constants.py | 1 + lib/ansible/playbook/__init__.py | 10 +++++++--- lib/ansible/playbook/play.py | 2 +- 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 7a46caf8c0c..1a592d3e355 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -24,6 +24,11 @@ transport = smart remote_port = 22 module_lang = C +# controls implicit fact gathering (always, never or smart). +# smart gathers only if not currently in memory. +# does NOT affect explicit 'gather_facts' entries. 
+gathering = smart + # additional paths to search for roles in, colon separated #roles_path = /etc/ansible/roles diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index f9cd208c4ad..ade825d17f2 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -134,6 +134,7 @@ DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True) DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '') DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) +DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'smart').lower() DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '/usr/share/ansible_plugins/action_plugins') DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '/usr/share/ansible_plugins/callback_plugins') diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 918b9341717..88e054c3c8f 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -479,11 +479,15 @@ class PlayBook(object): def _do_setup_step(self, play): ''' get facts from the remote system ''' - if play.gather_facts is False: - return {} - host_list = self._trim_unavailable_hosts(play._play_hosts) + if play.gather_facts is None and C.DEFAULT_GATHERING == 'smart': + host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]] + if len(host_list) == 0: + return {} + elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'never'): + return {} + self.callbacks.on_setup() self.inventory.restrict_to(host_list) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 9195c5f2b66..e3e5fefcfda 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -117,7 +117,7 @@ class Play(object): self.sudo = ds.get('sudo', self.playbook.sudo) self.sudo_user = ds.get('sudo_user', self.playbook.sudo_user) self.transport = ds.get('connection', self.playbook.transport) - self.gather_facts = ds.get('gather_facts', True) + self.gather_facts = ds.get('gather_facts', None) self.remote_port = self.remote_port self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false')) self.accelerate = utils.boolean(ds.get('accelerate', 'false')) From 18adf07fc6b0c58b078eca55ca060fcffd6bb9d3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 15 Mar 2014 18:10:39 -0400 Subject: [PATCH 472/772] changed setting values and updated docs --- examples/ansible.cfg | 8 +++++--- lib/ansible/playbook/__init__.py | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 1a592d3e355..4312ec9d824 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -24,9 +24,11 @@ transport = smart remote_port = 22 module_lang = C -# controls implicit fact gathering (always, never or smart). -# smart gathers only if not currently in memory. -# does NOT affect explicit 'gather_facts' entries. +# This setting controls implicit fact gathering, valid values are +# implicit, explicit or smart (default). +# smart gathers only if facts for that host are not currently in memory. +# implicit set the default of gather_facts to True, explicit sets it +# to False. This does NOT affect explicit 'gather_facts' entries. 
gathering = smart # additional paths to search for roles in, colon separated diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 88e054c3c8f..88dd0f5f4bc 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -485,7 +485,7 @@ class PlayBook(object): host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]] if len(host_list) == 0: return {} - elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'never'): + elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'explicit'): return {} self.callbacks.on_setup() From e639b5382b6fcdaf6b8346d483bff9509626a398 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 19 Mar 2014 18:02:49 -0400 Subject: [PATCH 473/772] Change default gathering policy, add to docs. --- CHANGELOG.md | 1 + docsite/rst/intro_configuration.rst | 10 ++++++++++ examples/ansible.cfg | 13 +++++++------ lib/ansible/constants.py | 2 +- 4 files changed, 19 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c88def6ca67..f6b9d0ffbf0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Major features/changes: * The deprecated legacy variable templating system has been finally removed. Use {{ foo }} always not $foo or ${foo}. * Any data file can also be JSON. Use sparingly -- with great power comes great responsibility. Starting file with "{" or "[" denotes JSON. +* Added 'gathering' param for ansible.cfg to change the default gather_facts policy. New Modules: diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 0c25297cf35..3313cb275b1 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -211,6 +211,16 @@ is very very conservative:: forks=5 +.. _gathering: + +gathering +========= + +New in 1.6, the 'gathering' setting controls the default policy of facts gathering (variables discovered about remote systems). + +The value 'implicit' is the default, meaning facts will be gathered per play unless 'gather_facts: False' is set in the play. The value 'explicit' is the inverse, facts will not be gathered unless directly requested in the play. + +The value 'smart' means each new host that has no facts discovered will be scanned, but if the same host is addressed in multiple plays it will not be contacted again in the playbook run. This option can be useful for those wishing to save fact gathering time. hash_behaviour ============== diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 4312ec9d824..a4fc4c55aca 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -24,12 +24,13 @@ transport = smart remote_port = 22 module_lang = C -# This setting controls implicit fact gathering, valid values are -# implicit, explicit or smart (default). -# smart gathers only if facts for that host are not currently in memory. -# implicit set the default of gather_facts to True, explicit sets it -# to False. This does NOT affect explicit 'gather_facts' entries. -gathering = smart +# plays will gather facts by default, which contain information about +# the remote system. 
+# +# smart - gather by default, but don't regather if already gathered +# implicit - gather by default, turn off with gather_facts: False +# explicit - do not gather by default, must say gather_facts: True +gathering = implicit # additional paths to search for roles in, colon separated #roles_path = /etc/ansible/roles diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index ade825d17f2..7d0b270beb7 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -134,7 +134,7 @@ DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True) DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '') DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) -DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'smart').lower() +DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '/usr/share/ansible_plugins/action_plugins') DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '/usr/share/ansible_plugins/callback_plugins') From ba79cf34df9be8e414436f48a1fb2b1ba3f2b869 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 19 Mar 2014 18:05:33 -0400 Subject: [PATCH 474/772] Update the FAQ advice on recursive copy now that there are two good ways to do this. --- docsite/rst/faq.rst | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index f42ff0c999d..13ab9437cdb 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -179,17 +179,7 @@ Notice how we interchanged the bracket syntax for dots -- that can be done anywh How do I copy files recursively onto a target host? +++++++++++++++++++++++++++++++++++++++++++++++++++ -The "copy" module doesn't handle recursive copies of directories. A common solution to do this is to use a local action to call 'rsync' to recursively copy files to the managed servers. - -Here is an example:: - - --- - # ... - tasks: - - name: recursively copy files from management server to target - local_action: command rsync -a /path/to/files $inventory_hostname:/path/to/target/ - -Note that you'll need passphrase-less SSH or ssh-agent set up to let rsync copy without prompting for a passphrase or password. +The "copy" module has a recursive parameter, though if you want to do something more efficient for a large number of files, take a look at the "synchronize" module instead, which wraps rsync. See the module index for info on both of these modules. .. _shell_env: From 991399edf0e4b1609eb8c9d1608c90e22d50b753 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 19 Mar 2014 18:15:52 -0400 Subject: [PATCH 475/772] Update comments about redis config. --- library/database/redis | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/library/database/redis b/library/database/redis index 8cddb925ab8..59a1bde7277 100644 --- a/library/database/redis +++ b/library/database/redis @@ -22,9 +22,9 @@ module: redis short_description: Various redis commands, slave and flush description: - Unified utility to interact with redis instances. - 'slave' Sets a redis instance in slave or master mode. - 'flush' Flushes all the instance or a specified db. 
- 'config' Ensures a configuration setting on an instance. + 'slave' sets a redis instance in slave or master mode. + 'flush' flushes all the instance or a specified db. + 'config' (new in 1.6), ensures a configuration setting on an instance. version_added: "1.3" options: command: @@ -77,11 +77,13 @@ options: default: all choices: [ "all", "db" ] name: + version_added: 1.6 description: - A redis config key. required: false default: null value: + version_added: 1.6 description: - A redis config value. required: false From 65deb7f50f4a68e60d83fb246df8d4eab6e4d310 Mon Sep 17 00:00:00 2001 From: Till Maas Date: Tue, 11 Mar 2014 09:57:35 +0100 Subject: [PATCH 476/772] authorized_key: Be more specific about manage_dir - Explain which properties are managed when manage_dir is True. --- library/system/authorized_key | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/library/system/authorized_key b/library/system/authorized_key index 1a7c8b97b0e..8185cc4646d 100644 --- a/library/system/authorized_key +++ b/library/system/authorized_key @@ -48,7 +48,11 @@ options: version_added: "1.2" manage_dir: description: - - Whether this module should manage the directory of the authorized_keys file. Make sure to set C(manage_dir=no) if you are using an alternate directory for authorized_keys set with C(path), since you could lock yourself out of SSH access. See the example below. + - Whether this module should manage (create it, change owner and + permissions) the directory of the authorized_keys file. Make sure to + set C(manage_dir=no) if you are using an alternate directory for + authorized_keys set with C(path), since you could lock yourself out of + SSH access. See the example below. required: false choices: [ "yes", "no" ] default: "yes" From c21fa8ebee2abedffbd9b9972335d6623bde188c Mon Sep 17 00:00:00 2001 From: Till Maas Date: Mon, 17 Mar 2014 11:43:52 +0100 Subject: [PATCH 477/772] service module: Properly disable Debian services Services on Debian need to be disabled with 'disable' instead of 'remove' to avoid them being enabled again when 'update-rc.d $service defaults' is run, e.g. as part of a postinst script. --- library/system/service | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/library/system/service b/library/system/service index 2e26a47b636..8219e9c456a 100644 --- a/library/system/service +++ b/library/system/service @@ -658,7 +658,8 @@ class LinuxService(Service): return self.execute_command("%s %s enable" % (self.enable_cmd, self.name)) else: - return self.execute_command("%s -f %s remove" % (self.enable_cmd, self.name)) + return self.execute_command("%s %s disable" % (self.enable_cmd, + self.name)) # we change argument depending on real binary used: # - update-rc.d and systemctl wants enable/disable From 8f778a83dfaaea142663b878bafd84be3afee3f6 Mon Sep 17 00:00:00 2001 From: Till Maas Date: Thu, 20 Mar 2014 12:30:55 +0100 Subject: [PATCH 478/772] module_utils/atomic_move(): Restore owner/group Manually restore owner and group if both src and dest are on a separate file system. 
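Editor's note: shutil.copy2() preserves permission bits and timestamps but not ownership, which is why the fix below re-applies uid/gid captured from the original destination. A minimal standalone sketch of that pattern, assuming sufficient privileges to chown (hypothetical helper, not the patch itself):

    import os
    import shutil

    def copy_preserving_owner(src, dest):
        """Copy src over dest, keeping dest's previous owner and group."""
        st = os.stat(dest)            # capture owner/group before overwrite
        shutil.copy2(src, dest)       # copies data, mode, mtime -- not owner
        os.chown(dest, st.st_uid, st.st_gid)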
--- lib/ansible/module_utils/basic.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 42b9d3d669b..c589f92612a 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -977,11 +977,16 @@ class AnsibleModule(object): try: # leaves tmp file behind when sudo and not root if os.getenv("SUDO_USER") and os.getuid() != 0: # cleanup will happen by 'rm' of tempdir - shutil.copy(src, tmp_dest) + # copy2 will preserve some metadata + shutil.copy2(src, tmp_dest) else: shutil.move(src, tmp_dest) if self.selinux_enabled(): - self.set_context_if_different(tmp_dest, context, False) + self.set_context_if_different( + tmp_dest, context, False) + # Reset owners, they are not preserved by shutil.copy2(), which + # is what shutil.move() falls back to. + os.chown(tmp_dest, st.st_uid, st.st_gid) os.rename(tmp_dest, dest) except (shutil.Error, OSError, IOError), e: self.cleanup(tmp_dest) From 898a38b074f2561d19059315030b2b5da68a4f34 Mon Sep 17 00:00:00 2001 From: Till Maas Date: Thu, 20 Mar 2014 11:12:58 +0100 Subject: [PATCH 479/772] module_utils/atomic_move(): Use tempfile module Fix a potential race condition by using the tempfile module. --- lib/ansible/module_utils/basic.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index c589f92612a..e8e983e3057 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -55,6 +55,7 @@ import types import time import shutil import stat +import tempfile import traceback import grp import pwd @@ -972,24 +973,25 @@ class AnsibleModule(object): dest_dir = os.path.dirname(dest) dest_file = os.path.basename(dest) - tmp_dest = "%s/.%s.%s.%s" % (dest_dir,dest_file,os.getpid(),time.time()) + tmp_dest = tempfile.NamedTemporaryFile( + prefix=".ansible_tmp", dir=dest_dir, suffix=dest_file) try: # leaves tmp file behind when sudo and not root if os.getenv("SUDO_USER") and os.getuid() != 0: # cleanup will happen by 'rm' of tempdir # copy2 will preserve some metadata - shutil.copy2(src, tmp_dest) + shutil.copy2(src, tmp_dest.name) else: - shutil.move(src, tmp_dest) + shutil.move(src, tmp_dest.name) if self.selinux_enabled(): self.set_context_if_different( - tmp_dest, context, False) + tmp_dest.name, context, False) # Reset owners, they are not preserved by shutil.copy2(), which # is what shutil.move() falls back to. - os.chown(tmp_dest, st.st_uid, st.st_gid) - os.rename(tmp_dest, dest) + os.chown(tmp_dest.name, st.st_uid, st.st_gid) + os.rename(tmp_dest.name, dest) except (shutil.Error, OSError, IOError), e: - self.cleanup(tmp_dest) + self.cleanup(tmp_dest.name) self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e)) if self.selinux_enabled(): From 6e6ad972399168d21299c197975f3f4110277d87 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 18 Mar 2014 20:41:42 -0400 Subject: [PATCH 480/772] now correctly checks absolute path for src= existance for links also updated docs to be a bit clearer on symlink behaviour --- library/files/file | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/library/files/file b/library/files/file index b1c57ff4d97..4296af2011f 100644 --- a/library/files/file +++ b/library/files/file @@ -81,8 +81,8 @@ options: default: null choices: [] description: - - path of the file to link to (applies only to C(state=link)). 
Will accept absolute, - relative and nonexisting paths. Relative paths are not expanded. + - path of the file to link to (applies only to C(state= link or hard)). Will accept absolute, + relative and nonexisting (with C(force)) paths. Relative paths are not expanded. seuser: required: false default: null @@ -266,8 +266,12 @@ def main(): elif state in ['link','hard']: - if not os.path.exists(src) and not force: - module.fail_json(path=path, src=src, msg='src file does not exist') + absrc = src + if not os.path.isabs(absrc): + absrc = os.path.normpath('%s/%s' % (os.path.dirname(path), absrc)) + + if not os.path.exists(absrc) and not force: + module.fail_json(path=path, src=src, msg='src file does not exist, use "force=yes" if you really want to create the link: %s' % absrc) if state == 'hard': if not os.path.isabs(src): From 1b38bd8a40a643dc804d1ac23cb2681eb42278a4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 19 Mar 2014 09:11:17 -0400 Subject: [PATCH 481/772] fixed issue with tmp file (path can include filename) and added missing else which made it try to link x2 --- library/files/file | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/library/files/file b/library/files/file index 4296af2011f..e1bdf53375f 100644 --- a/library/files/file +++ b/library/files/file @@ -301,7 +301,7 @@ def main(): if changed and not module.check_mode: if prev_state != 'absent': # try to replace atomically - tmppath = ".%s.%s.%s.tmp" % (path,os.getpid(),time.time()) + tmppath = '/'.join([os.path.dirname(path), ".%s.%s.tmp" % (os.getpid(),time.time())]) try: if state == 'hard': os.link(src,tmppath) @@ -311,13 +311,14 @@ def main(): except OSError, e: os.unlink(tmppath) module.fail_json(path=path, msg='Error while replacing: %s' % str(e)) - try: - if state == 'hard': - os.link(src,path) - else: - os.symlink(src, path) - except OSError, e: - module.fail_json(path=path, msg='Error while linking: %s' % str(e)) + else: + try: + if state == 'hard': + os.link(src,path) + else: + os.symlink(src, path) + except OSError, e: + module.fail_json(path=path, msg='Error while linking: %s' % str(e)) changed = module.set_fs_attributes_if_different(file_args, changed) module.exit_json(dest=path, src=src, changed=changed) From 2156e87e6286f0ca6b16c744eb4ba9270e2c4442 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 20 Mar 2014 08:10:02 -0400 Subject: [PATCH 482/772] Update cpanm --- library/packaging/cpanm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/cpanm b/library/packaging/cpanm index 9fa003e1af0..5b1a9878d21 100644 --- a/library/packaging/cpanm +++ b/library/packaging/cpanm @@ -25,7 +25,7 @@ module: cpanm short_description: Manages Perl library dependencies. description: - Manage Perl library dependencies. -version_added: "1.0" +version_added: "1.6" options: name: description: From e157355fd6d26f7e66e32f417c8108ce7a7794b6 Mon Sep 17 00:00:00 2001 From: Steve Smith Date: Thu, 20 Mar 2014 13:14:24 +0100 Subject: [PATCH 483/772] Only unlink the symlink tempfile on error if it was created in the previous operation. 
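Editor's note: the link handling this fix hardens follows the standard atomic-replace pattern: build the new link under a temporary name in the same directory, rename() it over the destination, and clean up the temporary name only if it was actually created. A self-contained sketch, using a hypothetical replace_with_symlink() helper (os.path.lexists is used so a dangling link is still cleaned up):

    import os
    import time

    def replace_with_symlink(src, path):
        """Atomically replace 'path' with a symlink to 'src'."""
        # temp name lives in the same directory so os.rename() stays on
        # one filesystem and remains atomic
        tmppath = os.path.join(os.path.dirname(path) or '.',
                               '.%s.%s.tmp' % (os.getpid(), time.time()))
        try:
            os.symlink(src, tmppath)
            os.rename(tmppath, path)
        except OSError:
            # only unlink the tempfile if it was created above
            if os.path.lexists(tmppath):
                os.unlink(tmppath)
            raise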
--- library/files/file | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/library/files/file b/library/files/file index b1c57ff4d97..710a9224c13 100644 --- a/library/files/file +++ b/library/files/file @@ -305,7 +305,8 @@ def main(): os.symlink(src, tmppath) os.rename(tmppath, path) except OSError, e: - os.unlink(tmppath) + if os.path.exists(tmppath): + os.unlink(tmppath) module.fail_json(path=path, msg='Error while replacing: %s' % str(e)) try: if state == 'hard': From 1f980b68808ef45beb923963c1737db7febdd4ed Mon Sep 17 00:00:00 2001 From: jjshoe Date: Thu, 20 Mar 2014 09:21:10 -0500 Subject: [PATCH 484/772] assign_public_ip was actually added in 1.5 Checked 1.4.5 and this option doesn't exist. Will check/update/pull docs next. --- library/cloud/ec2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2 b/library/cloud/ec2 index 6a352f3f4d0..3752136098b 100644 --- a/library/cloud/ec2 +++ b/library/cloud/ec2 @@ -170,7 +170,7 @@ options: default: null aliases: [] assign_public_ip: - version_added: "1.4" + version_added: "1.5" description: - when provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+ required: false From f97243d6ed7dc7bb23c9cf675ae84c116beae457 Mon Sep 17 00:00:00 2001 From: Maykel Moya Date: Thu, 20 Mar 2014 15:50:08 +0100 Subject: [PATCH 485/772] ec2_group: Add support for handling egress rules --- library/cloud/ec2_group | 74 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/library/cloud/ec2_group b/library/cloud/ec2_group index 1dd463cc8d6..e0b2bc85021 100644 --- a/library/cloud/ec2_group +++ b/library/cloud/ec2_group @@ -135,6 +135,7 @@ def main(): description=dict(required=True), vpc_id=dict(), rules=dict(), + rules_egress=dict(), state = dict(default='present', choices=['present', 'absent']), ) ) @@ -147,6 +148,7 @@ def main(): description = module.params['description'] vpc_id = module.params['vpc_id'] rules = module.params['rules'] + rules_egress = module.params['rules_egress'] state = module.params.get('state') changed = False @@ -203,6 +205,8 @@ def main(): # create a lookup for all existing rules on the group if group: + + # Manage ingress rules groupRules = {} addRulesToLookup(group.rules, 'in', groupRules) @@ -260,6 +264,76 @@ def main(): group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup) changed = True + # Manage egress rules + groupRules = {} + addRulesToLookup(group.rules_egress, 'out', groupRules) + + # Now, go through all provided rules and ensure they are there. 
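# Editor's sketch (illustrative, not part of the patch): the
# reconciliation above keys every grant by a flat
# 'direction-proto-from-to-group-cidr' string, so desired rules can be
# matched against existing grants and any leftovers revoked.
def make_rule_key(prefix, proto, from_port, to_port, group_id, cidr_ip):
    return '%s-%s-%s-%s-%s-%s' % (prefix, proto, from_port, to_port,
                                  group_id, cidr_ip)

existing = {make_rule_key('out', 'tcp', 80, 80, None, '0.0.0.0/0'): object()}
wanted = make_rule_key('out', 'tcp', 80, 80, None, '0.0.0.0/0')
assert wanted in existing  # rule already present: keep it, skip authorize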
+        if rules_egress:
+            for rule in rules_egress:
+                group_id = None
+                group_name = None
+                ip = None
+                if 'group_id' in rule and 'cidr_ip' in rule:
+                    module.fail_json(msg="Specify group_id OR cidr_ip, not both")
+                elif 'group_name' in rule and 'cidr_ip' in rule:
+                    module.fail_json(msg="Specify group_name OR cidr_ip, not both")
+                elif 'group_id' in rule and 'group_name' in rule:
+                    module.fail_json(msg="Specify group_id OR group_name, not both")
+                elif 'group_id' in rule:
+                    group_id = rule['group_id']
+                elif 'group_name' in rule:
+                    group_name = rule['group_name']
+                    if group_name in groups:
+                        group_id = groups[group_name].id
+                    elif group_name == name:
+                        group_id = group.id
+                        groups[group_id] = group
+                        groups[group_name] = group
+                elif 'cidr_ip' in rule:
+                    ip = rule['cidr_ip']
+
+                if rule['proto'] == 'all':
+                    rule['proto'] = -1
+                    rule['from_port'] = None
+                    rule['to_port'] = None
+
+                # If rule already exists, don't later delete it
+                ruleId = "%s-%s-%s-%s-%s-%s" % ('out', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip)
+                if ruleId in groupRules:
+                    del groupRules[ruleId]
+                # Otherwise, add new rule
+                else:
+                    grantGroup = None
+                    if group_id:
+                        grantGroup = groups[group_id].id
+
+                    if not module.check_mode:
+                        ec2.authorize_security_group_egress(
+                                group_id=group.id,
+                                ip_protocol=rule['proto'],
+                                from_port=rule['from_port'],
+                                to_port=rule['to_port'],
+                                src_group_id=grantGroup,
+                                cidr_ip=ip)
+                    changed = True
+
+        # Finally, remove anything left in the groupRules -- these will be defunct rules
+        for rule in groupRules.itervalues():
+            for grant in rule.grants:
+                grantGroup = None
+                if grant.group_id:
+                    grantGroup = groups[grant.group_id].id
+                if not module.check_mode:
+                    ec2.revoke_security_group_egress(
+                            group_id=group.id,
+                            ip_protocol=rule.ip_protocol,
+                            from_port=rule.from_port,
+                            to_port=rule.to_port,
+                            src_group_id=grantGroup,
+                            cidr_ip=grant.cidr_ip)
+                changed = True
+
     if group:
         module.exit_json(changed=changed, group_id=group.id)
     else:

From 77d7165dde0b5f5642ec3667854d55ca2482602c Mon Sep 17 00:00:00 2001
From: Maykel Moya
Date: Thu, 20 Mar 2014 16:19:35 +0100
Subject: [PATCH 486/772] ec2_group: Deduplicate rule parsing/validation code

---
 library/cloud/ec2_group | 83 ++++++++++++++++++++---------------------
 1 file changed, 41 insertions(+), 42 deletions(-)

diff --git a/library/cloud/ec2_group b/library/cloud/ec2_group
index e0b2bc85021..d685b29aa06 100644
--- a/library/cloud/ec2_group
+++ b/library/cloud/ec2_group
@@ -128,6 +128,45 @@ def addRulesToLookup(rules, prefix, dict):
             dict["%s-%s-%s-%s-%s-%s" % (prefix, rule.ip_protocol, rule.from_port, rule.to_port, grant.group_id, grant.cidr_ip)] = rule


+def get_target_from_rule(rule, name, groups):
+    """
+    Returns tuple of (group_id, ip) after validating rule params.
+
+    rule: Dict describing a rule.
+    name: Name of the security group being managed.
+    groups: Dict of all available security groups.
+
+    AWS accepts an ip range or a security group as target of a rule. This
+    function validates the rule specification and returns either a non-None
+    group_id or a non-None ip range.
+ """ + + group_id = None + group_name = None + ip = None + if 'group_id' in rule and 'cidr_ip' in rule: + module.fail_json(msg="Specify group_id OR cidr_ip, not both") + elif 'group_name' in rule and 'cidr_ip' in rule: + module.fail_json(msg="Specify group_name OR cidr_ip, not both") + elif 'group_id' in rule and 'group_name' in rule: + module.fail_json(msg="Specify group_id OR group_name, not both") + elif 'group_id' in rule: + group_id = rule['group_id'] + elif 'group_name' in rule: + group_name = rule['group_name'] + if group_name in groups: + group_id = groups[group_name].id + elif group_name == name: + group_id = group.id + groups[group_id] = group + groups[group_name] = group + elif 'cidr_ip' in rule: + ip = rule['cidr_ip'] + + return group_id, ip + + def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( @@ -213,27 +252,7 @@ def main(): # Now, go through all provided rules and ensure they are there. if rules: for rule in rules: - group_id = None - group_name = None - ip = None - if 'group_id' in rule and 'cidr_ip' in rule: - module.fail_json(msg="Specify group_id OR cidr_ip, not both") - elif 'group_name' in rule and 'cidr_ip' in rule: - module.fail_json(msg="Specify group_name OR cidr_ip, not both") - elif 'group_id' in rule and 'group_name' in rule: - module.fail_json(msg="Specify group_id OR group_name, not both") - elif 'group_id' in rule: - group_id = rule['group_id'] - elif 'group_name' in rule: - group_name = rule['group_name'] - if group_name in groups: - group_id = groups[group_name].id - elif group_name == name: - group_id = group.id - groups[group_id] = group - groups[group_name] = group - elif 'cidr_ip' in rule: - ip = rule['cidr_ip'] + group_id, ip = get_target_from_rule(rule, name, groups) if rule['proto'] == 'all': rule['proto'] = -1 @@ -271,27 +290,7 @@ def main(): # Now, go through all provided rules and ensure they are there. 
        if rules_egress:
            for rule in rules_egress:
-                group_id = None
-                group_name = None
-                ip = None
-                if 'group_id' in rule and 'cidr_ip' in rule:
-                    module.fail_json(msg="Specify group_id OR cidr_ip, not both")
-                elif 'group_name' in rule and 'cidr_ip' in rule:
-                    module.fail_json(msg="Specify group_name OR cidr_ip, not both")
-                elif 'group_id' in rule and 'group_name' in rule:
-                    module.fail_json(msg="Specify group_id OR group_name, not both")
-                elif 'group_id' in rule:
-                    group_id = rule['group_id']
-                elif 'group_name' in rule:
-                    group_name = rule['group_name']
-                    if group_name in groups:
-                        group_id = groups[group_name].id
-                    elif group_name == name:
-                        group_id = group.id
-                        groups[group_id] = group
-                        groups[group_name] = group
-                elif 'cidr_ip' in rule:
-                    ip = rule['cidr_ip']
+                group_id, ip = get_target_from_rule(rule, name, groups)

                 if rule['proto'] == 'all':
                     rule['proto'] = -1

From d327e3d11a3c7092327630541d01b04e8450b335 Mon Sep 17 00:00:00 2001
From: James Tanner
Date: Thu, 20 Mar 2014 11:55:46 -0400
Subject: [PATCH 487/772] Fixes #5622 included tasks in roles now inherit tags
 from the role

---
 lib/ansible/playbook/play.py | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index e3e5fefcfda..914af951f78 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -142,6 +142,8 @@ class Play(object):
         self._tasks      = self._load_tasks(self._ds.get('tasks', []), load_vars)
         self._handlers   = self._load_tasks(self._ds.get('handlers', []), load_vars)
+        # apply any missing tags to role tasks
+        self._late_merge_role_tags()

         if self.sudo_user != 'root':
             self.sudo = True
@@ -712,6 +714,31 @@ class Play(object):

     # *************************************************

+    def _late_merge_role_tags(self):
+        # build a local dict of tags for roles
+        role_tags = {}
+        for task in self._ds['tasks']:
+            if 'role_name' in task:
+                this_role = task['role_name']
+
+                if this_role not in role_tags:
+                    role_tags[this_role] = []
+
+                if 'tags' in task['vars']:
+                    if isinstance(task['vars']['tags'], basestring):
+                        role_tags[task['role_name']] += shlex.split(task['vars']['tags'])
+                    else:
+                        role_tags[task['role_name']] += task['vars']['tags']
+
+        # apply each role's tags to its tasks
+        for idx, val in enumerate(self._tasks):
+            if hasattr(val, 'role_name'):
+                this_role = val.role_name
+                if this_role in role_tags:
+                    self._tasks[idx].tags = sorted(set(self._tasks[idx].tags + role_tags[this_role]))
+
+    # *************************************************
+
     def _has_vars_in(self, msg):
         return "$" in msg or "{{" in msg

From 649fcd3e1cc72c480a1c87c6985a319488f64afa Mon Sep 17 00:00:00 2001
From: Maykel Moya
Date: Thu, 20 Mar 2014 16:38:54 +0100
Subject: [PATCH 488/772] ec2_group: Auto create missing groups referenced in
 rules

Suppose a pair of groups, A and B, depending on each other. One solution
for breaking the circular dependency at playbook level:

 - declare group A without dependencies
 - declare group B depending on A
 - declare group A depending on B

This patch breaks the dependency at module level. Whenever a depended-on
group is missing it's first created. This approach requires only two
tasks:

 - declare group A depending on B (group B will be auto created)
 - declare group B depending on A

When creating a group EC2 requires you to pass the group description.
In order to fulfill this, rules now accept the `group_desc` param. Note
that group description can't be changed once the group is created, so
it's nice to keep descriptions in sync.
Concrete example: - ec2_group: name: mysql-client description: MySQL Client rules_egress: - proto: tcp from_port: 3306 to_port: 3306 group_name: mysql-server group_desc: MySQL Server - ec2_group: name: mysql-server description: MySQL Server rules: - proto: tcp from_port: 3306 to_port: 3306 group_name: mysql-client --- library/cloud/ec2_group | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/library/cloud/ec2_group b/library/cloud/ec2_group index d685b29aa06..ede8050c0a9 100644 --- a/library/cloud/ec2_group +++ b/library/cloud/ec2_group @@ -145,6 +145,7 @@ def get_target_from_rule(rule, name, groups): group_id = None group_name = None ip = None + target_group_created = False if 'group_id' in rule and 'cidr_ip' in rule: module.fail_json(msg="Specify group_id OR cidr_ip, not both") elif 'group_name' in rule and 'cidr_ip' in rule: @@ -161,10 +162,19 @@ def get_target_from_rule(rule, name, groups): group_id = group.id groups[group_id] = group groups[group_name] = group + else: + if not rule.get('group_desc', '').strip(): + module.fail_json(msg="group %s will be automatically created by rule %s and no description was provided" % (group_name, rule)) + if not module.check_mode: + auto_group = ec2.create_security_group(group_name, rule['group_desc'], vpc_id=vpc_id) + group_id = auto_group.id + groups[group_id] = auto_group + groups[group_name] = auto_group + target_group_created = True elif 'cidr_ip' in rule: ip = rule['cidr_ip'] - return group_id, ip + return group_id, ip, target_group_created def main(): @@ -252,7 +262,9 @@ def main(): # Now, go through all provided rules and ensure they are there. if rules: for rule in rules: - group_id, ip = get_target_from_rule(rule, name, groups) + group_id, ip, target_group_created = get_target_from_rule(rule, name, groups) + if target_group_created: + changed = True if rule['proto'] == 'all': rule['proto'] = -1 @@ -290,7 +302,9 @@ def main(): # Now, go through all provided rules and ensure they are there. if rules_egress: for rule in rules_egress: - group_id, ip = get_target_from_rule(rule, name, groups) + group_id, ip, target_group_created = get_target_from_rule(rule, name, groups) + if target_group_created: + changed = True if rule['proto'] == 'all': rule['proto'] = -1 From 3156df9931e3eabb1f3a7dccf02a73e35d97c228 Mon Sep 17 00:00:00 2001 From: Maykel Moya Date: Thu, 20 Mar 2014 17:20:21 +0100 Subject: [PATCH 489/772] ec2_group: Request a fresh group object after creation When a group is created, an egress_rule ALLOW ALL to 0.0.0.0/0 is added automatically but it's not reflected in the object returned by the AWS API call. After creation we re-read the group for getting an updated object. --- library/cloud/ec2_group | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/library/cloud/ec2_group b/library/cloud/ec2_group index ede8050c0a9..f15756c97ab 100644 --- a/library/cloud/ec2_group +++ b/library/cloud/ec2_group @@ -248,6 +248,12 @@ def main(): '''no match found, create it''' if not module.check_mode: group = ec2.create_security_group(name, description, vpc_id=vpc_id) + + # When a group is created, an egress_rule ALLOW ALL + # to 0.0.0.0/0 is added automatically but it's not + # reflected in the object returned by the AWS API + # call. 
We re-read the group for getting an updated object + group = ec2.get_all_security_groups(group_ids=(group.id,))[0] changed = True else: module.fail_json(msg="Unsupported state requested: %s" % state) From b642e39e675db2d848c38cd8912410f5ed5bfec7 Mon Sep 17 00:00:00 2001 From: Maykel Moya Date: Thu, 20 Mar 2014 17:22:56 +0100 Subject: [PATCH 490/772] ec2_group: rules are not a required task argument --- library/cloud/ec2_group | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2_group b/library/cloud/ec2_group index f15756c97ab..cf290c34746 100644 --- a/library/cloud/ec2_group +++ b/library/cloud/ec2_group @@ -25,7 +25,7 @@ options: rules: description: - List of firewall rules to enforce in this group (see example). - required: true + required: false region: description: - the EC2 region to use From a429ff26dc8a9c9abc663eaeb5810b5e6eab5bec Mon Sep 17 00:00:00 2001 From: Maykel Moya Date: Thu, 20 Mar 2014 17:23:53 +0100 Subject: [PATCH 491/772] ec2_group: Add documentation for rules_egress --- library/cloud/ec2_group | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/library/cloud/ec2_group b/library/cloud/ec2_group index cf290c34746..e25185c5f1c 100644 --- a/library/cloud/ec2_group +++ b/library/cloud/ec2_group @@ -24,7 +24,11 @@ options: required: false rules: description: - - List of firewall rules to enforce in this group (see example). + - List of firewall inbound rules to enforce in this group (see example). + required: false + rules_egress: + description: + - List of firewall outbound rules to enforce in this group (see example). required: false region: description: @@ -113,6 +117,11 @@ EXAMPLES = ''' - proto: all # the containing group name may be specified here group_name: example + rules_egress: + - proto: tcp + from_port: 80 + to_port: 80 + group_name: example ''' try: From 95c79ad483c3be40c192f56f59869d670388de1b Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 20 Mar 2014 13:12:33 -0400 Subject: [PATCH 492/772] Fixes #4239 merge injects with new vars loaded from files --- lib/ansible/playbook/play.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 914af951f78..2da555bd0f9 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -777,6 +777,7 @@ class Play(object): if self._has_vars_in(filename2) and not self._has_vars_in(filename3): # this filename has variables in it that were fact specific # so it needs to be loaded into the per host SETUP_CACHE + data = utils.combine_vars(inject, data) self.playbook.SETUP_CACHE[host].update(data) self.playbook.callbacks.on_import_for_host(host, filename4) elif not self._has_vars_in(filename4): @@ -809,6 +810,7 @@ class Play(object): if host is not None and self._has_vars_in(filename2) and not self._has_vars_in(filename3): # running a host specific pass and has host specific variables # load into setup cache + new_vars = utils.combine_vars(inject, new_vars) self.playbook.SETUP_CACHE[host] = utils.combine_vars( self.playbook.SETUP_CACHE[host], new_vars) self.playbook.callbacks.on_import_for_host(host, filename4) From c729bf209c2a4d6963f307f80f24e01f74822924 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 20 Mar 2014 13:22:03 -0400 Subject: [PATCH 493/772] Update documentation YAML syntax. 
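Editor's note on PATCH 492 above: the fix layers vars loaded from fact-dependent files on top of the current inject before caching them, so later lookups resolve consistently per host. A minimal sketch of that merge order, assuming the plain-replace behaviour of utils.combine_vars (the real function also honors hash_behaviour=merge):

    # Illustrative only: keys loaded from the vars file win over the
    # values already present in the inject.
    def combine_vars_sketch(base, new):
        merged = base.copy()
        merged.update(new)
        return merged

    inject = {'ansible_hostname': 'web1', 'port': 80}
    loaded = {'port': 8080}
    assert combine_vars_sketch(inject, loaded)['port'] == 8080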
--- library/cloud/ec2_metric_alarm | 6 +----- library/cloud/ec2_scaling_policy | 3 +-- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/library/cloud/ec2_metric_alarm b/library/cloud/ec2_metric_alarm index 35faa640ffb..b9d7173b2f3 100644 --- a/library/cloud/ec2_metric_alarm +++ b/library/cloud/ec2_metric_alarm @@ -15,10 +15,8 @@ # along with Ansible. If not, see . DOCUMENTATION = """ - ---- module: ec2_metric_alarm -short_description: Create/update or delete AWS Cloudwatch 'metric alarms' +short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'" description: - Can create or delete AWS metric alarms - Metrics you wish to alarm on must already exist @@ -93,8 +91,6 @@ options: description: - A list of the names of action(s) to take when the alarm is in the 'ok' status required: false - ---- """ EXAMPLES = ''' diff --git a/library/cloud/ec2_scaling_policy b/library/cloud/ec2_scaling_policy index b2395cd0a3c..f5ad4d3b010 100755 --- a/library/cloud/ec2_scaling_policy +++ b/library/cloud/ec2_scaling_policy @@ -1,8 +1,7 @@ #!/usr/bin/python DOCUMENTATION = """ ---- -module:ec2_scaling_policy +module: ec2_scaling_policy short_description: Create or delete AWS scaling policies for Autoscaling groups description: - Can create or delete scaling policies for autoscaling groups From 51422970767bfd012521735a7714cbb62c3c83fb Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 3 Feb 2014 10:53:35 -0600 Subject: [PATCH 494/772] Improvements to the rax inventory script --- plugins/inventory/rax.py | 156 ++++++++++++++++++++++++++------------- 1 file changed, 105 insertions(+), 51 deletions(-) diff --git a/plugins/inventory/rax.py b/plugins/inventory/rax.py index 6836db61f66..039233005d7 100755 --- a/plugins/inventory/rax.py +++ b/plugins/inventory/rax.py @@ -22,9 +22,11 @@ DOCUMENTATION = ''' inventory: rax short_description: Rackspace Public Cloud external inventory script description: - - Generates inventory that Ansible can understand by making API request to Rackspace Public Cloud API + - Generates inventory that Ansible can understand by making API request to + Rackspace Public Cloud API - | - When run against a specific host, this script returns the following variables: + When run against a specific host, this script returns the following + variables: rax_os-ext-sts_task_state rax_addresses rax_links @@ -65,12 +67,23 @@ options: authors: - Jesse Keating - Paul Durivage + - Matt Martz notes: - - RAX_CREDS_FILE is an optional environment variable that points to a pyrax-compatible credentials file. - - If RAX_CREDS_FILE is not supplied, rax.py will look for a credentials file at ~/.rackspace_cloud_credentials. + - RAX_CREDS_FILE is an optional environment variable that points to a + pyrax-compatible credentials file. + - If RAX_CREDS_FILE is not supplied, rax.py will look for a credentials file + at ~/.rackspace_cloud_credentials. 
- See https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating - - RAX_REGION is an optional environment variable to narrow inventory search scope - - RAX_REGION, if used, needs a value like ORD, DFW, SYD (a Rackspace datacenter) and optionally accepts a comma-separated list + - RAX_REGION is an optional environment variable to narrow inventory search + scope + - RAX_REGION, if used, needs a value like ORD, DFW, SYD (a Rackspace + datacenter) and optionally accepts a comma-separated list + - RAX_ENV is an environment variable that will use an environment as + configured in ~/.pyrax.cfg, see + https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration + - RAX_META_PREFIX is an environment variable that changes the prefix used + for meta key/value groups. For compatibility with ec2.py set to + RAX_META_PREFIX=tag requirements: [ "pyrax" ] examples: - description: List server instances @@ -83,13 +96,14 @@ examples: code: RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com ''' -import sys -import re import os - +import re +import sys import argparse import collections +from types import NoneType + try: import json except: @@ -98,9 +112,26 @@ except: try: import pyrax except ImportError: - print('pyrax required for this module') + print('pyrax is required for this module') sys.exit(1) +NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) + + +def rax_slugify(value): + return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_')) + + +def to_dict(obj): + instance = {} + for key in dir(obj): + value = getattr(obj, key) + if (isinstance(value, NON_CALLABLES) and not key.startswith('_')): + key = rax_slugify(key) + instance[key] = value + + return instance + def host(regions, hostname): hostvars = {} @@ -110,15 +141,7 @@ def host(regions, hostname): cs = pyrax.connect_to_cloudservers(region=region) for server in cs.servers.list(): if server.name == hostname: - keys = [key for key in vars(server) if key not in ('manager', '_info')] - for key in keys: - # Extract value - value = getattr(server, key) - - # Generate sanitized key - key = 'rax_' + (re.sub("[^A-Za-z0-9\-]", "_", key) - .lower() - .lstrip("_")) + for key, value in to_dict(server).items(): hostvars[key] = value # And finally, add an IP address @@ -129,6 +152,7 @@ def host(regions, hostname): def _list(regions): groups = collections.defaultdict(list) hostvars = collections.defaultdict(dict) + images = {} # Go through all the regions looking for servers for region in regions: @@ -139,26 +163,39 @@ def _list(regions): groups[region].append(server.name) # Check if group metadata key in servers' metadata - try: - group = server.metadata['group'] - except KeyError: - pass - else: - # Create group if not exist and add the server + group = server.metadata.get('group') + if group: groups[group].append(server.name) + for extra_group in server.metadata.get('groups', '').split(','): + groups[extra_group].append(server.name) + # Add host metadata - keys = [key for key in vars(server) if key not in ('manager', '_info')] - for key in keys: - # Extract value - value = getattr(server, key) - - # Generate sanitized key - key = 'rax_' + (re.sub("[^A-Za-z0-9\-]", "_", key) - .lower() - .lstrip('_')) + for key, value in to_dict(server).items(): hostvars[server.name][key] = value + hostvars[server.name]['rax_region'] = region + + for key, value in server.metadata.iteritems(): + prefix = os.getenv('RAX_META_PREFIX', 'meta') + groups['%s_%s_%s' % (prefix, key, 
value)].append(server.name) + + groups['instance-%s' % server.id].append(server.name) + groups['flavor-%s' % server.flavor['id']].append(server.name) + try: + imagegroup = 'image-%s' % images[server.image['id']] + groups[imagegroup].append(server.name) + groups['image-%s' % server.image['id']].append(server.name) + except KeyError: + try: + image = cs.images.get(server.image['id']) + except cs.exceptions.NotFound: + groups['image-%s' % server.image['id']].append(server.name) + else: + images[image.id] = image.human_id + groups['image-%s' % image.human_id].append(server.name) + groups['image-%s' % server.image['id']].append(server.name) + # And finally, add an IP address hostvars[server.name]['ansible_ssh_host'] = server.accessIPv4 @@ -172,7 +209,7 @@ def parse_args(): 'inventory module') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--list', action='store_true', - help='List active servers') + help='List active servers') group.add_argument('--host', help='List details about the specific host') return parser.parse_args() @@ -180,38 +217,54 @@ def parse_args(): def setup(): default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials') + env = os.getenv('RAX_ENV', None) + if env: + pyrax.set_environment(env) + + keyring_username = pyrax.get_setting('keyring_username') + # Attempt to grab credentials from environment first try: - creds_file = os.environ['RAX_CREDS_FILE'] + creds_file = os.path.expanduser(os.environ['RAX_CREDS_FILE']) except KeyError, e: - # But if that fails, use the default location of ~/.rackspace_cloud_credentials + # But if that fails, use the default location of + # ~/.rackspace_cloud_credentials if os.path.isfile(default_creds_file): creds_file = default_creds_file - else: + elif not keyring_username: sys.stderr.write('No value in environment variable %s and/or no ' 'credentials file at %s\n' % (e.message, default_creds_file)) sys.exit(1) - pyrax.set_setting('identity_type', 'rackspace') + identity_type = pyrax.get_setting('identity_type') + pyrax.set_setting('identity_type', identity_type or 'rackspace') + + region = pyrax.get_setting('region') try: - pyrax.set_credential_file(os.path.expanduser(creds_file)) + if keyring_username: + pyrax.keyring_auth(keyring_username, region=region) + else: + pyrax.set_credential_file(creds_file, region=region) except Exception, e: sys.stderr.write("%s: %s\n" % (e, e.message)) sys.exit(1) regions = [] - for region in os.getenv('RAX_REGION', 'all').split(','): - region = region.strip().upper() - if region == 'ALL': - regions = pyrax.regions - break - elif region not in pyrax.regions: - sys.stderr.write('Unsupported region %s' % region) - sys.exit(1) - elif region not in regions: - regions.append(region) + if region: + regions.append(region) + else: + for region in os.getenv('RAX_REGION', 'all').split(','): + region = region.strip().upper() + if region == 'ALL': + regions = pyrax.regions + break + elif region not in pyrax.regions: + sys.stderr.write('Unsupported region %s' % region) + sys.exit(1) + elif region not in regions: + regions.append(region) return regions @@ -225,5 +278,6 @@ def main(): host(regions, args.host) sys.exit(0) + if __name__ == '__main__': - main() \ No newline at end of file + main() From ea5186ca63843f63d48f0f2e0b11269d9aac38a1 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 20 Mar 2014 13:55:02 -0400 Subject: [PATCH 495/772] Fixes #6590 add set_remote_user parameter to synchronize This allows usage of custom ssh configs for remote hosts where the inventory user 
does not match the configured user. --- .../runner/action_plugins/synchronize.py | 23 ++++++++++++------- library/files/synchronize | 7 ++++++ 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/lib/ansible/runner/action_plugins/synchronize.py b/lib/ansible/runner/action_plugins/synchronize.py index c66fcdff3ce..8bd0bcd0f5f 100644 --- a/lib/ansible/runner/action_plugins/synchronize.py +++ b/lib/ansible/runner/action_plugins/synchronize.py @@ -30,7 +30,10 @@ class ActionModule(object): def _process_origin(self, host, path, user): if not host in ['127.0.0.1', 'localhost']: - return '%s@%s:%s' % (user, host, path) + if user: + return '%s@%s:%s' % (user, host, path) + else: + return '%s:%s' % (host, path) else: return path @@ -38,7 +41,10 @@ class ActionModule(object): transport = self.runner.transport return_data = None if not host in ['127.0.0.1', 'localhost'] or transport != "local": - return_data = '%s@%s:%s' % (user, host, path) + if user: + return_data = '%s@%s:%s' % (user, host, path) + else: + return_data = '%s:%s' % (host, path) else: return_data = path @@ -122,13 +128,14 @@ class ActionModule(object): if process_args or use_delegate: user = None - if use_delegate: - user = inject['hostvars'][conn.delegate].get('ansible_ssh_user') - - if not use_delegate or not user: - user = inject.get('ansible_ssh_user', - self.runner.remote_user) + if utils.boolean(options.get('set_remote_user', 'yes')): + if use_delegate: + user = inject['hostvars'][conn.delegate].get('ansible_ssh_user') + if not use_delegate or not user: + user = inject.get('ansible_ssh_user', + self.runner.remote_user) + if use_delegate: # FIXME private_key = inject.get('ansible_ssh_private_key_file', self.runner.private_key_file) diff --git a/library/files/synchronize b/library/files/synchronize index eb556c30f53..1401a326fa1 100644 --- a/library/files/synchronize +++ b/library/files/synchronize @@ -119,6 +119,12 @@ options: - Specify a --timeout for the rsync command in seconds. default: 10 required: false + set_remote_user: + description: + - put user@ for the remote paths. If you have a custom ssh config to define the remote user for a host + that does not match the inventory user, you should set this parameter to "no". + default: yes + required: false notes: - Inspect the verbose output to validate the destination user/host/path are what was expected. 
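Editor's note: the behaviour added above reduces to a small path-formatting rule: local paths stay bare, remote paths gain a host: prefix and, only when set_remote_user is enabled, a user@ prefix, leaving a custom ssh config free to supply the login user. A sketch with a hypothetical rsync_path() helper:

    def rsync_path(host, path, user=None):
        # local endpoints are passed to rsync as plain paths
        if host in ('127.0.0.1', 'localhost'):
            return path
        # remote endpoints optionally carry the user@ prefix
        if user:
            return '%s@%s:%s' % (user, host, path)
        return '%s:%s' % (host, path)

    assert rsync_path('localhost', '/srv') == '/srv'
    assert rsync_path('web1', '/srv', 'deploy') == 'deploy@web1:/srv'
    assert rsync_path('web1', '/srv') == 'web1:/srv'  # set_remote_user: no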
@@ -189,6 +195,7 @@ def main(): times = dict(type='bool'), owner = dict(type='bool'), group = dict(type='bool'), + set_remote_user = dict(default='yes', type='bool'), rsync_timeout = dict(type='int', default=10) ), supports_check_mode = True From 14499e8bf3ed10f7818ec67e1f865d68e4a2a60b Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 20 Mar 2014 16:09:58 -0400 Subject: [PATCH 496/772] Fixes #4325 allow async and poll parameters to be templated vars --- lib/ansible/playbook/task.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 11b356f99ab..dd76c47a052 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -206,8 +206,12 @@ class Task(object): self.changed_when = ds.get('changed_when', None) self.failed_when = ds.get('failed_when', None) - self.async_seconds = int(ds.get('async', 0)) # not async by default - self.async_poll_interval = int(ds.get('poll', 10)) # default poll = 10 seconds + self.async_seconds = ds.get('async', 0) # not async by default + self.async_seconds = template.template_from_string(play.basedir, self.async_seconds, self.module_vars) + self.async_seconds = int(self.async_seconds) + self.async_poll_interval = ds.get('poll', 10) # default poll = 10 seconds + self.async_poll_interval = template.template_from_string(play.basedir, self.async_poll_interval, self.module_vars) + self.async_poll_interval = int(self.async_poll_interval) self.notify = ds.get('notify', []) self.first_available_file = ds.get('first_available_file', None) From 1c504eff689ae64ac17e0f19a6e4e82fc8f81dfc Mon Sep 17 00:00:00 2001 From: Joshua Conner Date: Thu, 20 Mar 2014 15:16:28 -0700 Subject: [PATCH 497/772] etcd lookup: use $ANSIBLE_ETCD_URL if it exists in the env --- lib/ansible/runner/lookup_plugins/etcd.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/lookup_plugins/etcd.py b/lib/ansible/runner/lookup_plugins/etcd.py index 07adec80297..a758a2fb0b5 100644 --- a/lib/ansible/runner/lookup_plugins/etcd.py +++ b/lib/ansible/runner/lookup_plugins/etcd.py @@ -16,6 +16,7 @@ # along with Ansible. If not, see . from ansible import utils +import os import urllib2 try: import json @@ -24,6 +25,8 @@ except ImportError: # this can be made configurable, not should not use ansible.cfg ANSIBLE_ETCD_URL = 'http://127.0.0.1:4001' +if os.getenv('ANSIBLE_ETCD_URL') is not None: + ANSIBLE_ETCD_URL = os.environ['ANSIBLE_ETCD_URL'] class etcd(): def __init__(self, url=ANSIBLE_ETCD_URL): @@ -62,7 +65,7 @@ class LookupModule(object): def run(self, terms, inject=None, **kwargs): - terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if isinstance(terms, basestring): terms = [ terms ] From 7272877df6774c266cacd8c90c5573fa57bd9cd5 Mon Sep 17 00:00:00 2001 From: willthames Date: Fri, 21 Mar 2014 13:26:07 +1000 Subject: [PATCH 498/772] Allow npm to install to a directory that doesn't yet exist If path is specified but does not exist, create it. Fail if path is specified but is not a directory --- library/packaging/npm | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/library/packaging/npm b/library/packaging/npm index 1157d8f6636..7034c7f9964 100644 --- a/library/packaging/npm +++ b/library/packaging/npm @@ -139,6 +139,10 @@ class Npm(object): #If path is specified, cd into that path and run the command. 
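# Editor's sketch (illustrative, not part of the patch): the guard the
# npm hunk below adds, as a standalone helper -- create the working
# directory when missing, refuse when the path exists but is not a
# directory.
import os

def ensure_directory(path):
    if not os.path.exists(path):
        os.makedirs(path)
    if not os.path.isdir(path):
        raise ValueError("path %s is not a directory" % path)
    return path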
cwd = None if self.path: + if not os.path.exists(self.path): + os.makedirs(self.path) + if not os.path.isdir(self.path): + self.module.fail_json(msg="path %s is not a directory" % self.path) cwd = self.path rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) From 6a92d48b0f053617c42dd4c28e042b971b4259e0 Mon Sep 17 00:00:00 2001 From: Maykel Moya Date: Fri, 21 Mar 2014 08:35:25 +0100 Subject: [PATCH 499/772] ec2_group: Document group_desc rule param --- library/cloud/ec2_group | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/library/cloud/ec2_group b/library/cloud/ec2_group index e25185c5f1c..bf40e7b83b7 100644 --- a/library/cloud/ec2_group +++ b/library/cloud/ec2_group @@ -85,6 +85,11 @@ options: version_added: "1.6" requirements: [ "boto" ] + +notes: + - If a rule declares a group_name and that group doesn't exist, it will be + automatically created. In that case, group_desc should be provided as well. + The module will refuse to create a depended-on group without a description. ''' EXAMPLES = ''' @@ -121,7 +126,9 @@ EXAMPLES = ''' - proto: tcp from_port: 80 to_port: 80 - group_name: example + group_name: example-other + # description to use if example-other needs to be created + group_desc: other example EC2 group ''' try: From b8158f5f6a3fc832a1d6e4ba318090e23f3432ac Mon Sep 17 00:00:00 2001 From: Manuel Faux Date: Fri, 21 Mar 2014 08:40:38 +0100 Subject: [PATCH 500/772] Addresses #6591 Hide diff for lineinfile if file did not change. --- lib/ansible/runner/action_plugins/template.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index b34c14ec6a5..34392ba5abd 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -123,7 +123,8 @@ class ActionModule(object): return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant)) else: res = self.runner._execute_module(conn, tmp, 'copy', module_args, inject=inject, complex_args=complex_args) - res.diff = dict(before=dest_contents, after=resultant) + if res.result['changed']: + res.diff = dict(before=dest_contents, after=resultant) return res else: return self.runner._execute_module(conn, tmp, 'file', module_args, inject=inject, complex_args=complex_args) From c0ac615a07b84d789ce321cfbaf0db015c2040cc Mon Sep 17 00:00:00 2001 From: willthames Date: Fri, 21 Mar 2014 11:04:30 +1000 Subject: [PATCH 501/772] Allow state=absent for ec2_vol Add methods to the module to make it more readable and allow reuse --- library/cloud/ec2_vol | 194 ++++++++++++++++++++++++++---------------- 1 file changed, 119 insertions(+), 75 deletions(-) diff --git a/library/cloud/ec2_vol b/library/cloud/ec2_vol index 176fb30ec2e..fb38852f429 100644 --- a/library/cloud/ec2_vol +++ b/library/cloud/ec2_vol @@ -55,7 +55,7 @@ options: version_added: "1.6" id: description: - - volume id if you wish to attach an existing volume (requires instance) + - volume id if you wish to attach an existing volume (requires instance) or remove an existing volume required: false default: null aliases: [] @@ -63,7 +63,7 @@ options: volume_size: description: - size of volume (in GB) to create. 
- required: true + required: false default: null aliases: [] iops: @@ -118,7 +118,13 @@ options: default: null aliases: [] version_added: "1.6" - + state: + description: + - whether to ensure the volume is present or absent + required: false + default: present + choices: ['absent', 'present'] + version_added: "1.6" requirements: [ "boto" ] author: Lester Wade ''' @@ -173,6 +179,7 @@ EXAMPLES = ''' wait: yes count: 1 register: ec2 + - local_action: module: ec2_vol instance: "{{ item.id }}" @@ -180,6 +187,12 @@ EXAMPLES = ''' device_name: /dev/xvdf with_items: ec2.instances register: ec2_vol + +# Remove a volume +- location: action + module: ec2_vol + id: vol-XXXXXXXX + state: absent ''' # Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes. @@ -196,57 +209,50 @@ except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - instance = dict(), - id = dict(), - name = dict(), - volume_size = dict(), - iops = dict(), - device_name = dict(), - zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), - snapshot = dict(), - ) - ) - module = AnsibleModule(argument_spec=argument_spec) - +def get_volume(module, ec2): + name = module.params.get('name') id = module.params.get('id') + zone = module.params.get('zone') + filters = {} + volume_ids = None + if zone: + filters['availability_zone'] = zone + if name: + filters = {'tag:Name': name} + if id: + volume_ids = [id] + try: + vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters) + except boto.exception.BotoServerError, e: + module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + + if not vols: + module.fail_json(msg="Could not find volume in zone (if specified): %s" % name or id) + if len(vols) > 1: + module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name) + return vols[0] + + +def delete_volume(module, ec2): + vol = get_volume(module, ec2) + if not vol: + module.exit_json(changed=False) + else: + if vol.attachment_state() is not None: + adata = vol.attach_data + module.fail_json(msg="Volume %s is attached to an instance %s." % (vol.id, adata.instance_id)) + ec2.delete_volume(vol.id) + module.exit_json(changed=True) + + +def create_volume(module, ec2, zone): name = module.params.get('name') + id = module.params.get('id') instance = module.params.get('instance') - volume_size = module.params.get('volume_size') iops = module.params.get('iops') - device_name = module.params.get('device_name') - zone = module.params.get('zone') + volume_size = module.params.get('volume_size') snapshot = module.params.get('snapshot') - - ec2 = ec2_connect(module) - - if id and name: - module.fail_json(msg="Both id and name cannot be specified") - - if not (id or name or volume_size): - module.fail_json(msg="Cannot specify volume_size and either one of name or id") - - # Here we need to get the zone info for the instance. This covers situation where - # instance is specified but zone isn't. - # Useful for playbooks chaining instance launch with volume create + attach and where the - # zone doesn't matter to the user. - if instance: - reservation = ec2.get_all_instances(instance_ids=instance) - inst = reservation[0].instances[0] - zone = inst.placement - - # Check if there is a volume already mounted there. 
- if device_name: - if device_name in inst.block_device_mapping: - module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance), - volume_id=inst.block_device_mapping[device_name].volume_id, - device=device_name, - changed=False) - # If custom iops is defined we use volume_type "io1" rather than the default of "standard" - if iops: volume_type = 'io1' else: @@ -259,26 +265,7 @@ def main(): if iops or volume_size: module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]") - filters = {} - volume_ids = None - if zone: - filters['availability_zone'] = zone - if name: - filters = {'tag:Name': name} - if id: - volume_ids = [id] - try: - vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters) - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - if not vols: - module.fail_json(msg = "Could not find volume in zone (if specified): %s" % name or id) - if len(vols) > 1: - module.fail_json(msg = - "Found more than one volume in zone (if specified) with name: %s" % name) - - volume = vols.pop() + volume = get_volume(module, ec2) if volume.attachment_state() is not None: adata = volume.attach_data if adata.instance_id != instance: @@ -298,12 +285,15 @@ def main(): volume.update() except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + return volume + - # Attach the created volume. +def attach_volume(module, ec2, volume, instance): + device_name = module.params.get('device_name') if device_name and instance: try: - attach = volume.attach(inst.id, device_name) + attach = volume.attach(instance.id, device_name) while volume.attachment_state() != 'attached': time.sleep(3) volume.update() @@ -334,11 +324,65 @@ def main(): except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - print json.dumps({ - "volume_id": volume.id, - "device": device_name - }) - sys.exit(0) + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + instance = dict(), + id = dict(), + name = dict(), + volume_size = dict(), + iops = dict(), + device_name = dict(), + zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), + snapshot = dict(), + state = dict(choices=['absent', 'present'], default='present') + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + id = module.params.get('id') + name = module.params.get('name') + instance = module.params.get('instance') + volume_size = module.params.get('volume_size') + iops = module.params.get('iops') + device_name = module.params.get('device_name') + zone = module.params.get('zone') + snapshot = module.params.get('snapshot') + state = module.params.get('state') + + ec2 = ec2_connect(module) + + if id and name: + module.fail_json(msg="Both id and name cannot be specified") + + if not (id or name or volume_size): + module.fail_json(msg="Cannot specify volume_size and either one of name or id") + + # Here we need to get the zone info for the instance. This covers situation where + # instance is specified but zone isn't. + # Useful for playbooks chaining instance launch with volume create + attach and where the + # zone doesn't matter to the user. + if instance: + reservation = ec2.get_all_instances(instance_ids=instance) + inst = reservation[0].instances[0] + zone = inst.placement + + # Check if there is a volume already mounted there. 
+    if device_name:
+        if device_name in inst.block_device_mapping:
+            module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
+                             volume_id=inst.block_device_mapping[device_name].volume_id,
+                             device=device_name,
+                             changed=False)
+
+    if state == 'absent':
+        delete_volume(module, ec2)
+    else:
+        volume = create_volume(module, ec2, zone)
+        if instance:
+            attach_volume(module, ec2, volume, inst)
+        module.exit_json(volume_id=volume.id, device=device_name)

 # import module snippets
 from ansible.module_utils.basic import *

From 5b3b9ba2670cd308d03ffd3708b8fb7213288bf5 Mon Sep 17 00:00:00 2001
From: James Tanner
Date: Fri, 21 Mar 2014 13:31:47 -0400
Subject: [PATCH 502/772] Addresses #4407 Calculate failed percentage based on
 serial and number of hosts in play

---
 lib/ansible/playbook/__init__.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py
index 88dd0f5f4bc..ce17303bbce 100644
--- a/lib/ansible/playbook/__init__.py
+++ b/lib/ansible/playbook/__init__.py
@@ -674,8 +674,14 @@ class PlayBook(object):
                 play.max_fail_pct = 0

             # If threshold for max nodes failed is exceeded , bail out.
-            if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
-                host_list = None
+            if play.serial > 0:
+                # if serial is set, we need to shorten the size of host_count
+                play_count = len(play._play_hosts)
+                if (play_count - len(host_list)) > int((play.max_fail_pct)/100.0 * play_count):
+                    host_list = None
+            else:
+                if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
+                    host_list = None

             # if no hosts remain, drop out
             if not host_list:

From 50aa3f6a09a7d124ee4c8b1ca3a52dedfe4f9834 Mon Sep 17 00:00:00 2001
From: Michael DeHaan
Date: Fri, 21 Mar 2014 14:26:20 -0400
Subject: [PATCH 503/772] Fix documentation for twilio module.

---
 library/notification/twilio | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/notification/twilio b/library/notification/twilio
index 8b9be137747..8969c28aa50 100644
--- a/library/notification/twilio
+++ b/library/notification/twilio
@@ -21,7 +21,7 @@ DOCUMENTATION = '''
 ---
 version_added: "1.6"
-module: text
+module: twilio
 short_description: Sends a text message to a mobile phone through Twilio.
 description:
    - Sends a text message to a phone number through an the Twilio SMS service.

From 3e6bf9640ca55ca327488c5373870bbcf817aad3 Mon Sep 17 00:00:00 2001
From: Colin Mattson
Date: Fri, 21 Mar 2014 13:52:36 -0700
Subject: [PATCH 504/772] Clarify documentation for apt pkg, state,
 install_recommends

---
 library/packaging/apt | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
 mode change 100644 => 100755 library/packaging/apt

diff --git a/library/packaging/apt b/library/packaging/apt
old mode 100644
new mode 100755
index 7711da1b1d9..7542ba960b6
--- a/library/packaging/apt
+++ b/library/packaging/apt
@@ -29,18 +29,18 @@ version_added: "0.0.2"
 options:
   pkg:
     description:
-      - A package name or package specifier with version, like C(foo) or C(foo=1.0). Shell like wildcards (fnmatch) like apt* are also supported.
+      - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Wildcards (fnmatch) like apt* are also supported.
     required: false
     default: null
   state:
     description:
-      - Indicates the desired package state
+      - Indicates the desired package state. C(latest) ensures that the latest version is installed.
required: false default: present choices: [ "latest", "absent", "present" ] update_cache: description: - - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step + - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step. required: false default: no choices: [ "yes", "no" ] @@ -62,7 +62,7 @@ options: default: null install_recommends: description: - - Corresponds to the C(--no-install-recommends) option for I(apt), default behavior works as apt's default behavior, C(no) does not install recommended packages. Suggested packages are never installed. + - Corresponds to the C(--no-install-recommends) option for I(apt). Default behavior (C(yes)) replicates apt's default behavior; C(no) does not install recommended packages. Suggested packages are never installed. required: false default: yes choices: [ "yes", "no" ] From 712e114dffc387caa2dae7bb36292021aa8999d6 Mon Sep 17 00:00:00 2001 From: Brent Langston Date: Fri, 21 Mar 2014 23:22:05 -0400 Subject: [PATCH 505/772] Allow custom hipchat urls --- library/notification/hipchat | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/library/notification/hipchat b/library/notification/hipchat index 2107ac021b3..86e3f1092da 100644 --- a/library/notification/hipchat +++ b/library/notification/hipchat @@ -54,6 +54,13 @@ options: default: 'yes' choices: ['yes', 'no'] version_added: 1.5.1 + api: + description: + - API url if using a self-hosted hipchat server + required: false + default: 'https://api.hipchat.com/v1/rooms/message' + version_added: 1.6.0 + # informational: requirements for nodes requirements: [ urllib, urllib2 ] @@ -68,11 +75,8 @@ EXAMPLES = ''' # HipChat module specific support methods. # -MSG_URI = "https://api.hipchat.com/v1/rooms/message?" 
- - def send_msg(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False): + color='yellow', notify=False, api='https://api.hipchat.com/v1/rooms/message'): '''sending message to hipchat''' params = {} @@ -81,13 +85,14 @@ def send_msg(module, token, room, msg_from, msg, msg_format='text', params['message'] = msg params['message_format'] = msg_format params['color'] = color + params['api'] = api if notify: params['notify'] = 1 else: params['notify'] = 0 - url = MSG_URI + "auth_token=%s" % (token) + url = api + "?auth_token=%s" % (token) data = urllib.urlencode(params) response, info = fetch_url(module, url, data=data) if info['status'] == 200: @@ -113,6 +118,7 @@ def main(): msg_format=dict(default="text", choices=["text", "html"]), notify=dict(default=True, type='bool'), validate_certs = dict(default='yes', type='bool'), + api = dict(default='https://api.hipchat.com/v1/rooms/message'), ), supports_check_mode=True ) @@ -124,9 +130,10 @@ def main(): color = module.params["color"] msg_format = module.params["msg_format"] notify = module.params["notify"] + api = module.params["api"] try: - send_msg(module, token, room, msg_from, msg, msg_format, color, notify) + send_msg(module, token, room, msg_from, msg, msg_format, color, notify, api) except Exception, e: module.fail_json(msg="unable to sent msg: %s" % e) From ca14df47908bde30bd9d173a15e9e00395761238 Mon Sep 17 00:00:00 2001 From: Brent Langston Date: Sat, 22 Mar 2014 13:56:49 -0400 Subject: [PATCH 506/772] Make the default URI a constant --- library/notification/hipchat | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/library/notification/hipchat b/library/notification/hipchat index 86e3f1092da..4ff95b32bf6 100644 --- a/library/notification/hipchat +++ b/library/notification/hipchat @@ -75,8 +75,10 @@ EXAMPLES = ''' # HipChat module specific support methods. 
# +MSG_URI = "https://api.hipchat.com/v1/rooms/message" + def send_msg(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api='https://api.hipchat.com/v1/rooms/message'): + color='yellow', notify=False, api=MSG_URI): '''sending message to hipchat''' params = {} @@ -118,7 +120,7 @@ def main(): msg_format=dict(default="text", choices=["text", "html"]), notify=dict(default=True, type='bool'), validate_certs = dict(default='yes', type='bool'), - api = dict(default='https://api.hipchat.com/v1/rooms/message'), + api = dict(default=MSG_URI), ), supports_check_mode=True ) From 6b0ef5f10ea5f744d8eb80f491c0fb85ed3fe4ff Mon Sep 17 00:00:00 2001 From: David Fox Date: Sat, 22 Mar 2014 15:12:56 -0400 Subject: [PATCH 507/772] Modified library/system/user so that password change date is set on Solaris when updating password --- library/system/user | 2 ++ 1 file changed, 2 insertions(+) diff --git a/library/system/user b/library/system/user index a6d3a0ec32d..12b1a62d270 100644 --- a/library/system/user +++ b/library/system/user @@ -1186,6 +1186,7 @@ class SunOS(User): lines.append(line) continue fields[1] = self.password + fields[2] = str(int(time.time() / 86400)) line = ':'.join(fields) lines.append('%s\n' % line) open(self.SHADOWFILE, 'w+').writelines(lines) @@ -1272,6 +1273,7 @@ class SunOS(User): lines.append(line) continue fields[1] = self.password + fields[2] = str(int(time.time() / 86400)) line = ':'.join(fields) lines.append('%s\n' % line) open(self.SHADOWFILE, 'w+').writelines(lines) From 9114fd6d7a3a68d589e3e2992acce486bb977014 Mon Sep 17 00:00:00 2001 From: Max Riveiro Date: Mon, 10 Feb 2014 00:38:50 +0400 Subject: [PATCH 508/772] Add rollbar_deployment module Signed-off-by: Max Riveiro --- library/monitoring/rollbar_deployment | 132 ++++++++++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 library/monitoring/rollbar_deployment diff --git a/library/monitoring/rollbar_deployment b/library/monitoring/rollbar_deployment new file mode 100644 index 00000000000..ee67dc58151 --- /dev/null +++ b/library/monitoring/rollbar_deployment @@ -0,0 +1,132 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2014, Max Riveiro, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: rollbar_deployment +author: Max Riveiro +short_description: Notify Rollbar about app deployments +description: + - Notify Rollbar about app deployments + (see https://rollbar.com/docs/deploys_other/) +options: + token: + description: + - Your project access token. + required: true + environment: + description: + - Name of the environment being deployed, e.g. 'production'. + required: true + revision: + description: + - Revision number/sha being deployed. + required: true + user: + description: + - User who deployed. + required: false + rollbar_user: + description: + - Rollbar username of the user who deployed. 
+    required: false
+  comment:
+    description:
+      - Deploy comment (e.g. what is being deployed).
+    required: false
+  url:
+    description:
+      - Optional URL to submit the notification to.
+    required: false
+    default: 'https://api.rollbar.com/api/1/deploy/'
+  validate_certs:
+    description:
+      - If C(no), SSL certificates for the target url will not be validated.
+        This should only be used on personally controlled sites using
+        self-signed certificates.
+    required: false
+    default: 'yes'
+    choices: ['yes', 'no']
+'''
+
+EXAMPLES = '''
+- rollbar_deployment: token=AAAAAA
+                      environment='staging'
+                      user='ansible'
+                      revision=4.2
+                      rollbar_user='admin'
+                      comment='Test Deploy'
+'''
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            token=dict(required=True),
+            environment=dict(required=True),
+            revision=dict(required=True),
+            user=dict(required=False),
+            rollbar_user=dict(required=False),
+            comment=dict(required=False),
+            url=dict(
+                required=False,
+                default='https://api.rollbar.com/api/1/deploy/'
+            ),
+            validate_certs=dict(default='yes', type='bool'),
+        ),
+        supports_check_mode=True
+    )
+
+    if module.check_mode:
+        module.exit_json(changed=True)
+
+    params = dict(
+        access_token=module.params['token'],
+        environment=module.params['environment'],
+        revision=module.params['revision']
+    )
+
+    if module.params['user']:
+        params['local_username'] = module.params['user']
+
+    if module.params['rollbar_user']:
+        params['rollbar_username'] = module.params['rollbar_user']
+
+    if module.params['comment']:
+        params['comment'] = module.params['comment']
+
+    url = module.params.get('url')
+
+    try:
+        data = urllib.urlencode(params)
+        response, info = fetch_url(module, url, data=data)
+    except Exception, e:
+        module.fail_json(msg='Unable to notify Rollbar: %s' % e)
+    else:
+        if info['status'] == 200:
+            module.exit_json(changed=True)
+        else:
+            module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()

From 9f7342d46d9e65cd9d67786e39cd95bc3f95da38 Mon Sep 17 00:00:00 2001
From: Max Riveiro
Date: Sun, 23 Mar 2014 00:42:51 +0400
Subject: [PATCH 509/772] Add testing deps installation into hacking/README

Signed-off-by: Max Riveiro

---
 hacking/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hacking/README.md b/hacking/README.md
index 5ac4e3de192..6d65464eee8 100644
--- a/hacking/README.md
+++ b/hacking/README.md
@@ -17,7 +17,7 @@ and do not wish to install them from your operating system package manager, you
 can install them from pip

     $ easy_install pip               # if pip is not already available
-    $ pip install pyyaml jinja2
+    $ pip install pyyaml jinja2 nose passlib pycrypto

 From there, follow ansible instructions on docs.ansible.com as normal.
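The rollbar_deployment module above reduces to a single form-encoded POST against Rollbar's deploy endpoint. A minimal standalone sketch of that request (Python 2, matching the module's urllib idiom; the token, environment and revision values are placeholders, not real credentials):

    import urllib
    import urllib2

    # Build the same form payload the module assembles from its parameters.
    params = urllib.urlencode(dict(
        access_token='AAAAAA',      # placeholder project access token
        environment='staging',
        revision='4.2',
        local_username='ansible',
    ))

    # Passing a data argument makes urllib2 issue a POST; Rollbar answers
    # with HTTP 200 on success, which is the code the module checks for.
    response = urllib2.urlopen('https://api.rollbar.com/api/1/deploy/', params)
    print response.getcode()

The module itself routes the request through fetch_url() instead, so it also picks up Ansible's certificate-validation handling (the validate_certs option documented above).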
From 1bb5b0f6bdc379f3393f4e62b7de9c71bf0c74f9 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bonicoli Date: Sun, 23 Mar 2014 03:35:10 +0100 Subject: [PATCH 510/772] Fix ValueError when selection contains a colon --- library/system/debconf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/debconf b/library/system/debconf index 244561973db..4ffefc8acec 100644 --- a/library/system/debconf +++ b/library/system/debconf @@ -96,7 +96,7 @@ def get_selections(module, pkg): selections = {} for line in out.splitlines(): - (key, value) = line.split(':') + (key, value) = line.split(':', 1) selections[ key.strip('*').strip() ] = value.strip() return selections From d3a470db0c24c938342e9a1c4561c68c382570ea Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bonicoli Date: Fri, 21 Mar 2014 02:32:33 +0100 Subject: [PATCH 511/772] Remove print statements "print item" raises an exception when type of item is jinja2.runtime.StrictUndefined --- lib/ansible/runner/filter_plugins/core.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ansible/runner/filter_plugins/core.py b/lib/ansible/runner/filter_plugins/core.py index 9b9a9b5cf2b..a99b17f67a1 100644 --- a/lib/ansible/runner/filter_plugins/core.py +++ b/lib/ansible/runner/filter_plugins/core.py @@ -42,8 +42,6 @@ def failed(*a, **kw): ''' Test if task result yields failed ''' item = a[0] if type(item) != dict: - print "DEBUG: GOT A" - print item raise errors.AnsibleFilterError("|failed expects a dictionary") rc = item.get('rc',0) failed = item.get('failed',False) From e9d7476c281034a679c824eb9598ef1bb5fdf582 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Francisco=20Jos=C3=A9=20Marques=20Vieira?= Date: Sun, 23 Mar 2014 17:19:23 +0000 Subject: [PATCH 512/772] Add missing sha1 import on known_hosts.py --- lib/ansible/module_utils/known_hosts.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 14e0deb8fe4..dfd684c2328 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -27,6 +27,7 @@ # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 import hmac
+from hashlib import sha1

 HASHED_KEY_MAGIC = "|1|"

 def add_git_host_key(module, url, accept_hostkey=True):

From bebb11afeb7bf38957d4da1c154d375d4f889779 Mon Sep 17 00:00:00 2001
From: Ian Pointer
Date: Sun, 23 Mar 2014 13:51:54 -0400
Subject: [PATCH 513/772] Adding type int for count (in line with other cloud
 modules)

---
 library/cloud/ec2 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/library/cloud/ec2 b/library/cloud/ec2
index 3752136098b..a6bd32d58a4 100644
--- a/library/cloud/ec2
+++ b/library/cloud/ec2
@@ -1125,7 +1125,7 @@ def main():
             spot_price = dict(),
             image = dict(),
             kernel = dict(),
-            count = dict(default='1'),
+            count = dict(type='int', default='1'),
             monitoring = dict(type='bool', default=False),
             ramdisk = dict(),
             wait = dict(type='bool', default=False),

From 3ea5d573aa72614f1f8d3dbc39ed7c5ae83b257e Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Wed, 19 Mar 2014 12:59:06 -0500
Subject: [PATCH 514/772] Accelerate improvements

* Added capability to support multiple keys, so clients from different
  machines can connect to a single daemon instance
* Any activity on the daemon will cause the timeout to extend, so that the
  daemon must be idle for the full number of minutes before it will auto-
  shutdown
* Various other small fixes to remove some redundancy

Fixes #5171
---
 lib/ansible/constants.py                      |   1 +
 .../runner/connection_plugins/accelerate.py   |  78 ++--
 library/utilities/accelerate                  | 369 ++++++++++++++----
 3 files changed, 340 insertions(+), 108 deletions(-)

diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 7d0b270beb7..a9b4c29af06 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -165,6 +165,7 @@ ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect
 ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
 ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
 ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
+ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
 PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)

 # characters included in auto-generated passwords
diff --git a/lib/ansible/runner/connection_plugins/accelerate.py b/lib/ansible/runner/connection_plugins/accelerate.py
index 60c1319262a..8cc29fab9dc 100644
--- a/lib/ansible/runner/connection_plugins/accelerate.py
+++ b/lib/ansible/runner/connection_plugins/accelerate.py
@@ -22,10 +22,10 @@ import socket
 import struct
 import time
 from ansible.callbacks import vvv, vvvv
+from ansible.errors import AnsibleError, AnsibleFileNotFound
 from ansible.runner.connection_plugins.ssh import Connection as SSHConnection
 from ansible.runner.connection_plugins.paramiko_ssh import Connection as ParamikoConnection
 from ansible import utils
-from ansible import errors
 from ansible import constants

 # the chunk size to read and send, assuming mtu 1500 and
@@ -85,7 +85,9 @@ class Connection(object):
             utils.AES_KEYS = self.runner.aes_keys

     def _execute_accelerate_module(self):
-        args = "password=%s port=%s debug=%d ipv6=%s" % (base64.b64encode(self.key.__str__()), str(self.accport), int(utils.VERBOSITY), self.runner.accelerate_ipv6)
+        args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % (base64.b64encode(self.key.__str__()),
str(self.accport), constants.ACCELERATE_TIMEOUT, int(utils.VERBOSITY), self.runner.accelerate_ipv6) + if constants.ACCELERATE_MULTI_KEY: + args += " multi_key=yes" inject = dict(password=self.key) if getattr(self.runner, 'accelerate_inventory_host', False): inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.runner.accelerate_inventory_host)) @@ -109,33 +111,38 @@ class Connection(object): while tries > 0: try: self.conn.connect((self.host,self.accport)) - if not self.validate_user(): - # the accelerated daemon was started with a - # different remote_user. The above command - # should have caused the accelerate daemon to - # shutdown, so we'll reconnect. - wrong_user = True break - except: - vvvv("failed, retrying...") + except socket.error: + vvvv("connection to %s failed, retrying..." % self.host) time.sleep(0.1) tries -= 1 if tries == 0: vvv("Could not connect via the accelerated connection, exceeded # of tries") - raise errors.AnsibleError("Failed to connect") + raise AnsibleError("FAILED") elif wrong_user: vvv("Restarting daemon with a different remote_user") - raise errors.AnsibleError("Wrong user") + raise AnsibleError("WRONG_USER") + self.conn.settimeout(constants.ACCELERATE_TIMEOUT) - except: + if not self.validate_user(): + # the accelerated daemon was started with a + # different remote_user. The above command + # should have caused the accelerate daemon to + # shutdown, so we'll reconnect. + wrong_user = True + + except AnsibleError, e: if allow_ssh: + if "WRONG_USER" in e: + vvv("Switching users, waiting for the daemon on %s to shutdown completely..." % self.host) + time.sleep(5) vvv("Falling back to ssh to startup accelerated mode") res = self._execute_accelerate_module() if not res.is_successful(): - raise errors.AnsibleError("Failed to launch the accelerated daemon on %s (reason: %s)" % (self.host,res.result.get('msg'))) + raise AnsibleError("Failed to launch the accelerated daemon on %s (reason: %s)" % (self.host,res.result.get('msg'))) return self.connect(allow_ssh=False) else: - raise errors.AnsibleError("Failed to connect to %s:%s" % (self.host,self.accport)) + raise AnsibleError("Failed to connect to %s:%s" % (self.host,self.accport)) self.is_connected = True return self @@ -163,11 +170,12 @@ class Connection(object): if not d: vvvv("%s: received nothing, bailing out" % self.host) return None + vvvv("%s: received %d bytes" % (self.host, len(d))) data += d vvvv("%s: received all of the data, returning" % self.host) return data except socket.timeout: - raise errors.AnsibleError("timed out while waiting to receive data") + raise AnsibleError("timed out while waiting to receive data") def validate_user(self): ''' @@ -176,6 +184,7 @@ class Connection(object): daemon to exit if they don't match ''' + vvvv("%s: sending request for validate_user" % self.host) data = dict( mode='validate_user', username=self.user, @@ -183,15 +192,16 @@ class Connection(object): data = utils.jsonify(data) data = utils.encrypt(self.key, data) if self.send_data(data): - raise errors.AnsibleError("Failed to send command to %s" % self.host) + raise AnsibleError("Failed to send command to %s" % self.host) + vvvv("%s: waiting for validate_user response" % self.host) while True: # we loop here while waiting for the response, because a # long running command may cause us to receive keepalive packets # ({"pong":"true"}) rather than the response we want. 
response = self.recv_data() if not response: - raise errors.AnsibleError("Failed to get a response from %s" % self.host) + raise AnsibleError("Failed to get a response from %s" % self.host) response = utils.decrypt(self.key, response) response = utils.parse_json(response) if "pong" in response: @@ -199,11 +209,11 @@ class Connection(object): vvvv("%s: received a keepalive packet" % self.host) continue else: - vvvv("%s: received the response" % self.host) + vvvv("%s: received the validate_user response: %s" % (self.host, response)) break if response.get('failed'): - raise errors.AnsibleError("Error while validating user: %s" % response.get("msg")) + return False else: return response.get('rc') == 0 @@ -211,10 +221,10 @@ class Connection(object): ''' run a command on the remote host ''' if su or su_user: - raise errors.AnsibleError("Internal Error: this module does not support running commands via su") + raise AnsibleError("Internal Error: this module does not support running commands via su") if in_data: - raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") + raise AnsibleError("Internal Error: this module does not support optimized module pipelining") if executable == "": executable = constants.DEFAULT_EXECUTABLE @@ -233,7 +243,7 @@ class Connection(object): data = utils.jsonify(data) data = utils.encrypt(self.key, data) if self.send_data(data): - raise errors.AnsibleError("Failed to send command to %s" % self.host) + raise AnsibleError("Failed to send command to %s" % self.host) while True: # we loop here while waiting for the response, because a @@ -241,7 +251,7 @@ class Connection(object): # ({"pong":"true"}) rather than the response we want. response = self.recv_data() if not response: - raise errors.AnsibleError("Failed to get a response from %s" % self.host) + raise AnsibleError("Failed to get a response from %s" % self.host) response = utils.decrypt(self.key, response) response = utils.parse_json(response) if "pong" in response: @@ -260,7 +270,7 @@ class Connection(object): vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) if not os.path.exists(in_path): - raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path) + raise AnsibleFileNotFound("file or module does not exist: %s" % in_path) fd = file(in_path, 'rb') fstat = os.stat(in_path) @@ -279,27 +289,27 @@ class Connection(object): data = utils.encrypt(self.key, data) if self.send_data(data): - raise errors.AnsibleError("failed to send the file to %s" % self.host) + raise AnsibleError("failed to send the file to %s" % self.host) response = self.recv_data() if not response: - raise errors.AnsibleError("Failed to get a response from %s" % self.host) + raise AnsibleError("Failed to get a response from %s" % self.host) response = utils.decrypt(self.key, response) response = utils.parse_json(response) if response.get('failed',False): - raise errors.AnsibleError("failed to put the file in the requested location") + raise AnsibleError("failed to put the file in the requested location") finally: fd.close() vvvv("waiting for final response after PUT") response = self.recv_data() if not response: - raise errors.AnsibleError("Failed to get a response from %s" % self.host) + raise AnsibleError("Failed to get a response from %s" % self.host) response = utils.decrypt(self.key, response) response = utils.parse_json(response) if response.get('failed',False): - raise errors.AnsibleError("failed to put the file in the requested location") + raise AnsibleError("failed to 
put the file in the requested location") def fetch_file(self, in_path, out_path): ''' save a remote file to the specified path ''' @@ -309,7 +319,7 @@ class Connection(object): data = utils.jsonify(data) data = utils.encrypt(self.key, data) if self.send_data(data): - raise errors.AnsibleError("failed to initiate the file fetch with %s" % self.host) + raise AnsibleError("failed to initiate the file fetch with %s" % self.host) fh = open(out_path, "w") try: @@ -317,11 +327,11 @@ class Connection(object): while True: response = self.recv_data() if not response: - raise errors.AnsibleError("Failed to get a response from %s" % self.host) + raise AnsibleError("Failed to get a response from %s" % self.host) response = utils.decrypt(self.key, response) response = utils.parse_json(response) if response.get('failed', False): - raise errors.AnsibleError("Error during file fetch, aborting") + raise AnsibleError("Error during file fetch, aborting") out = base64.b64decode(response['data']) fh.write(out) bytes += len(out) @@ -330,7 +340,7 @@ class Connection(object): data = utils.jsonify(dict()) data = utils.encrypt(self.key, data) if self.send_data(data): - raise errors.AnsibleError("failed to send ack during file fetch") + raise AnsibleError("failed to send ack during file fetch") if response.get('last', False): break finally: diff --git a/library/utilities/accelerate b/library/utilities/accelerate index a61e54e374d..5a8c96c64a9 100644 --- a/library/utilities/accelerate +++ b/library/utilities/accelerate @@ -53,6 +53,14 @@ options: if this parameter is set to true. required: false default: false + multi_key: + description: + - When enabled, the daemon will open a local socket file which can be used by future daemon executions to + upload a new key to the already running daemon, so that multiple users can connect using different keys. + This access still requires an ssh connection as the uid for which the daemon is currently running. + required: false + default: no + version_added: "1.6" notes: - See the advanced playbooks chapter for more about using accelerated mode. 
requirements: [ "python-keyczar" ] @@ -71,6 +79,7 @@ EXAMPLES = ''' ''' import base64 +import errno import getpass import json import os @@ -88,10 +97,13 @@ import traceback import SocketServer from datetime import datetime -from threading import Thread +from threading import Thread, Lock + +# import module snippets +# we must import this here at the top so we can use get_module_path() +from ansible.module_utils.basic import * syslog.openlog('ansible-%s' % os.path.basename(__file__)) -PIDFILE = os.path.expanduser("~/.accelerate.pid") # the chunk size to read and send, assuming mtu 1500 and # leaving room for base64 (+33%) encoding and header (100 bytes) @@ -107,6 +119,9 @@ def log(msg, cap=0): if DEBUG_LEVEL >= cap: syslog.syslog(syslog.LOG_NOTICE|syslog.LOG_DAEMON, msg) +def v(msg): + log(msg, cap=1) + def vv(msg): log(msg, cap=2) @@ -116,16 +131,6 @@ def vvv(msg): def vvvv(msg): log(msg, cap=4) -if os.path.exists(PIDFILE): - try: - data = int(open(PIDFILE).read()) - try: - os.kill(data, signal.SIGKILL) - except OSError: - pass - except ValueError: - pass - os.unlink(PIDFILE) HAS_KEYCZAR = False try: @@ -134,10 +139,26 @@ try: except ImportError: pass +SOCKET_FILE = os.path.join(get_module_path(), '.ansible-accelerate', ".local.socket") + +def get_pid_location(module): + """ + Try to find a pid directory in the common locations, falling + back to the user's home directory if no others exist + """ + for dir in ['/var/run', '/var/lib/run', '/run', os.path.expanduser("~/")]: + try: + if os.path.isdir(dir) and os.access(dir, os.R_OK|os.W_OK): + return os.path.join(dir, '.accelerate.pid') + except: + pass + module.fail_json(msg="couldn't find any valid directory to use for the accelerate pid file") + + # NOTE: this shares a fair amount of code in common with async_wrapper, if async_wrapper were a new module we could move # this into utils.module_common and probably should anyway -def daemonize_self(module, password, port, minutes): +def daemonize_self(module, password, port, minutes, pid_file): # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012 try: pid = os.fork() @@ -158,11 +179,11 @@ def daemonize_self(module, password, port, minutes): try: pid = os.fork() if pid > 0: - log("daemon pid %s, writing %s" % (pid, PIDFILE)) - pid_file = open(PIDFILE, "w") + log("daemon pid %s, writing %s" % (pid, pid_file)) + pid_file = open(pid_file, "w") pid_file.write("%s" % pid) pid_file.close() - vvv("pidfile written") + vvv("pid file written") sys.exit(0) except OSError, e: log("fork #2 failed: %d (%s)" % (e.errno, e.strerror)) @@ -174,8 +195,85 @@ def daemonize_self(module, password, port, minutes): os.dup2(dev_null.fileno(), sys.stderr.fileno()) log("daemonizing successful") -class ThreadWithReturnValue(Thread): +class LocalSocketThread(Thread): + server = None + terminated = False + + def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None): + self.server = kwargs.get('server') + Thread.__init__(self, group, target, name, args, kwargs, Verbose) + + def run(self): + try: + if os.path.exists(SOCKET_FILE): + os.remove(SOCKET_FILE) + else: + dir = os.path.dirname(SOCKET_FILE) + if os.path.exists(dir): + if not os.path.isdir(dir): + log("The socket file path (%s) exists, but is not a directory. 
No local connections will be available" % dir) + return + else: + # make sure the directory is accessible only to this + # user, as socket files derive their permissions from + # the directory that contains them + os.chmod(dir, 0700) + elif not os.path.exists(dir): + os.makedirs(dir, 0700) + except OSError: + pass + self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.s.bind(SOCKET_FILE) + self.s.listen(5) + while not self.terminated: + try: + conn, addr = self.s.accept() + vv("received local connection") + data = "" + while "\n" not in data: + data += conn.recv(2048) + try: + new_key = AesKey.Read(data.strip()) + found = False + for key in self.server.key_list: + try: + new_key.Decrypt(key.Encrypt("foo")) + found = True + break + except: + pass + if not found: + vv("adding new key to the key list") + self.server.key_list.append(new_key) + conn.sendall("OK\n") + else: + vv("key already exists in the key list, ignoring") + conn.sendall("EXISTS\n") + + # update the last event time so the server doesn't + # shutdown sooner than expected for new cliets + try: + self.server.last_event_lock.acquire() + self.server.last_event = datetime.now() + finally: + self.server.last_event_lock.release() + except Exception, e: + vv("key loaded locally was invalid, ignoring (%s)" % e) + conn.sendall("BADKEY\n") + finally: + try: + conn.close() + except: + pass + except: + pass + + def terminate(self): + self.terminated = True + self.s.shutdown(socket.SHUT_RDWR) + self.s.close() +class ThreadWithReturnValue(Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None): Thread.__init__(self, group, target, name, args, kwargs, Verbose) self._return = None @@ -190,24 +288,41 @@ class ThreadWithReturnValue(Thread): return self._return class ThreadedTCPServer(SocketServer.ThreadingTCPServer): - def __init__(self, server_address, RequestHandlerClass, module, password, timeout): + key_list = [] + last_event = datetime.now() + last_event_lock = Lock() + def __init__(self, server_address, RequestHandlerClass, module, password, timeout, use_ipv6=False): self.module = module - self.key = AesKey.Read(password) + self.key_list.append(AesKey.Read(password)) self.allow_reuse_address = True self.timeout = timeout - SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass) -class ThreadedTCPV6Server(SocketServer.ThreadingTCPServer): - def __init__(self, server_address, RequestHandlerClass, module, password, timeout): - self.module = module - self.address_family = socket.AF_INET6 - self.key = AesKey.Read(password) - self.allow_reuse_address = True - self.timeout = timeout + if use_ipv6: + self.address_family = socket.AF_INET6 + + if self.module.params.get('multi_key', False): + vv("starting thread to handle local connections for multiple keys") + self.local_thread = LocalSocketThread(kwargs=dict(server=self)) + self.local_thread.start() + SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass) + def shutdown(self): + self.local_thread.terminate() + self.running = False + SocketServer.ThreadingTCPServer.shutdown(self) + class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): + # the key to use for this connection + active_key = None + def send_data(self, data): + try: + self.server.last_event_lock.acquire() + self.server.last_event = datetime.now() + finally: + self.server.last_event_lock.release() + packed_len = struct.pack('!Q', len(data)) return self.request.sendall(packed_len + data) @@ -216,23 +331,40 @@ class 
ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): data = "" vvvv("in recv_data(), waiting for the header") while len(data) < header_len: - d = self.request.recv(header_len - len(data)) - if not d: - vvv("received nothing, bailing out") + try: + d = self.request.recv(header_len - len(data)) + if not d: + vvv("received nothing, bailing out") + return None + data += d + except: + # probably got a connection reset + vvvv("exception received while waiting for recv(), returning None") return None - data += d vvvv("in recv_data(), got the header, unpacking") data_len = struct.unpack('!Q',data[:header_len])[0] data = data[header_len:] vvvv("data received so far (expecting %d): %d" % (data_len,len(data))) while len(data) < data_len: - d = self.request.recv(data_len - len(data)) - if not d: - vvv("received nothing, bailing out") + try: + d = self.request.recv(data_len - len(data)) + if not d: + vvv("received nothing, bailing out") + return None + data += d + vvvv("data received so far (expecting %d): %d" % (data_len,len(data))) + except: + # probably got a connection reset + vvvv("exception received while waiting for recv(), returning None") return None - data += d - vvvv("data received so far (expecting %d): %d" % (data_len,len(data))) vvvv("received all of the data, returning") + + try: + self.server.last_event_lock.acquire() + self.server.last_event = datetime.now() + finally: + self.server.last_event_lock.release() + return data def handle(self): @@ -243,18 +375,26 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): if not data: vvvv("received nothing back from recv_data(), breaking out") break - try: - vvvv("got data, decrypting") - data = self.server.key.Decrypt(data) - vvvv("decryption done") - except: - vv("bad decrypt, skipping...") - data2 = json.dumps(dict(rc=1)) - data2 = self.server.key.Encrypt(data2) - self.send_data(data2) - return + vvvv("got data, decrypting") + if not self.active_key: + for key in self.server.key_list: + try: + data = key.Decrypt(data) + self.active_key = key + break + except: + pass + else: + vv("bad decrypt, exiting the connection handler") + return + else: + try: + data = self.active_key.Decrypt(data) + except: + vv("bad decrypt, exiting the connection handler") + return - vvvv("loading json from the data") + vvvv("decryption done, loading json from the data") data = json.loads(data) mode = data['mode'] @@ -270,7 +410,7 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): last_pong = datetime.now() vvvv("command still running, sending keepalive packet") data2 = json.dumps(dict(pong=True)) - data2 = self.server.key.Encrypt(data2) + data2 = self.active_key.Encrypt(data2) self.send_data(data2) time.sleep(0.1) response = twrv._return @@ -286,8 +426,9 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): response = self.validate_user(data) vvvv("response result is %s" % str(response)) - data2 = json.dumps(response) - data2 = self.server.key.Encrypt(data2) + json_response = json.dumps(response) + vvvv("dumped json is %s" % json_response) + data2 = self.active_key.Encrypt(json_response) vvvv("sending the response back to the controller") self.send_data(data2) vvvv("done sending the response") @@ -299,9 +440,10 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): tb = traceback.format_exc() log("encountered an unhandled exception in the handle() function") log("error was:\n%s" % tb) - data2 = json.dumps(dict(rc=1, failed=True, msg="unhandled error in the handle() function")) - data2 = 
self.server.key.Encrypt(data2) - self.send_data(data2) + if self.active_key: + data2 = json.dumps(dict(rc=1, failed=True, msg="unhandled error in the handle() function")) + data2 = self.active_key.Encrypt(data2) + self.send_data(data2) def validate_user(self, data): if 'username' not in data: @@ -362,7 +504,7 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): last = True data = dict(data=base64.b64encode(data), last=last) data = json.dumps(data) - data = self.server.key.Encrypt(data) + data = self.active_key.Encrypt(data) if self.send_data(data): return dict(failed=True, stderr="failed to send data") @@ -371,7 +513,7 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): if not response: log("failed to get a response, aborting") return dict(failed=True, stderr="Failed to get a response from %s" % self.host) - response = self.server.key.Decrypt(response) + response = self.active_key.Decrypt(response) response = json.loads(response) if response.get('failed',False): @@ -394,7 +536,7 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): final_path = None if 'user' in data and data.get('user') != getpass.getuser(): - vv("the target user doesn't match this user, we'll move the file into place via sudo") + vvv("the target user doesn't match this user, we'll move the file into place via sudo") tmp_path = os.path.expanduser('~/.ansible/tmp/') if not os.path.exists(tmp_path): try: @@ -415,14 +557,14 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): bytes += len(out) out_fd.write(out) response = json.dumps(dict()) - response = self.server.key.Encrypt(response) + response = self.active_key.Encrypt(response) self.send_data(response) if data['last']: break data = self.recv_data() if not data: raise "" - data = self.server.key.Decrypt(data) + data = self.active_key.Decrypt(data) data = json.loads(data) except: out_fd.close() @@ -438,27 +580,45 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): self.server.module.atomic_move(out_path, final_path) return dict() -def daemonize(module, password, port, timeout, minutes, ipv6): +def daemonize(module, password, port, timeout, minutes, use_ipv6, pid_file): try: - daemonize_self(module, password, port, minutes) + daemonize_self(module, password, port, minutes, pid_file) - def catcher(signum, _): - module.exit_json(msg='timer expired') + def timer_handler(signum, _): + try: + server.last_event_lock.acquire() + td = datetime.now() - server.last_event + # older python timedelta objects don't have total_seconds(), + # so we use the formula from the docs to calculate it + total_seconds = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6 + if total_seconds >= minutes * 60: + log("server has been idle longer than the timeout, shutting down") + server.running = False + server.shutdown() + else: + # reschedule the check + vvvv("daemon idle for %d seconds (timeout=%d)" % (total_seconds,minutes*60)) + signal.alarm(30) + except: + pass + finally: + server.last_event_lock.release() - signal.signal(signal.SIGALRM, catcher) - signal.setitimer(signal.ITIMER_REAL, 60 * minutes) + signal.signal(signal.SIGALRM, timer_handler) + signal.alarm(30) tries = 5 while tries > 0: try: - if ipv6: - server = ThreadedTCPV6Server(("::", port), ThreadedTCPRequestHandler, module, password, timeout) + if use_ipv6: + address = ("::", port) else: - server = ThreadedTCPServer(("0.0.0.0", port), ThreadedTCPRequestHandler, module, password, timeout) + address = ("0.0.0.0", port) + server = 
ThreadedTCPServer(address, ThreadedTCPRequestHandler, module, password, timeout, use_ipv6=use_ipv6) server.allow_reuse_address = True break - except: - vv("Failed to create the TCP server (tries left = %d)" % tries) + except Exception, e: + vv("Failed to create the TCP server (tries left = %d) (error: %s) " % (tries,e)) tries -= 1 time.sleep(0.2) @@ -466,8 +626,20 @@ def daemonize(module, password, port, timeout, minutes, ipv6): vv("Maximum number of attempts to create the TCP server reached, bailing out") raise Exception("max # of attempts to serve reached") - vv("serving!") - server.serve_forever(poll_interval=0.1) + # run the server in a separate thread to make signal handling work + server_thread = Thread(target=server.serve_forever, kwargs=dict(poll_interval=0.1)) + server_thread.start() + server.running = True + + v("serving!") + while server.running: + time.sleep(1) + + # wait for the thread to exit fully + server_thread.join() + + v("server thread terminated, exiting!") + sys.exit(0) except Exception, e: tb = traceback.format_exc() log("exception caught, exiting accelerated mode: %s\n%s" % (e, tb)) @@ -479,6 +651,7 @@ def main(): argument_spec = dict( port=dict(required=False, default=5099), ipv6=dict(required=False, default=False, type='bool'), + multi_key=dict(required=False, default=False, type='bool'), timeout=dict(required=False, default=300), password=dict(required=True), minutes=dict(required=False, default=30), @@ -493,14 +666,62 @@ def main(): minutes = int(module.params['minutes']) debug = int(module.params['debug']) ipv6 = module.params['ipv6'] + multi_key = module.params['multi_key'] if not HAS_KEYCZAR: module.fail_json(msg="keyczar is not installed (on the remote side)") DEBUG_LEVEL=debug + pid_file = get_pid_location(module) + + daemon_pid = None + daemon_running = False + if os.path.exists(pid_file): + try: + daemon_pid = int(open(pid_file).read()) + try: + # sending signal 0 doesn't do anything to the + # process, other than tell the calling program + # whether other signals can be sent + os.kill(daemon_pid, 0) + except OSError, e: + if e.errno == errno.EPERM: + # no permissions means the pid is probably + # running, but as a different user, so fail + module.fail_json(msg="the accelerate daemon appears to be running as a different user that this user cannot access (pid=%d)" % daemon_pid) + else: + daemon_running = True + except ValueError: + # invalid pid file, unlink it - otherwise we don't care + try: + os.unlink(pid_file) + except: + pass - daemonize(module, password, port, timeout, minutes, ipv6) + if daemon_running and multi_key: + # try to connect to the file socket for the daemon if it exists + s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + try: + s.connect(SOCKET_FILE) + s.sendall(password + '\n') + data = "" + while '\n' not in data: + data += s.recv(2048) + res = data.strip() + except: + module.fail_json(msg="failed to connect to the local socket file") + finally: + try: + s.close() + except: + pass + + if res in ("OK", "EXISTS"): + module.exit_json(msg="transferred new key to the existing daemon") + else: + module.fail_json(msg="could not transfer new key: %s" % data.strip()) + else: + # try to start up the daemon + daemonize(module, password, port, timeout, minutes, ipv6, pid_file) -# import module snippets -from ansible.module_utils.basic import * main() From 1fa0fab6cb5216b4758dcf028c5d2cd382d734e2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 23 Mar 2014 14:44:38 -0500 Subject: [PATCH 515/772] Documentation updates for accelerate 
mode improvements

---
 CHANGELOG.md                           | 3 +++
 docsite/rst/playbooks_acceleration.rst | 7 +++++++
 2 files changed, 10 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f6b9d0ffbf0..6543ae45727 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,9 @@ Major features/changes:
 * The deprecated legacy variable templating system has been finally removed. Use {{ foo }} always not $foo or ${foo}.
 * Any data file can also be JSON. Use sparingly -- with great power comes great responsibility. Starting file with "{" or "[" denotes JSON.
 * Added 'gathering' param for ansible.cfg to change the default gather_facts policy.
+* Accelerate improvements:
+  - multiple users can connect with different keys, when `accelerate_multi_key = yes` is specified in the ansible.cfg.
+  - daemon lifetime is now based on the time from the last activity, not the time from the daemon's launch.

 New Modules:

diff --git a/docsite/rst/playbooks_acceleration.rst b/docsite/rst/playbooks_acceleration.rst
index 6b25f6cced1..b7f08828a84 100644
--- a/docsite/rst/playbooks_acceleration.rst
+++ b/docsite/rst/playbooks_acceleration.rst
@@ -76,4 +76,11 @@ As noted above, accelerated mode also supports running tasks via sudo, however t
 * You must remove requiretty from your sudoers options.
 * Prompting for the sudo password is not yet supported, so the NOPASSWD option is required for sudo'ed commands.

+As of Ansible version `1.6`, you can also allow the use of multiple keys for connections from multiple Ansible management nodes. To do so, add the following option
+to your `ansible.cfg` configuration::
+
+    accelerate_multi_key = yes
+
+When enabled, the daemon will open a UNIX socket file (by default `$ANSIBLE_REMOTE_TEMP/.ansible-accelerate/.local.socket`). New connections over SSH can
+use this socket file to upload new keys to the daemon.

From 31628d86a13a3383f84fe2f7025db382759f0ffd Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Sun, 23 Mar 2014 14:45:05 -0500
Subject: [PATCH 516/772] Adding in a configurable option for the accelerate
 daemon timeout

This was apparently an oversight, as it has never been configurable
despite having a module parameter for the timeout.
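Taken together, the accelerate options now readable from the [accelerate] section of ansible.cfg include the following. This is a sketch, not a required configuration; the values shown are the defaults from constants.py, except accelerate_multi_key, which defaults to off:

    [accelerate]
    accelerate_port = 5099
    accelerate_timeout = 30
    accelerate_connect_timeout = 1.0
    # added by this patch:
    accelerate_daemon_timeout = 30
    # added earlier in this series (defaults to no):
    accelerate_multi_key = yes

Here accelerate_timeout is the socket timeout applied to established connections, while accelerate_daemon_timeout is the idle lifetime, in minutes, passed through to the daemon's minutes parameter.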
--- lib/ansible/constants.py | 1 + lib/ansible/runner/connection_plugins/accelerate.py | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index a9b4c29af06..ea909243761 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -162,6 +162,7 @@ ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_po ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True) ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True) ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True) +ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True) ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys') ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700') ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600') diff --git a/lib/ansible/runner/connection_plugins/accelerate.py b/lib/ansible/runner/connection_plugins/accelerate.py index 8cc29fab9dc..3f35a325484 100644 --- a/lib/ansible/runner/connection_plugins/accelerate.py +++ b/lib/ansible/runner/connection_plugins/accelerate.py @@ -85,7 +85,13 @@ class Connection(object): utils.AES_KEYS = self.runner.aes_keys def _execute_accelerate_module(self): - args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % (base64.b64encode(self.key.__str__()), str(self.accport), constants.ACCELERATE_TIMEOUT, int(utils.VERBOSITY), self.runner.accelerate_ipv6) + args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % ( + base64.b64encode(self.key.__str__()), + str(self.accport), + constants.ACCELERATE_DAEMON_TIMEOUT, + int(utils.VERBOSITY), + self.runner.accelerate_ipv6, + ) if constants.ACCELERATE_MULTI_KEY: args += " multi_key=yes" inject = dict(password=self.key) From c28d3545fbfa70dec1ca393ed7efb7cc92cb3da3 Mon Sep 17 00:00:00 2001 From: Seth Woodworth Date: Sun, 23 Mar 2014 18:52:08 -0400 Subject: [PATCH 517/772] adds missing `'` to debconf examples --- library/system/debconf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/debconf b/library/system/debconf index 4ffefc8acec..1dade71f8ad 100644 --- a/library/system/debconf +++ b/library/system/debconf @@ -75,7 +75,7 @@ EXAMPLES = ''' debconf: name=locales question='locales/default_environment_locale' value=fr_FR.UTF-8 # set to generate locales: -debconf: name=locales question='locales/locales_to_be_generated value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8' +debconf: name=locales question='locales/locales_to_be_generated' value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8' # Accept oracle license debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select' From 3c97ac160c8c3277fa1ab98322c0bf57da9d42cc Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sun, 23 Mar 2014 19:03:27 -0400 Subject: [PATCH 518/772] Update CONTRIBUTING.md Some clarifications about workflow and the priority queue in GitHub --- CONTRIBUTING.md | 79 ++++++++++++++++++++++++++++++++++++------------- 1 file changed, 59 insertions(+), 20 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e980b6eb7da..76aa8dc5167 100644 --- 
a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -29,13 +29,9 @@ content up on places like github to share with others. Sharing A Feature Idea ---------------------- -If you have an idea for a new feature, you can open a new ticket at -[github.com/ansible/ansible](https://github.com/ansible/ansible), though in general we like to -talk about feature ideas first and bring in lots of people into the discussion. Consider stopping -by the -[Ansible project mailing list](https://groups.google.com/forum/#!forum/ansible-project) ([Subscribe](https://groups.google.com/forum/#!forum/ansible-project/join)) -or #ansible on irc.freenode.net. There is an overview about more mailing lists -later in this document. +Ideas are very welcome and the best place to share them is the [Ansible project mailing list](https://groups.google.com/forum/#!forum/ansible-project) ([Subscribe](https://groups.google.com/forum/#!forum/ansible-project/join)) or #ansible on irc.freenode.net. + +While you can file a feature request on GitHub, pull requests are a much better way to get your feature added than submitting a feature request. Open source is all about itch scratching, and it's less likely that someone else will have the same itches as yourself. We keep code reasonably simple on purpose so it's easy to dive in and make additions, but be sure to read the "Contributing Code" section below too -- as it doesn't hurt to have a discussion about a feature first -- we're inclined to have preferences about how incoming features might be implemented, and that can save confusion later. Helping with Documentation -------------------------- @@ -58,18 +54,24 @@ The Ansible project keeps it’s source on github at and takes contributions through [github pull requests](https://help.github.com/articles/using-pull-requests). -It is usually a good idea to join the ansible-devel list to discuss any large features prior to submission, and this -especially helps in avoiding duplicate work or efforts where we decide, upon seeing a pull request for the first -time, that revisions are needed. (This is not usually needed for module development) +It is usually a good idea to join the ansible-devel list to discuss any large features prior to submission, and this especially helps in avoiding duplicate work or efforts where we decide, upon seeing a pull request for the first time, that revisions are needed. (This is not usually needed for module development) + +Note that we do keep Ansible to a particular aesthetic, so if you are unclear about whether a feature +is a good fit or not, having the discussion on the development list is often a lot easier than having +to modify a pull request later. When submitting patches, be sure to run the unit tests first “make tests” and always use “git rebase” vs “git merge” (aliasing git pull to git pull --rebase is a great idea) to -avoid merge commits in your submissions. We will require resubmission of pull requests that -contain merge commits. +avoid merge commits in your submissions. There are also integration tests that can be run in the "tests/integration" directory. + +In order to keep the history clean and better audit incoming code, we will require resubmission of pull requests that contain merge commits. Use "git pull --rebase" vs "git pull" and "git rebase" vs "git merge". Also be sure to use topic branches to keep your additions on different branches, such that they won't pick up stray commits later. + +We’ll then review your contributions and engage with you about questions and so on. 
+
+We have a very large and active community, so it may take a while to get your contributions
+in! See the notes about priorities in a later section for understanding our work queue.

-We’ll then review your contributions and engage with you about questions and so on. Please be
-advised we have a very large and active community, so it may take awhile to get your contributions
-in! Patches should be made against the 'devel' branch.
+Patches should be made against the 'devel' branch.

 Contributions can be for new features like modules, or to fix bugs you or others have found. If you
 are interested in writing new modules to be included in the core Ansible distribution, please refer
@@ -108,6 +110,44 @@ the mailing list or IRC first. As we are a very high volume project, if you det
 you do have a bug, please be sure to open the issue yourself to ensure we have a record of it.
 Don’t rely on someone else in the community to file the bug report for you.

+It may take some time to get to your report, see "A Note About Priorities" below.
+
+A Note About Priorities
+=======================
+
+Ansible was one of the top 5 projects with the most OSS contributors on GitHub in 2013, and well over
+600 people have added code to the project. As a result, we have a LOT of incoming activity to process.
+
+In the interest of transparency, we're telling you how we do this.
+
+In our bug tracker you'll notice some labels - P1, P2, P3, P4, and P5. These are our internal
+priority orders that we use to sort tickets.
+
+With some exceptions for easy merges (like documentation typos for instance),
+we're going to spend most of our time working on P1 and P2 items first, including pull requests.
+These usually relate to important
+bugs or features affecting large segments of the userbase. So if you see something categorized
+"P3 or P4", and it's not appearing to get a lot of immediate attention, this is why.
+
+These labels don't really have definition - they are a simple ordering. However something
+affecting a major module (yum, apt, etc) is likely to be prioritized higher than a module
+affecting a smaller number of users.
+
+Since we place a strong emphasis on testing and code review, it may take a few months for a minor feature to get merged.
+
+Don't worry though -- we'll also take periodic sweeps through the lower priority queues and give
+them some attention as well, particularly in the area of new module changes. So it doesn't necessarily
+mean that we'll be exhausting all of the higher-priority queues before getting to your ticket.
+
+Release Numbering
+=================
+
+Releases ending in ".0" are major releases and this is where all new features land. Releases ending
+in another integer, like "0.X.1" and "0.X.2" are dot releases, and these are only going to contain
+bugfixes. Typically we don't do dot releases for minor releases, but may occasionally decide to cut
+dot releases containing a large number of smaller fixes if it's still a fairly long time before
+the next release comes out.
+
 Online Resources
 ================

@@ -165,11 +205,10 @@ we post with an @ansible.com address.
 Community Code of Conduct
 -------------------------

-Ansible’s community welcomes users of all types, backgrounds, and skill levels. Please
-treat others as you expect to be treated, keep discussions positive, and avoid discrimination
-or engaging in controversial debates (except vi vs emacs is cool). Posts to mailing lists
-should remain focused around Ansible and IT automation.
Abuse of these community guidelines -will not be tolerated and may result in banning from community resources. +Ansible’s community welcomes users of all types, backgrounds, and skill levels. Please +treat others as you expect to be treated, keep discussions positive, and avoid discrimination, profanity, allegations of Cthulhu worship, or engaging in controversial debates (except vi vs emacs is cool). + +Posts to mailing lists should remain focused around Ansible and IT automation. Abuse of these community guidelines will not be tolerated and may result in banning from community resources. Contributors License Agreement ------------------------------ From c0886a5f3ad2e760ea72cd8499180ee8525c59be Mon Sep 17 00:00:00 2001 From: Herby Gillot Date: Sun, 23 Mar 2014 21:59:47 -0400 Subject: [PATCH 519/772] Add a fact providing the full hostname, without truncating the domain portion. --- lib/ansible/module_utils/facts.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 01f812def7b..dd6d76ff63d 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -108,6 +108,7 @@ class Facts(object): self.facts['python_version'] = platform.python_version() self.facts['fqdn'] = socket.getfqdn() self.facts['hostname'] = platform.node().split('.')[0] + self.facts['full_hostname'] = platform.node() self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:]) arch_bits = platform.architecture()[0] self.facts['userspace_bits'] = arch_bits.replace('bit', '') From 5d518379218dd4cffaf05fe31fa434b7b89e823a Mon Sep 17 00:00:00 2001 From: bryan hunt Date: Mon, 24 Mar 2014 13:51:44 +0000 Subject: [PATCH 520/772] Accept status code from a list of valid codes. --- library/network/uri | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/library/network/uri b/library/network/uri index 9a22d4758c8..ac12ffbb782 100644 --- a/library/network/uri +++ b/library/network/uri @@ -101,7 +101,7 @@ options: required: false status_code: description: - - A valid, numeric, HTTP status code that signifies success of the request. + - A valid, numeric, HTTP status code that signifies success of the request. Can also be a comma-separated list of status codes. required: false default: 200 timeout: @@ -315,7 +315,7 @@ def main(): follow_redirects = dict(required=False, default='no', type='bool'), creates = dict(required=False, default=None), removes = dict(required=False, default=None), - status_code = dict(required=False, default=200, type='int'), + status_code = dict(required=False, default=[200], type='list'), timeout = dict(required=False, default=30, type='int'), ), check_invalid_arguments=False, @@ -338,7 +338,7 @@ def main(): follow_redirects = module.params['follow_redirects'] creates = module.params['creates'] removes = module.params['removes'] - status_code = int(module.params['status_code']) + status_code = list(module.params['status_code']) socket_timeout = module.params['timeout'] # Grab all the http headers. Need this hack since passing multi-values is currently a bit ugly. (e.g.
headers='{"Content-Type":"application/json"}') @@ -412,7 +412,11 @@ def main(): uresp['json'] = js except: pass - if resp['status'] != status_code: + + sys.stderr.write("status ----> %s \n" % resp['status']) + sys.stderr.write("status_code ----> %s \n" % status_code) + + if str(resp['status']) not in status_code: module.fail_json(msg="Status code was not " + str(status_code), content=content, **uresp) elif return_content: module.exit_json(changed=changed, content=content, **uresp) From 9ff6c40ed07a746d74237d832aaec9d8026493ae Mon Sep 17 00:00:00 2001 From: bryan hunt Date: Mon, 24 Mar 2014 14:17:36 +0000 Subject: [PATCH 521/772] Accept status code from a list of valid codes. --- library/network/uri | 4 ---- 1 file changed, 4 deletions(-) diff --git a/library/network/uri b/library/network/uri index ac12ffbb782..0d6cd7dc789 100644 --- a/library/network/uri +++ b/library/network/uri @@ -412,10 +412,6 @@ def main(): uresp['json'] = js except: pass - - sys.stderr.write("status ----> %s \n" % resp['status']) - sys.stderr.write("status_code ----> %s \n" % status_code) - if str(resp['status']) not in status_code: module.fail_json(msg="Status code was not " + str(status_code), content=content, **uresp) elif return_content: From aa2d6e47f090bead429b041c6dcfe49d883338db Mon Sep 17 00:00:00 2001 From: bryan hunt Date: Mon, 24 Mar 2014 14:23:18 +0000 Subject: [PATCH 522/772] Accept status code from a list of valid codes. (cast the list of acceptable codes to a list of int ) --- library/network/uri | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/network/uri b/library/network/uri index 0d6cd7dc789..afc1dbd7f43 100644 --- a/library/network/uri +++ b/library/network/uri @@ -338,7 +338,7 @@ def main(): follow_redirects = module.params['follow_redirects'] creates = module.params['creates'] removes = module.params['removes'] - status_code = list(module.params['status_code']) + status_code = [int(x) for x in list(module.params['status_code'])] socket_timeout = module.params['timeout'] # Grab all the http headers. Need this hack since passing multi-values is currently a bit ugly. (e.g. 
headers='{"Content-Type":"application/json"}') @@ -412,7 +412,7 @@ def main(): uresp['json'] = js except: pass - if str(resp['status']) not in status_code: + if resp['status'] not in status_code: module.fail_json(msg="Status code was not " + str(status_code), content=content, **uresp) elif return_content: module.exit_json(changed=changed, content=content, **uresp) From 9169a110889cbcad21a4be76b640e16bd1d80d57 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 24 Mar 2014 10:28:48 -0400 Subject: [PATCH 523/772] Fixes #4777 add --force-handlers option to run handlers even if tasks fail --- bin/ansible-playbook | 5 +- lib/ansible/playbook/__init__.py | 97 +++++++++++++++++++------------- 2 files changed, 62 insertions(+), 40 deletions(-) diff --git a/bin/ansible-playbook b/bin/ansible-playbook index 344590341e6..18f8af208a0 100755 --- a/bin/ansible-playbook +++ b/bin/ansible-playbook @@ -78,6 +78,8 @@ def main(args): help="one-step-at-a-time: confirm each task before running") parser.add_option('--start-at-task', dest='start_at', help="start the playbook at the task matching this name") + parser.add_option('--force-handlers', dest='force_handlers', action='store_true', + help="run handlers even if a task fails") options, args = parser.parse_args(args) @@ -191,7 +193,8 @@ def main(args): su=options.su, su_pass=su_pass, su_user=options.su_user, - vault_password=vault_pass + vault_password=vault_pass, + force_handlers=options.force_handlers ) if options.listhosts or options.listtasks or options.syntax: diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index ce17303bbce..779f30088cc 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -73,6 +73,7 @@ class PlayBook(object): su_user = False, su_pass = False, vault_password = False, + force_handlers = False, ): """ @@ -92,6 +93,8 @@ class PlayBook(object): sudo: if not specified per play, requests all plays use sudo mode inventory: can be specified instead of host_list to use a pre-existing inventory object check: don't change anything, just try to detect some potential changes + any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed + force_handlers: continue to notify and run handlers even if a task fails """ self.SETUP_CACHE = SETUP_CACHE @@ -140,6 +143,7 @@ class PlayBook(object): self.su_user = su_user self.su_pass = su_pass self.vault_password = vault_password + self.force_handlers = force_handlers self.callbacks.playbook = self self.runner_callbacks.playbook = self @@ -568,7 +572,7 @@ class PlayBook(object): def _run_play(self, play): ''' run a list of tasks for a given pattern, in order ''' - + self.callbacks.on_play_start(play.name) # Get the hosts for this play play._play_hosts = self.inventory.list_hosts(play.hosts) @@ -606,42 +610,9 @@ class PlayBook(object): for task in play.tasks(): + # skip handlers until play is finished if task.meta is not None: - - # meta tasks are an internalism and are not valid for end-user playbook usage - # here a meta task is a placeholder that signals handlers should be run - - if task.meta == 'flush_handlers': - fired_names = {} - for handler in play.handlers(): - if len(handler.notified_by) > 0: - self.inventory.restrict_to(handler.notified_by) - - # Resolve the variables first - handler_name = template(play.basedir, handler.name, handler.module_vars) - if handler_name not in fired_names: - self._run_task(play, handler, True) - # prevent duplicate handler includes from running more than once - 
fired_names[handler_name] = 1 - - host_list = self._trim_unavailable_hosts(play._play_hosts) - if handler.any_errors_fatal and len(host_list) < hosts_count: - play.max_fail_pct = 0 - if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count): - host_list = None - if not host_list: - self.callbacks.on_no_hosts_remaining() - return False - - self.inventory.lift_restriction() - new_list = handler.notified_by[:] - for host in handler.notified_by: - if host in on_hosts: - while host in new_list: - new_list.remove(host) - handler.notified_by = new_list - - continue + continue # only run the task if the requested tags match should_run = False @@ -685,10 +656,58 @@ class PlayBook(object): # if no hosts remain, drop out if not host_list: - self.callbacks.on_no_hosts_remaining() + if self.force_handlers: + if not self.run_handlers(play): + return False + else: + self.callbacks.on_no_hosts_remaining() return False - - self.inventory.lift_also_restriction() + else: + self.inventory.lift_also_restriction() + if not self.run_handlers(play): + return False return True + + def run_handlers(self, play): + on_hosts = play._play_hosts + hosts_count = len(on_hosts) + for task in play.tasks(): + if task.meta is not None: + + # meta tasks are an internalism and are not valid for end-user playbook usage + # here a meta task is a placeholder that signals handlers should be run + + if task.meta == 'flush_handlers': + fired_names = {} + for handler in play.handlers(): + if len(handler.notified_by) > 0: + self.inventory.restrict_to(handler.notified_by) + + # Resolve the variables first + handler_name = template(play.basedir, handler.name, handler.module_vars) + if handler_name not in fired_names: + self._run_task(play, handler, True) + # prevent duplicate handler includes from running more than once + fired_names[handler_name] = 1 + + host_list = self._trim_unavailable_hosts(play._play_hosts) + if handler.any_errors_fatal and len(host_list) < hosts_count: + play.max_fail_pct = 0 + if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count): + host_list = None + if not host_list and not self.force_handlers: + self.callbacks.on_no_hosts_remaining() + return False + + self.inventory.lift_restriction() + new_list = handler.notified_by[:] + for host in handler.notified_by: + if host in on_hosts: + while host in new_list: + new_list.remove(host) + handler.notified_by = new_list + + continue + return True From 8fce4ed7bf59794f6c1f6e4e73e98c9fba7a4cfa Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 24 Mar 2014 10:31:14 -0400 Subject: [PATCH 524/772] Update changelog for force-handlers --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6543ae45727..cba4c726e55 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ Major features/changes: * Accelerate improvements: - multiple users can connect with different keys, when `accelerate_multi_key = yes` is specified in the ansible.cfg. - daemon lifetime is now based on the time from the last activity, not the time from the daemon's launch. +* ansible-playbook now accepts --force-handlers to run handlers even if tasks result in failures New Modules: From 8a19d3139142b738e270d69fafe1f06a3c142528 Mon Sep 17 00:00:00 2001 From: Robert Wehner Date: Mon, 24 Mar 2014 09:06:25 -0600 Subject: [PATCH 525/772] Update 'validate=' parameter documentation to make clear that the command passed via this parameter is not passed via shell and so all shell features will not work.
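The practical consequence is easiest to see outside of Ansible. Below is a minimal Python sketch of the difference between running a validation command as an argument vector and running it through a shell; the visudo invocation, the temporary path, and the use of subprocess here are illustrative only, not Ansible's actual implementation:

    import subprocess

    # argv-style execution: no shell is involved, so "|" and "grep" are
    # handed to visudo as literal arguments -- the pipeline never forms.
    subprocess.call(["visudo", "-cf", "/tmp/sudoers.tmp", "|", "grep", "parsed"])

    # shell-style execution: pipes and expansion work, but the arguments
    # are exposed to word splitting and injection, which is exactly what
    # validate= avoids by not using a shell.
    subprocess.call("visudo -cf /tmp/sudoers.tmp | grep parsed", shell=True)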
--- library/files/copy | 1 + library/files/lineinfile | 3 ++- library/files/template | 3 ++- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/library/files/copy b/library/files/copy index dbf9c71b4f6..bbf277837a8 100644 --- a/library/files/copy +++ b/library/files/copy @@ -73,6 +73,7 @@ options: description: - The validation command to run before copying into place. The path to the file to validate is passed in via '%s' which must be present as in the visudo example below. + The command is passed securely so shell features like expansion and pipes won't work. required: false default: "" version_added: "1.2" diff --git a/library/files/lineinfile b/library/files/lineinfile index bdc7b51ed90..bad0cf093ce 100644 --- a/library/files/lineinfile +++ b/library/files/lineinfile @@ -110,7 +110,8 @@ options: validate: required: false description: - - validation to run before copying into place + - validation to run before copying into place. The command is passed + securely so shell features like expansion and pipes won't work. required: false default: None version_added: "1.4" diff --git a/library/files/template b/library/files/template index 39f92c72a72..b4a964962f0 100644 --- a/library/files/template +++ b/library/files/template @@ -40,7 +40,8 @@ options: default: "no" validate: description: - - validation to run before copying into place + - validation to run before copying into place. The command is passed + securely so shell features like expansion and pipes won't work. required: false default: "" version_added: "1.2" From a4af31e51104be852d193465484ee729d0a4750b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 24 Mar 2014 12:27:32 -0500 Subject: [PATCH 526/772] Revert "add yum module downgrade support" This reverts commit cbd1da645756e6d6fbd255bde37d9be452c547fe. --- library/packaging/yum | 27 ++------------------------- 1 file changed, 2 insertions(+), 25 deletions(-) diff --git a/library/packaging/yum b/library/packaging/yum index eb8472ee0a9..aded7abbb63 100644 --- a/library/packaging/yum +++ b/library/packaging/yum @@ -29,7 +29,6 @@ import yum try: from yum.misc import find_unfinished_transactions, find_ts_remaining from rpmUtils.miscutils import splitFilename - from rpmUtils.miscutils import compareEVR transaction_helpers = True except: transaction_helpers = False @@ -40,7 +39,7 @@ module: yum version_added: historical short_description: Manages packages with the I(yum) package manager description: - - Installs, upgrades, downgrades, removes and lists packages and groups with the I(yum) package manager. + - Installs, upgrade, removes, and lists packages and groups with the I(yum) package manager. options: name: description: @@ -464,8 +463,6 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): res['rc'] = 0 res['changed'] = False - downgrade = False - for spec in items: pkg = None @@ -540,32 +537,12 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): if found: continue - # downgrade - the yum install command will only install or upgrade to a spec version, it will - # not install an older version of an RPM even if specified by the install spec. So we need to - # determine if this is a downgrade, and then use the yum downgrade command to install the RPM. - split_pkg_name = splitFilename(spec) - # if the Name and Version match a version was not provided and this is not a downgrade. 
- if split_pkg_name[0] != split_pkg_name[1]: - pkg_name = split_pkg_name[0] - pkgs = is_installed(module, repoq, pkg_name, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) - if pkgs: - (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(pkgs[0]) - (new_name, new_ver, new_rel, new_epoch, new_arch) = splitFilename(spec) - - compare = compareEVR((cur_epoch, cur_ver, cur_rel), (new_epoch, new_ver, new_rel)) - if compare > 0: - downgrade = True - # if not - then pass in the spec as what to install # we could get here if nothing provides it but that's not # the error we're catching here pkg = spec - operation = 'install' - if downgrade: - operation = 'downgrade' - - cmd = yum_basecmd + [operation, pkg] + cmd = yum_basecmd + ['install', pkg] if module.check_mode: module.exit_json(changed=True) From b17b5ad85bddc2e80273f2f91f39987e28c28ed3 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Mon, 24 Mar 2014 13:43:00 -0400 Subject: [PATCH 527/772] Added ansible_date_time.weekday as a fact. This returns Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday depending on the remote server's time. Useful for backups the way I use it. --- lib/ansible/module_utils/facts.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 01f812def7b..4ed366d4ccd 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -416,6 +416,7 @@ class Facts(object): now = datetime.datetime.now() self.facts['date_time']['year'] = now.strftime('%Y') self.facts['date_time']['month'] = now.strftime('%m') + self.facts['date_time']['weekday'] = now.strftime('%A') self.facts['date_time']['day'] = now.strftime('%d') self.facts['date_time']['hour'] = now.strftime('%H') self.facts['date_time']['minute'] = now.strftime('%M') From c76501b8a3b0f1a307240222ee635035ea6c735f Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bonicoli Date: Mon, 24 Mar 2014 18:55:31 +0100 Subject: [PATCH 528/772] 'changed' key is not defined when task fails. Fix KeyError introduced by fix of #6591.
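The Python detail behind this fix is simply that indexing a dict raises KeyError when the key is absent, while .get() substitutes a default. A toy illustration (the result dict below is invented for the example; a real failed result carries more fields):

    # a failed module result often has no 'changed' key at all
    result = {'failed': True, 'msg': 'template error'}

    print(result.get('changed', False))  # -> False; the safe lookup the fix uses

    try:
        print(result['changed'])         # plain indexing raises KeyError here
    except KeyError:
        print("no 'changed' key in a failed result")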
--- lib/ansible/runner/action_plugins/template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index 34392ba5abd..44b8e62dda3 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -123,7 +123,7 @@ class ActionModule(object): return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant)) else: res = self.runner._execute_module(conn, tmp, 'copy', module_args, inject=inject, complex_args=complex_args) - if res.result['changed']: + if res.result.get('changed', False): res.diff = dict(before=dest_contents, after=resultant) return res else: From db345391e378a8afb54fafa3867e2466daf5a093 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 24 Mar 2014 15:10:43 -0500 Subject: [PATCH 529/772] Fixing ownership when atomic_move is creating a file while sudo'ing Fixes #6647 --- lib/ansible/module_utils/basic.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index afdbdb84ac9..64d536d2627 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -977,6 +977,8 @@ class AnsibleModule(object): if self.selinux_enabled(): context = self.selinux_default_context(dest) + creating = not os.path.exists(dest) + try: # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic. os.rename(src, dest) @@ -1008,6 +1010,9 @@ class AnsibleModule(object): self.cleanup(tmp_dest.name) self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e)) + if creating and os.getenv("SUDO_USER"): + os.chown(dest, os.getuid(), os.getgid()) + if self.selinux_enabled(): # rename might not preserve context self.set_context_if_different(dest, context, False) From dc658eaa1c636c47804bf70eddc55ada5e4078bf Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 24 Mar 2014 15:59:43 -0500 Subject: [PATCH 530/772] Check to make sure the host is in the inventory vars cache before clearing it Fixes #6660 --- lib/ansible/runner/action_plugins/group_by.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/group_by.py b/lib/ansible/runner/action_plugins/group_by.py index f8b4f318db2..4d6205ca60c 100644 --- a/lib/ansible/runner/action_plugins/group_by.py +++ b/lib/ansible/runner/action_plugins/group_by.py @@ -83,7 +83,8 @@ class ActionModule(object): inv_group = ansible.inventory.Group(name=group) inventory.add_group(inv_group) for host in hosts: - del self.runner.inventory._vars_per_host[host] + if host in self.runner.inventory._vars_per_host: + del self.runner.inventory._vars_per_host[host] inv_host = inventory.get_host(host) if not inv_host: inv_host = ansible.inventory.Host(name=host) From bc93732b1d6da07dc6c6d69924a7b52c0fd16266 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 24 Mar 2014 16:32:31 -0500 Subject: [PATCH 531/772] Catch permissions errors related to opening a known_hosts file in modules Fixes #6644 --- lib/ansible/module_utils/known_hosts.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index dfd684c2328..e6912d91846 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ 
b/lib/ansible/module_utils/known_hosts.py @@ -87,9 +87,16 @@ def not_in_host_file(self, host): if not os.path.exists(hf): hfiles_not_found += 1 continue - host_fh = open(hf) - data = host_fh.read() - host_fh.close() + + try: + host_fh = open(hf) + except IOError, e: + hfiles_not_found += 1 + continue + else: + data = host_fh.read() + host_fh.close() + for line in data.split("\n"): if line is None or line.find(" ") == -1: continue From 605156c9047a07e02cfaa3c36c3a063ff17af540 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 25 Mar 2014 00:20:11 -0400 Subject: [PATCH 532/772] Fixes premature handler execution after adding force-handlers --- lib/ansible/playbook/__init__.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 779f30088cc..8728965e68d 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -601,6 +601,7 @@ class PlayBook(object): play_hosts.append(all_hosts.pop()) serialized_batch.append(play_hosts) + task_errors = False for on_hosts in serialized_batch: # restrict the play to just the hosts we have in our on_hosts block that are @@ -657,16 +658,18 @@ class PlayBook(object): # if no hosts remain, drop out if not host_list: if self.force_handlers: - if not self.run_handlers(play): - return False + task_errors == True + break else: - self.callbacks.on_no_hosts_remaining() - return False - else: - self.inventory.lift_also_restriction() - if not self.run_handlers(play): return False + if task_errors and not self.force_handlers: + return False + else: + self.inventory.lift_also_restriction() + if not self.run_handlers(play): + return False + return True From 03040a97c8905957b13fa10d6195202e801f3204 Mon Sep 17 00:00:00 2001 From: Alexander Winkler Date: Tue, 25 Mar 2014 09:03:48 +0000 Subject: [PATCH 533/772] svr4pkg supports the -G switch for pkgadd --- library/packaging/svr4pkg | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/library/packaging/svr4pkg b/library/packaging/svr4pkg index 485e7ebcbfe..51c79a85847 100644 --- a/library/packaging/svr4pkg +++ b/library/packaging/svr4pkg @@ -57,6 +57,13 @@ options: description: - Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4) required: false + zone: + description: + - Whether to install the package only in the current zone, or install it into all zones. + - The installation into all zones works only if you are working with the global zone. 
+ required: false + default: "all" + choices: ["current", "all"] ''' EXAMPLES = ''' @@ -64,7 +71,7 @@ - svr4pkg: name=CSWcommon src=/tmp/cswpkgs.pkg state=present # Install a package directly from an http site -- svr4pkg: name=CSWpkgutil src=http://get.opencsw.org/now state=present +- svr4pkg: name=CSWpkgutil src=http://get.opencsw.org/now state=present zone=current # Install a package with a response file - svr4pkg: name=CSWggrep src=/tmp/third-party.pkg response_file=/tmp/ggrep.response state=present @@ -116,9 +123,12 @@ def run_command(module, cmd): cmd[0] = module.get_bin_path(progname, True) return module.run_command(cmd) -def package_install(module, name, src, proxy, response_file): +def package_install(module, name, src, proxy, response_file, zone): adminfile = create_admin_file() - cmd = [ 'pkgadd', '-na', adminfile, '-d', src ] + cmd = [ 'pkgadd', '-n'] + if zone == 'current': + cmd += [ '-G' ] + cmd += [ '-a', adminfile, '-d', src ] if proxy is not None: cmd += [ '-x', proxy ] if response_file is not None: @@ -142,7 +152,8 @@ def main(): state = dict(required = True, choices=['present', 'absent']), src = dict(default = None), proxy = dict(default = None), - response_file = dict(default = None) + response_file = dict(default = None), + zone = dict(required=False, default = 'all', choices=['current','all']) ), supports_check_mode=True ) @@ -151,6 +162,7 @@ src = module.params['src'] proxy = module.params['proxy'] response_file = module.params['response_file'] + zone = module.params['zone'] rc = None out = '' err = '' @@ -165,7 +177,7 @@ if not package_installed(module, name): if module.check_mode: module.exit_json(changed=True) - (rc, out, err) = package_install(module, name, src, proxy, response_file) + (rc, out, err) = package_install(module, name, src, proxy, response_file, zone) # Stdout is normally empty but for some packages can be # very long and is not often useful if len(out) > 75: From a4af31e51104be852d193465484ee729d0a4750b Mon Sep 17 00:00:00 2001 From: apenav Date: Tue, 25 Mar 2014 10:32:06 +0100 Subject: [PATCH 534/772] Typo: assign not equal in boolean guard variable The typo meant the variable controlling the list of available hosts was compared rather than assigned. --- lib/ansible/playbook/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 8728965e68d..bee4cd6745f 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -658,7 +658,7 @@ class PlayBook(object): # if no hosts remain, drop out if not host_list: if self.force_handlers: - task_errors == True + task_errors = True break else: return False From ef6b437d0d00c50c0c9b012c70aa1293c2a94374 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Tue, 25 Mar 2014 14:50:29 +0100 Subject: [PATCH 535/772] Fix TypeError when using old simplejson lib. Some very old versions of the simplejson module do not support the 'encoding' keyword and raise the following exception: TypeError: __init__() got an unexpected keyword argument 'encoding' This fix runs json.dumps without the encoding keyword (as before #a023cb) only when a TypeError is raised.
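The compatibility pattern is easy to reproduce standalone. A minimal sketch of the try/except fallback the patch applies (the helper function name is invented here; the real change is applied inline in module_utils/basic.py):

    import json  # may actually be the simplejson module on very old systems

    def dumps_compat(data, encoding='utf-8'):
        try:
            # newer libraries accept the encoding keyword
            return json.dumps(data, encoding=encoding)
        except TypeError:
            # very old simplejson lacks the keyword; retry without it
            return json.dumps(data)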
--- lib/ansible/module_utils/basic.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 64d536d2627..2dcb9cd5458 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -884,6 +884,9 @@ class AnsibleModule(object): for encoding in ("utf-8", "latin-1", "unicode_escape"): try: return json.dumps(data, encoding=encoding) + # Old systems using simplejson module does not support encoding keyword. + except TypeError, e: + return json.dumps(data) except UnicodeDecodeError, e: continue self.fail_json(msg='Invalid unicode encoding encountered') From ebd1f70c5da643fee79356066a284f4d2df57409 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 25 Mar 2014 10:35:05 -0400 Subject: [PATCH 536/772] Typo in setting task_errors variable --- lib/ansible/playbook/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 8728965e68d..bee4cd6745f 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -658,7 +658,7 @@ class PlayBook(object): # if no hosts remain, drop out if not host_list: if self.force_handlers: - task_errors == True + task_errors = True break else: return False From 08ba18668b2822e9cceed5078693dccc2e98cdc5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Pawe=C5=82=20G=C5=82azik?= Date: Tue, 25 Mar 2014 16:10:04 +0100 Subject: [PATCH 537/772] Strip() vault password file --- bin/ansible-playbook | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ansible-playbook b/bin/ansible-playbook index be178a6565c..9ab89c6f1e2 100755 --- a/bin/ansible-playbook +++ b/bin/ansible-playbook @@ -122,7 +122,7 @@ def main(args): this_path = os.path.expanduser(options.vault_password_file) try: f = open(this_path, "rb") - tmp_vault_pass=f.read() + tmp_vault_pass=f.read().strip() f.close() except (OSError, IOError), e: raise errors.AnsibleError("Could not read %s: %s" % (this_path, e)) From 6b53ac8a9ae4398f9a989e5968d9db311ef4b981 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Pawe=C5=82=20G=C5=82azik?= Date: Tue, 25 Mar 2014 16:10:33 +0100 Subject: [PATCH 538/772] strip() vault password file --- bin/ansible | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ansible b/bin/ansible index 0189355ddbf..426670e1faf 100755 --- a/bin/ansible +++ b/bin/ansible @@ -128,7 +128,7 @@ class Cli(object): this_path = os.path.expanduser(options.vault_password_file) try: f = open(this_path, "rb") - tmp_vault_pass=f.read() + tmp_vault_pass=f.read().strip() f.close() except (OSError, IOError), e: raise errors.AnsibleError("Could not read %s: %s" % (this_path, e)) From 94297950a0d55c5007dd65dd4ac556d530ba5828 Mon Sep 17 00:00:00 2001 From: Herby Gillot Date: Tue, 25 Mar 2014 11:20:19 -0400 Subject: [PATCH 539/772] Name platform.node() fact as "nodename" --- lib/ansible/module_utils/facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index dd6d76ff63d..3eaac69ccaf 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -108,7 +108,7 @@ class Facts(object): self.facts['python_version'] = platform.python_version() self.facts['fqdn'] = socket.getfqdn() self.facts['hostname'] = platform.node().split('.')[0] - self.facts['full_hostname'] = platform.node() + self.facts['nodename'] = platform.node() self.facts['domain'] = 
'.'.join(self.facts['fqdn'].split('.')[1:]) arch_bits = platform.architecture()[0] self.facts['userspace_bits'] = arch_bits.replace('bit', '') From f2c06cadc36b0d7465f0e320b1c372d4634b85de Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 25 Mar 2014 10:39:12 -0500 Subject: [PATCH 540/772] Cleanup from merge of replace module and CHANGELOG update --- CHANGELOG.md | 1 + library/files/replace | 2 +- test/known_hosts.txt | 4 ---- 3 files changed, 2 insertions(+), 5 deletions(-) delete mode 100644 test/known_hosts.txt diff --git a/CHANGELOG.md b/CHANGELOG.md index cba4c726e55..553afc77c88 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ Major features/changes: New Modules: +* files: replace * packaging: cpanm (Perl) * packaging: portage * packaging: composer (PHP) diff --git a/library/files/replace b/library/files/replace index b008d1b39db..f4193ae9f30 100644 --- a/library/files/replace +++ b/library/files/replace @@ -32,7 +32,7 @@ description: - This module will replace all instances of a pattern within a file. - It is up to the user to maintain idempotence by ensuring that the same pattern would never match any replacements made. -version_added: "1.4" +version_added: "1.6" options: dest: required: true diff --git a/test/known_hosts.txt b/test/known_hosts.txt deleted file mode 100644 index 70fcf353633..00000000000 --- a/test/known_hosts.txt +++ /dev/null @@ -1,4 +0,0 @@ -alpha.example.com,10.11.12.13 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAklOUpkDHrfHY17SbrmTIpNLTGK9Tjom/BWDSUGPl+nafzlHDTYW7hdI4yZ5ew18JH4JW9jbhUFrviQzM7xlELEVf4h9lFX5QVkbPppSwg0cda3Pbv7kOdJ/MTyBlWXFCR+HAo3FXRitBqxiX1nKhXpHAZsMciLq8V6RjsNAQwdsdMFvSlVK/7XAt3FaoJoAsncM1Q9x5+3V0Ww68/eIFmb1zuUFljQJKprrX88XypNDvjYNby6vw/Pb0rwert/EnmZ+AW4OZPnTPI89ZPmVMLuayrD2cE86Z/il8b+gw3r3+1nKatmIkjn2so1d01QraTlMqVSsbxNrRFi9wrf+M7Q== -bravo.example.com,10.11.12.14 ssh-rsa AAAAB3NzaC1yom/BWDSUGPl+nafzlHDTYW7hdI4yZ5ew18JH4JW9jbhUFrvic2EAAAABIwAAAQEAklOUpkDHrfHY17SbrmTIpNLTGK9TjQzM7xlELEVf4h9lFX5QVkbPppSwg0cda3Pbv7kOdJ/MTyBlWXFCR+HAo3FXRitBqxiX1nKhXpHAZsMcAW4OZPnTPI89ZPmVMLuayrD2cE86Z/iliLq8V6RjsNAQwdsdMFvSlVK/7XAt3FaoJoAsncM1Q9x5+3V0Ww68/eIFmb1zuUFljQJKprrX88XypNDvjYNby6vw/Pb0rwert/EnmZ+8b+gw3r3+1nKatmIkjn2so1d01QraTlMqVSsbxNrRFi9wrf+M7Q== -charlie.example.com,10.11.12.15 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAklOUpkDHrfHY17SbrmTIpNLTGK9Tjom/BWDSUGPl+nafzlHDTYW7hdI4yZ5ew18JH4JW9jbhUFrviQzM7xlELEVf4h9lFX5QVkbPppSwg0cda3Pbv7kOdJ/MTyBlWXFCR+HAo3FXRitBqxiX1nKhXpHAZsMciLq8V6RjsNAQwdsdMFvSlVK/7XAt3FaoJoAsncM1Q9x5+3V0Ww68/eIFmb1zuUFljQJKprrX88XypNDvjYNby6vw/Pb0rwert/EnmZ+AW4OZPnTPI89ZPmVMLuayrD2cE86Z/il8b+gw3r3+1nKatmIkjn2so1d01QraTlMqVSsbxNrRFi9wrf+M7Q== -10.11.12.13 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAklOUpkDHrfHY17SbrmTIpNLTGK9Tjom/BWDSUGPl+nafzlHDTYW7hdI4yZ5ew18JH4JW9jbhUFrviQzM7xlELEVf4h9lFX5QVkbPppSwg0cda3Pbv7kOdJ/MTyBlWXFCR+HAo3FXRitBqxiX1nKhXpHAZsMciLq8V6RjsNAQwdsdMFvSlVK/7XAt3FaoJoAsncM1Q9x5+3V0Ww68/eIFmb1zuUFljQJKprrX88XypNDvjYNby6vw/Pb0rwert/EnmZ+AW4OZPnTPI89ZPmVMLuayrD2cE86Z/il8b+gw3r3+1nKatmIkjn2so1d01QraTlMqVSsbxNrRFi9wrf+M7Q== From 846933dd531b58c2c82d5cb762d72130ebcb4581 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 25 Mar 2014 11:41:00 -0500 Subject: [PATCH 541/772] Make inventory parsing error detection smarter If something is executable but doesn't look like it should be, or if something is NOT executable and DOES looks like it should, show a more apropos error with a hint on correcting the problem Fixes #5113 --- lib/ansible/inventory/__init__.py | 38 +++++++++++++++++++++++++++---- 1 file changed, 33 
insertions(+), 5 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index b6f644190f1..a8cca8faaf2 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -99,12 +99,40 @@ class Inventory(object): self.host_list = os.path.join(self.host_list, "") self.parser = InventoryDirectory(filename=host_list) self.groups = self.parser.groups.values() - elif utils.is_executable(host_list): - self.parser = InventoryScript(filename=host_list) - self.groups = self.parser.groups.values() else: - self.parser = InventoryParser(filename=host_list) - self.groups = self.parser.groups.values() + # check to see if the specified file starts with a + # shebang (#!/), so if an error is raised by the parser + # class we can show a more apropos error + shebang_present = False + try: + inv_file = open(host_list) + first_line = inv_file.readlines()[0] + inv_file.close() + if first_line.find('#!') == 0: + shebang_present = True + except: + pass + + if utils.is_executable(host_list): + try: + self.parser = InventoryScript(filename=host_list) + self.groups = self.parser.groups.values() + except: + if not shebang_present: + raise errors.AnsibleError("The file %s is marked as executable, but failed to execute correctly. " % host_list + \ + "If this is not supposed to be an executable script, correct this with `chmod -x %s`." % host_list) + else: + raise + else: + try: + self.parser = InventoryParser(filename=host_list) + self.groups = self.parser.groups.values() + except: + if shebang_present: + raise errors.AnsibleError("The file %s looks like it should be an executable inventory script, but is not marked executable. " % host_list + \ + "Perhaps you want to correct this with `chmod +x %s`?" % host_list) + else: + raise utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True) else: From 8c7828d4690d79d2487ba60c5bf0197dd29ba675 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 25 Mar 2014 11:56:55 -0500 Subject: [PATCH 542/772] Tweaking error message resulting from undefined template variables Fixes #5114 --- lib/ansible/utils/template.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index 0b86954e882..2f79d6ca591 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -338,7 +338,10 @@ def template_from_string(basedir, data, vars, fail_on_undefined=False): res = jinja2.utils.concat(rf) except TypeError, te: if 'StrictUndefined' in str(te): - raise errors.AnsibleUndefinedVariable("unable to look up a name or access an attribute in template string") + raise errors.AnsibleUndefinedVariable( + "Unable to look up a name or access an attribute in template string. " + \ + "Make sure your variable name does not contain invalid characters like '-'." + ) else: raise errors.AnsibleError("an unexpected type error occured. 
Error was %s" % te) return res From f9018a6f1d0c076da6f904b92e559130fb791044 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 25 Mar 2014 13:32:11 -0400 Subject: [PATCH 543/772] Ensure handlers run when meta tasks are defined and add handler integration tests Fixes #6678 Fixes #6670 --- lib/ansible/playbook/__init__.py | 83 ++++++++++--------- test/integration/Makefile | 5 +- test/integration/inventory.handlers | 6 ++ .../test_handlers_meta/handlers/main.yml | 7 ++ .../roles/test_handlers_meta/meta/main.yml | 3 + .../roles/test_handlers_meta/tasks/main.yml | 41 +++++++++ test/integration/test_handlers.yml | 24 ++++++ 7 files changed, 130 insertions(+), 39 deletions(-) create mode 100644 test/integration/inventory.handlers create mode 100644 test/integration/roles/test_handlers_meta/handlers/main.yml create mode 100644 test/integration/roles/test_handlers_meta/meta/main.yml create mode 100644 test/integration/roles/test_handlers_meta/tasks/main.yml create mode 100644 test/integration/test_handlers.yml diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index bee4cd6745f..f624be1b297 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -611,8 +611,12 @@ class PlayBook(object): for task in play.tasks(): - # skip handlers until play is finished if task.meta is not None: + # meta tasks can force handlers to run mid-play + if task.meta == 'flush_handlers': + self.run_handlers(play) + + # skip calling the handler till the play is finished continue # only run the task if the requested tags match @@ -661,14 +665,20 @@ class PlayBook(object): task_errors = True break else: + self.callbacks.on_no_hosts_remaining() return False - if task_errors and not self.force_handlers: - return False - else: + # lift restrictions after each play finishes self.inventory.lift_also_restriction() - if not self.run_handlers(play): + + if task_errors and not self.force_handlers: + # if there were failed tasks and handler execution + # is not forced, quit the play with an error return False + else: + # no errors, go ahead and execute all handlers + if not self.run_handlers(play): + return False return True @@ -679,38 +689,35 @@ class PlayBook(object): for task in play.tasks(): if task.meta is not None: - # meta tasks are an internalism and are not valid for end-user playbook usage - # here a meta task is a placeholder that signals handlers should be run - - if task.meta == 'flush_handlers': - fired_names = {} - for handler in play.handlers(): - if len(handler.notified_by) > 0: - self.inventory.restrict_to(handler.notified_by) - - # Resolve the variables first - handler_name = template(play.basedir, handler.name, handler.module_vars) - if handler_name not in fired_names: - self._run_task(play, handler, True) - # prevent duplicate handler includes from running more than once - fired_names[handler_name] = 1 - - host_list = self._trim_unavailable_hosts(play._play_hosts) - if handler.any_errors_fatal and len(host_list) < hosts_count: - play.max_fail_pct = 0 - if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count): - host_list = None - if not host_list and not self.force_handlers: - self.callbacks.on_no_hosts_remaining() - return False - - self.inventory.lift_restriction() - new_list = handler.notified_by[:] - for host in handler.notified_by: - if host in on_hosts: - while host in new_list: - new_list.remove(host) - handler.notified_by = new_list + fired_names = {} + for handler in play.handlers(): + if len(handler.notified_by) > 0: 
+ self.inventory.restrict_to(handler.notified_by) + + # Resolve the variables first + handler_name = template(play.basedir, handler.name, handler.module_vars) + if handler_name not in fired_names: + self._run_task(play, handler, True) + # prevent duplicate handler includes from running more than once + fired_names[handler_name] = 1 + + host_list = self._trim_unavailable_hosts(play._play_hosts) + if handler.any_errors_fatal and len(host_list) < hosts_count: + play.max_fail_pct = 0 + if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count): + host_list = None + if not host_list and not self.force_handlers: + self.callbacks.on_no_hosts_remaining() + return False + + self.inventory.lift_restriction() + new_list = handler.notified_by[:] + for host in handler.notified_by: + if host in on_hosts: + while host in new_list: + new_list.remove(host) + handler.notified_by = new_list + + continue - continue return True diff --git a/test/integration/Makefile b/test/integration/Makefile index ad5e62a91d7..da2758c1406 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -14,7 +14,7 @@ else CREDENTIALS_ARG = endif -all: non_destructive destructive check_mode test_hash +all: non_destructive destructive check_mode test_hash test_handlers non_destructive: ansible-playbook non_destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) @@ -25,6 +25,9 @@ destructive: check_mode: ansible-playbook check_mode.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v --check $(TEST_FLAGS) +test_handlers: + ansible-playbook test_handlers.yml -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) + test_hash: ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' diff --git a/test/integration/inventory.handlers b/test/integration/inventory.handlers new file mode 100644 index 00000000000..905026f12ef --- /dev/null +++ b/test/integration/inventory.handlers @@ -0,0 +1,6 @@ +[testgroup] +A +B +C +D +E diff --git a/test/integration/roles/test_handlers_meta/handlers/main.yml b/test/integration/roles/test_handlers_meta/handlers/main.yml new file mode 100644 index 00000000000..634e6eca2ad --- /dev/null +++ b/test/integration/roles/test_handlers_meta/handlers/main.yml @@ -0,0 +1,7 @@ +- name: set_handler_fact_1 + set_fact: + handler1_called: True + +- name: set_handler_fact_2 + set_fact: + handler2_called: True diff --git a/test/integration/roles/test_handlers_meta/meta/main.yml b/test/integration/roles/test_handlers_meta/meta/main.yml new file mode 100644 index 00000000000..1050c23ce30 --- /dev/null +++ b/test/integration/roles/test_handlers_meta/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + diff --git a/test/integration/roles/test_handlers_meta/tasks/main.yml b/test/integration/roles/test_handlers_meta/tasks/main.yml new file mode 100644 index 00000000000..047b61ce886 --- /dev/null +++ b/test/integration/roles/test_handlers_meta/tasks/main.yml @@ -0,0 +1,41 @@ +# test code for the async keyword +# (c) 2014, James Tanner + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# 
(at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +- name: notify the first handler + shell: echo + notify: + - set_handler_fact_1 + +- name: force handler execution now + meta: "flush_handlers" + +- name: assert handler1 ran and not handler2 + assert: + that: + - "handler1_called is defined" + - "handler2_called is not defined" + +- name: reset handler1_called + set_fact: + handler1_called: False + +- name: notify the second handler + shell: echo + notify: + - set_handler_fact_2 + diff --git a/test/integration/test_handlers.yml b/test/integration/test_handlers.yml new file mode 100644 index 00000000000..dd766a9deaf --- /dev/null +++ b/test/integration/test_handlers.yml @@ -0,0 +1,24 @@ +--- +- name: run handlers + hosts: A + gather_facts: False + connection: local + roles: + - { role: test_handlers_meta } + +- name: verify final handler was run + hosts: A + gather_facts: False + connection: local + tasks: + - name: verify handler2 ran + assert: + that: + - "not hostvars[inventory_hostname]['handler1_called']" + - "'handler2_called' in hostvars[inventory_hostname]" + +#- hosts: testgroup +# gather_facts: False +# connection: local +# roles: +# - { role: test_handlers_meta } From 4ea12c1b86915620b515fb5530cc9f8bb4f1de67 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 25 Mar 2014 13:00:38 -0500 Subject: [PATCH 544/772] Make sure stat of dest is available in atomic_move Fixes #6682 --- lib/ansible/module_utils/basic.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 2dcb9cd5458..4bdb3fc2cc5 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -966,11 +966,12 @@ class AnsibleModule(object): it uses os.rename to ensure this as it is an atomic operation, rest of the function is to work around limitations, corner cases and ensure selinux context is saved if possible''' context = None + dest_stat = None if os.path.exists(dest): try: - st = os.stat(dest) - os.chmod(src, st.st_mode & 07777) - os.chown(src, st.st_uid, st.st_gid) + dest_stat = os.stat(dest) + os.chmod(src, dest_stat.st_mode & 07777) + os.chown(src, dest_stat.st_uid, dest_stat.st_gid) except OSError, e: if e.errno != errno.EPERM: raise @@ -1005,9 +1006,8 @@ class AnsibleModule(object): if self.selinux_enabled(): self.set_context_if_different( tmp_dest.name, context, False) - # Reset owners, they are not preserved by shutil.copy2(), which - # is what shutil.move() falls back to. 
- os.chown(tmp_dest.name, st.st_uid, st.st_gid) + if dest_stat: + os.chown(tmp_dest.name, dest_stat.st_uid, dest_stat.st_gid) os.rename(tmp_dest.name, dest) except (shutil.Error, OSError, IOError), e: self.cleanup(tmp_dest.name) From c9bf7eb9bbff4e54398195256a1bcf86b3259cc4 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 25 Mar 2014 13:59:57 -0500 Subject: [PATCH 545/772] Don't run {failed|changed}_when checks until async_status is done Fixes #5117 --- lib/ansible/runner/__init__.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 3865b9c0b88..51655388aaf 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -849,16 +849,19 @@ class Runner(object): changed_when = self.module_vars.get('changed_when') failed_when = self.module_vars.get('failed_when') - if changed_when is not None or failed_when is not None: + if (changed_when is not None or failed_when is not None) and self.background == 0: register = self.module_vars.get('register') - if register is not None: + if register is not None: if 'stdout' in data: data['stdout_lines'] = data['stdout'].splitlines() inject[register] = data - if changed_when is not None: - data['changed'] = utils.check_conditional(changed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) - if failed_when is not None: - data['failed_when_result'] = data['failed'] = utils.check_conditional(failed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) + # only run the final checks if the async_status has finished, + # or if we're not running an async_status check at all + if (module_name == 'async_status' and "finished" in data) or module_name != 'async_status': + if changed_when is not None: + data['changed'] = utils.check_conditional(changed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) + if failed_when is not None: + data['failed_when_result'] = data['failed'] = utils.check_conditional(failed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) if is_chained: # no callbacks From c79c001bfb626eefbc1f43df44f0def0f56a5101 Mon Sep 17 00:00:00 2001 From: Ramon de la Fuente Date: Wed, 19 Mar 2014 14:35:11 +0100 Subject: [PATCH 546/772] Changed call to EDITOR to allow for parameters The EDITOR environment variable is used to create and edit files in the vault. But if the EDITOR variable contains parameters, subprocess.call() breaks. This fixes the EDITOR environment variable to be safely split into a list. It adds a dependency on shlex. --- lib/ansible/utils/vault.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 62b082a9af4..265bf7a053e 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -19,6 +19,7 @@ # installs ansible and sets it up to run on cron. 
import os +import shlex import shutil import tempfile from io import BytesIO @@ -189,8 +190,7 @@ class VaultEditor(object): raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename) # drop the user into vim on file - EDITOR = os.environ.get('EDITOR','vim') - call([EDITOR, self.filename]) + call(self._editor_shell_command(self.filename)) tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) this_vault.cipher_name = self.cipher_name @@ -226,8 +226,7 @@ class VaultEditor(object): self.write_data(dec_data, tmp_path) # drop the user into vim on the tmp file - EDITOR = os.environ.get('EDITOR','vim') - call([EDITOR, tmp_path]) + call(self._editor_shell_command(tmp_path)) new_data = self.read_data(tmp_path) # create new vault @@ -299,6 +298,13 @@ class VaultEditor(object): os.remove(dest) shutil.move(src, dest) + def _editor_shell_command(self, filename): + EDITOR = os.environ.get('EDITOR','vim') + editor = shlex.split(EDITOR) + editor.append(filename) + + return editor + ######################################## # CIPHERS # ######################################## From 3194fbd36577d1612513cff2b9bfa72052877a92 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 25 Mar 2014 16:07:05 -0400 Subject: [PATCH 547/772] Fixes #6655 catch unicode encoding errors before sending to syslog --- lib/ansible/module_utils/basic.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 4bdb3fc2cc5..7f7054fc478 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -804,6 +804,12 @@ class AnsibleModule(object): else: msg = 'Invoked' + # 6655 - allow for accented characters + try: + msg = unicode(msg).encode('utf8') + except UnicodeDecodeError, e: + pass + if (has_journal): journal_args = ["MESSAGE=%s %s" % (module, msg)] journal_args.append("MODULE=%s" % os.path.basename(__file__)) @@ -814,10 +820,10 @@ class AnsibleModule(object): except IOError, e: # fall back to syslog since logging to journal failed syslog.openlog(str(module), 0, syslog.LOG_USER) - syslog.syslog(syslog.LOG_NOTICE, unicode(msg).encode('utf8')) + syslog.syslog(syslog.LOG_NOTICE, msg) #1 else: syslog.openlog(str(module), 0, syslog.LOG_USER) - syslog.syslog(syslog.LOG_NOTICE, unicode(msg).encode('utf8')) + syslog.syslog(syslog.LOG_NOTICE, msg) #2 def _set_cwd(self): try: From 22e6293e8b19497cd918468a79a4fedd84713954 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 25 Mar 2014 18:38:34 -0400 Subject: [PATCH 548/772] Update CHANGELOG.md Correct module name in changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 553afc77c88..46a0f212ef9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,7 +36,7 @@ New Modules: * cloud: rax_identity * cloud: ec2_asg (configure autoscaling groups) * cloud: ec2_scaling_policy -* cloud: ec2_metricalarm +* cloud: ec2_metric_alarm Other notable changes: From 18d82d1eb6f20590e06aeaf144dc5eeaa19a4515 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 25 Mar 2014 19:29:55 -0500 Subject: [PATCH 549/772] Documentation/examples updates for new 1.6 accelerate options Fixes #6692 --- docsite/rst/intro_configuration.rst | 25 +++++++++++++++++++++++++ examples/ansible.cfg | 11 +++++++++++ 2 files changed, 36 insertions(+) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 3313cb275b1..6304a0d350b 100644 --- 
a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -639,4 +639,29 @@ This setting controls the timeout for the socket connect call, and should be kep Note, this value can be set to less than one second, however it is probably not a good idea to do so unless you're on a very fast and reliable LAN. If you're connecting to systems over the internet, it may be necessary to increase this timeout. +.. _accelerate_daemon_timeout: + +accelerate_daemon_timeout +========================= + +.. versionadded:: 1.6 + +This setting controls the timeout for the accelerated daemon, as measured in minutes. The default daemon timeout is 30 minutes:: + + accelerate_daemon_timeout = 30 + +Note, prior to 1.6, the timeout was hard-coded from the time of the daemon's launch. For version 1.6+, the timeout is now based on the last activity to the daemon and is configurable via this option. + +.. _accelerate_multi_key: + +accelerate_multi_key +==================== + +.. versionadded:: 1.6 + +If enabled, this setting allows multiple private keys to be uploaded to the daemon. Any clients connecting to the daemon must also enable this option:: + + accelerate_multi_key = yes + +New clients first connect to the target node over SSH to upload the key, which is done via a local socket file, so they must have the same access as the user that launched the daemon originally. diff --git a/examples/ansible.cfg b/examples/ansible.cfg index a4fc4c55aca..6e297d4f0e4 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -168,3 +168,14 @@ filter_plugins = /usr/share/ansible_plugins/filter_plugins accelerate_port = 5099 accelerate_timeout = 30 accelerate_connect_timeout = 5.0 + +# The daemon timeout is measured in minutes. This time is measured +# from the last activity to the accelerate daemon. +accelerate_daemon_timeout = 30 + +# If set to yes, accelerate_multi_key will allow multiple +# private keys to be uploaded to it, though each user must +# have access to the system via SSH to add a new key. The default +# is "no". 
+#accelerate_multi_key = yes + From 7a3fa63db5cb12820ceb97ca100c340afacbab5c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 25 Mar 2014 20:45:04 -0500 Subject: [PATCH 550/772] Fix for unicode environment variable handling Fixes #5395 --- lib/ansible/runner/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 51655388aaf..85f4b5d0678 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -291,7 +291,7 @@ class Runner(object): raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro) result = "" for (k,v) in enviro.iteritems(): - result = "%s=%s %s" % (k, pipes.quote(str(v)), result) + result = "%s=%s %s" % (k, pipes.quote(unicode(v)), result) return result # ***************************************************** From b3cdcbff75963abb9be6d792aaa2bb0cbfa65994 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 25 Mar 2014 22:21:31 -0500 Subject: [PATCH 551/772] Adding in a test for subversion export option --- test/integration/roles/test_subversion/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/integration/roles/test_subversion/tasks/main.yml b/test/integration/roles/test_subversion/tasks/main.yml index 22503de35c8..1b2d26529da 100644 --- a/test/integration/roles/test_subversion/tasks/main.yml +++ b/test/integration/roles/test_subversion/tasks/main.yml @@ -90,6 +90,10 @@ - debug: var=subverted3 +- name: checkout with export + subversion: repo={{ repo }} dest={{ checkout_dir }} export=True + register: subverted4 + # FIXME: this needs to be fixed in the code see GitHub 6079 #- name: verify on a reclone things are marked unchanged From 69e09b048c001a0e7c862c7a950265e26676500f Mon Sep 17 00:00:00 2001 From: Alexander Winkler Date: Wed, 26 Mar 2014 10:25:42 +0000 Subject: [PATCH 552/772] version_added for the new parameters documentation --- library/packaging/svr4pkg | 1 + 1 file changed, 1 insertion(+) diff --git a/library/packaging/svr4pkg b/library/packaging/svr4pkg index 51c79a85847..58961631b2b 100644 --- a/library/packaging/svr4pkg +++ b/library/packaging/svr4pkg @@ -64,6 +64,7 @@ options: required: false default: "all" choices: ["current", "all"] + version_added: "1.6" ''' EXAMPLES = ''' From 12c84dd6bdbf4dcd8f561d1884c43bb2b2c3f5e5 Mon Sep 17 00:00:00 2001 From: jjshoe Date: Wed, 26 Mar 2014 08:32:55 -0500 Subject: [PATCH 553/772] Show argument dependency failed: [192.168.1.2] => {"cmd": "rsync --delay-updates -FF --compress --timeout=10 --delete-after --rsh 'ssh -i /home/jjshoe/.vagrant.d/insecure_private_key -o StrictHostKeyChecking=no' --rsync-path 'sudo rsync' --out-format='<>%i %n%L' /tmp/app vagrant@192.168.1.2:/home/ubuntu/app", "failed": true, "item": "", "rc": 1} msg: rsync: --delete does not work without --recursive (-r) or --dirs (-d). rsync error: syntax or usage error (code 1) at main.c(1453) [client=3.0.9] --- library/files/synchronize | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/files/synchronize b/library/files/synchronize index 1401a326fa1..3409c6f0300 100644 --- a/library/files/synchronize +++ b/library/files/synchronize @@ -58,7 +58,7 @@ options: version_added: "1.5" delete: description: - - Delete files that don't exist (after transfer, not before) in the C(src) path. + - Delete files that don't exist (after transfer, not before) in the C(src) path. This option requires C(recursive=yes). 
choices: [ 'yes', 'no' ] default: 'no' required: false From c900254ed0700d1183c667b883df16795873440b Mon Sep 17 00:00:00 2001 From: Hagai Date: Tue, 4 Mar 2014 21:37:15 +0200 Subject: [PATCH 554/772] ec2_snapshot: Add `wait' and `snapshot_tags' parameters, return more info on success --- library/cloud/ec2_snapshot | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/library/cloud/ec2_snapshot b/library/cloud/ec2_snapshot index 2b8a9dabba6..1bf4f8b509e 100644 --- a/library/cloud/ec2_snapshot +++ b/library/cloud/ec2_snapshot @@ -124,6 +124,8 @@ def main(): ec2_url = dict(), ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True), ec2_access_key = dict(aliases=['aws_access_key', 'access_key']), + wait = dict(choices=BOOLEANS, default='true'), + snapshot_tags = dict(type='dict', default=dict()), ) ) @@ -131,6 +133,8 @@ def main(): description = module.params.get('description') instance_id = module.params.get('instance_id') device_name = module.params.get('device_name') + wait = module.params.get('wait') + snapshot_tags = module.params.get('snapshot_tags') if not volume_id and not instance_id or volume_id and instance_id: module.fail_json('One and only one of volume_id or instance_id must be specified') @@ -150,10 +154,18 @@ def main(): try: snapshot = ec2.create_snapshot(volume_id, description=description) + if wait: + snapshot.update() + while snapshot.status != 'completed': + time.sleep(3) + snapshot.update() + for k, v in snapshot_tags.items(): + snapshot.add_tag(k, v) except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - module.exit_json(changed=True, snapshot_id=snapshot.id) + module.exit_json(changed=True, snapshot_id=snapshot.id, volume_id=snapshot.volume_id, + volume_size=snapshot.volume_size, tags=snapshot.tags.copy()) # import module snippets from ansible.module_utils.basic import * From 436b59252c2b3eff79cb40c44c68b189f46c038b Mon Sep 17 00:00:00 2001 From: Hagai Date: Mon, 10 Mar 2014 19:15:01 +0200 Subject: [PATCH 555/772] Added wait_timeout option --- library/cloud/ec2_snapshot | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/library/cloud/ec2_snapshot b/library/cloud/ec2_snapshot index 1bf4f8b509e..0b499e47765 100644 --- a/library/cloud/ec2_snapshot +++ b/library/cloud/ec2_snapshot @@ -125,6 +125,7 @@ def main(): ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True), ec2_access_key = dict(aliases=['aws_access_key', 'access_key']), wait = dict(choices=BOOLEANS, default='true'), + wait_timeout = dict(type='number', default=0), snapshot_tags = dict(type='dict', default=dict()), ) ) @@ -134,6 +135,7 @@ def main(): instance_id = module.params.get('instance_id') device_name = module.params.get('device_name') wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') snapshot_tags = module.params.get('snapshot_tags') if not volume_id and not instance_id or volume_id and instance_id: @@ -154,11 +156,15 @@ def main(): try: snapshot = ec2.create_snapshot(volume_id, description=description) + time_waited = 0 if wait: snapshot.update() while snapshot.status != 'completed': time.sleep(3) snapshot.update() + time_waited += 3 + if wait_timeout and time_waited > wait_timeout: + module.fail_json('Timed out while creating snapshot.') for k, v in snapshot_tags.items(): snapshot.add_tag(k, v) except boto.exception.BotoServerError, e: From f1ab5109ea0624ae83f58062c26b4395f282416d Mon Sep 17 00:00:00 2001 From: Chris Conway Date: Tue, 25 Mar 
2014 20:18:22 -0700 Subject: [PATCH 556/772] Fixes minor typos in the GCE guide, one of which causes a code block to fail to render. --- docsite/rst/guide_gce.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/guide_gce.rst b/docsite/rst/guide_gce.rst index b6313652560..f9e498ac0aa 100644 --- a/docsite/rst/guide_gce.rst +++ b/docsite/rst/guide_gce.rst @@ -88,11 +88,11 @@ The best way to interact with your hosts is to use the gce inventory plugin, whi Note that when using the inventory script ``gce.py``, you also need to populate the ``gce.ini`` file that you can find in the plugins/inventory directory of the ansible checkout. -To use the GCE dynamic inventory script, copy ``gce.py`` from ``plugings/inventory`` into your inventory directory and make it executable. You can specify credentials for ``gce.py`` using the ``GCE_INI_PATH`` environment variable -- the default is to look for gce.ini in the same directory as the inventory script. +To use the GCE dynamic inventory script, copy ``gce.py`` from ``plugins/inventory`` into your inventory directory and make it executable. You can specify credentials for ``gce.py`` using the ``GCE_INI_PATH`` environment variable -- the default is to look for gce.ini in the same directory as the inventory script. Let's see if inventory is working: -.. code-block: bash +.. code-block:: bash $ ./gce.py --list From 2a976ac3137e3fb0a8cdbc68769b92e70bd460dc Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 26 Mar 2014 11:28:43 -0400 Subject: [PATCH 557/772] Fixes #5059 Ignore lookup failures when templating a task name --- lib/ansible/utils/template.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index 2f79d6ca591..3f26f3f9c0f 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -88,8 +88,14 @@ def lookup(name, *args, **kwargs): vars = kwargs.get('vars', None) if instance is not None: - ran = instance.run(*args, inject=vars, **kwargs) - return ",".join(ran) + # safely catch run failures per #5059 + try: + ran = instance.run(*args, inject=vars, **kwargs) + except Exception, e: + ran = None + if ran: + ran = ",".join(ran) + return ran else: raise errors.AnsibleError("lookup plugin (%s) not found" % name) From 9ede6f7f494d83cf95b11bb46a8eb4a8234c0a48 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 26 Mar 2014 12:01:15 -0500 Subject: [PATCH 558/772] Convert gather_facts to a boolean value if it is not None Fixes #5618 --- lib/ansible/playbook/play.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 2da555bd0f9..68c5dbf9ae2 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -117,7 +117,6 @@ class Play(object): self.sudo = ds.get('sudo', self.playbook.sudo) self.sudo_user = ds.get('sudo_user', self.playbook.sudo_user) self.transport = ds.get('connection', self.playbook.transport) - self.gather_facts = ds.get('gather_facts', None) self.remote_port = self.remote_port self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false')) self.accelerate = utils.boolean(ds.get('accelerate', 'false')) @@ -128,6 +127,13 @@ class Play(object): self.su_user = ds.get('su_user', self.playbook.su_user) #self.vault_password = vault_password + # gather_facts is not a simple boolean, as None means that a 'smart' + # fact gathering mode will be used, so we need to be careful here as + # 
calling utils.boolean(None) returns False + self.gather_facts = ds.get('gather_facts', None) + if self.gather_facts: + self.gather_facts = utils.boolean(self.gather_facts) + # Fail out if user specifies a sudo param with a su param in a given play if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')): raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ' From 8991e403e849843720cf6a9459fccbb46a50324d Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 26 Mar 2014 13:29:34 -0400 Subject: [PATCH 559/772] Fixes #6705 Give each role a unique uuid and apply tags only to tasks matching the uuid --- lib/ansible/playbook/play.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 68c5dbf9ae2..631595eaa2a 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -26,6 +26,7 @@ import pipes import shlex import os import sys +import uuid class Play(object): @@ -363,6 +364,13 @@ class Play(object): new_tasks.append(dict(meta='flush_handlers')) roles = self._build_role_dependencies(roles, [], self.vars) + + # give each role a uuid + for idx, val in enumerate(roles): + this_uuid = str(uuid.uuid4()) + roles[idx][0]['role_uuid'] = this_uuid + roles[idx][-2]['role_uuid'] = this_uuid + role_names = [] for (role,role_path,role_vars,default_vars) in roles: @@ -725,21 +733,21 @@ class Play(object): role_tags = {} for task in self._ds['tasks']: if 'role_name' in task: - this_role = task['role_name'] + this_role = task['role_name'] + "-" + task['vars']['role_uuid'] if this_role not in role_tags: role_tags[this_role] = [] if 'tags' in task['vars']: if isinstance(task['vars']['tags'], basestring): - role_tags[task['role_name']] += shlex.split(task['vars']['tags']) + role_tags[this_role] += shlex.split(task['vars']['tags']) else: - role_tags[task['role_name']] += task['vars']['tags'] + role_tags[this_role] += task['vars']['tags'] # apply each role's tags to it's tasks for idx, val in enumerate(self._tasks): - if hasattr(val, 'role_name'): - this_role = val.role_name + if getattr(val, 'role_name', None) is not None: + this_role = val.role_name + "-" + val.module_vars['role_uuid'] if this_role in role_tags: self._tasks[idx].tags = sorted(set(self._tasks[idx].tags + role_tags[this_role])) From 8960cba4a31bb3ecf6d397f251a875b0097f6c15 Mon Sep 17 00:00:00 2001 From: Jeff Geerling Date: Wed, 26 Mar 2014 12:42:15 -0500 Subject: [PATCH 560/772] Fixed template validation docs and example. --- library/files/template | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/library/files/template b/library/files/template index 39f92c72a72..3f2ea5bbf5e 100644 --- a/library/files/template +++ b/library/files/template @@ -40,7 +40,7 @@ options: default: "no" validate: description: - - validation to run before copying into place + - The validation command to run before copying into place. The path to the file to validate is passed in via '%s' which must be present as in the visudo example below. 
(added in Ansible 1.2) required: false default: "" version_added: "1.2" @@ -63,6 +63,6 @@ EXAMPLES = ''' # Example from Ansible Playbooks - template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode=0644 -# Copy a new "sudoers file into place, after passing validation with visudo -- action: template src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s' +# Copy a new "sudoers" file into place, after passing validation with visudo +- template: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s' ''' From e093b216ffd44036a00c8862193e34188cd0861b Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 5 Mar 2014 12:38:28 -0600 Subject: [PATCH 561/772] Support arbitrary extra args in rax module Add extra_create_args and extra_client_args to rax module to support passing advanced configuration options to client instantiation and server create calls. --- library/cloud/rax | 40 +++++++++++++++++++++++++++++++++++----- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/library/cloud/rax b/library/cloud/rax index c566206b403..0798ce4fd94 100644 --- a/library/cloud/rax +++ b/library/cloud/rax @@ -98,6 +98,17 @@ options: state=active/present default: no version_added: 1.4 + extra_client_args: + description: + - A hash of key/value pairs to be used when creating the cloudservers + client. This is considered an advanced option, use it wisely and + with caution. + version_added: 1.6 + extra_create_args: + description: + - A hash of key/value pairs to be used when creating a new server. + This is considered an advanced option, use it wisely and with caution. + version_added: 1.6 files: description: - Files to insert into the instance. remotefilename:localcontent @@ -246,7 +257,8 @@ def pyrax_object_to_dict(obj): def create(module, names, flavor, image, meta, key_name, files, - wait, wait_timeout, disk_config, group, nics): + wait, wait_timeout, disk_config, group, nics, + extra_create_args): cs = pyrax.cloudservers changed = False @@ -266,7 +278,8 @@ def create(module, names, flavor, image, meta, key_name, files, flavor=flavor, meta=meta, key_name=key_name, files=files, nics=nics, - disk_config=disk_config)) + disk_config=disk_config, + **extra_create_args)) except Exception, e: module.fail_json(msg='%s' % e.message) else: @@ -405,7 +418,7 @@ def delete(module, instance_ids, wait, wait_timeout): def cloudservers(module, state, name, flavor, image, meta, key_name, files, wait, wait_timeout, disk_config, count, group, instance_ids, exact_count, networks, count_offset, - auto_increment): + auto_increment, extra_create_args): cs = pyrax.cloudservers cnw = pyrax.cloud_networks servers = [] @@ -602,7 +615,7 @@ def cloudservers(module, state, name, flavor, image, meta, key_name, files, names = [name] * (count - len(servers)) create(module, names, flavor, image, meta, key_name, files, - wait, wait_timeout, disk_config, group, nics) + wait, wait_timeout, disk_config, group, nics, extra_create_args) elif state == 'absent': if instance_ids is None: @@ -647,6 +660,8 @@ def main(): count_offset=dict(default=1, type='int'), disk_config=dict(choices=['auto', 'manual']), exact_count=dict(default=False, type='bool'), + extra_client_args=dict(type='dict', default={}), + extra_create_args=dict(type='dict', default={}), files=dict(type='dict', default={}), flavor=dict(), group=dict(), @@ -682,6 +697,8 @@ def main(): if disk_config: disk_config = disk_config.upper() exact_count = module.params.get('exact_count', False) + extra_client_args = module.params.get('extra_client_args') + 
extra_create_args = module.params.get('extra_create_args') files = module.params.get('files') flavor = module.params.get('flavor') group = module.params.get('group') @@ -697,10 +714,23 @@ def main(): setup_rax_module(module, pyrax) + if pyrax.cloudservers is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + if extra_client_args: + pyrax.cloudservers = pyrax.connect_to_cloudservers( + region=pyrax.cloudservers.client.region_name, + **extra_client_args) + client = pyrax.cloudservers.client + if 'bypass_url' in extra_client_args: + client.management_url = extra_client_args['bypass_url'] + cloudservers(module, state, name, flavor, image, meta, key_name, files, wait, wait_timeout, disk_config, count, group, instance_ids, exact_count, networks, count_offset, - auto_increment) + auto_increment, extra_create_args) # import module snippets From b8cb23d309cccd41743b218726c918d56c3f1d07 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 21 Mar 2014 15:52:07 -0500 Subject: [PATCH 562/772] Share p.communicate alternative logic between exec_command and put_file --- lib/ansible/runner/connection_plugins/ssh.py | 151 ++++++++++--------- 1 file changed, 81 insertions(+), 70 deletions(-) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index bcf90cbc7d3..1c0a0175ebb 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -98,6 +98,28 @@ class Connection(object): return self + def _run(self, cmd, indata): + if indata: + # do not use pseudo-pty + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdin = p.stdin + else: + # try to use upseudo-pty + try: + # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors + master, slave = pty.openpty() + p = subprocess.Popen(cmd, stdin=slave, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdin = os.fdopen(master, 'w', 0) + os.close(slave) + except: + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdin = p.stdin + + return (p, stdin) + def _password_cmd(self): if self.password: try: @@ -116,6 +138,58 @@ class Connection(object): os.write(self.wfd, "%s\n" % self.password) os.close(self.wfd) + def _communicate(self, p, stdin, indata): + fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) + fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) + # We can't use p.communicate here because the ControlMaster may have stdout open as well + stdout = '' + stderr = '' + rpipes = [p.stdout, p.stderr] + if indata: + try: + stdin.write(indata) + stdin.close() + except: + raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. 
Make sure this host can be reached over ssh') + while True: + rfd, wfd, efd = select.select(rpipes, [], rpipes, 1) + + # fail early if the sudo/su password is wrong + if self.runner.sudo and sudoable and self.runner.sudo_pass: + incorrect_password = gettext.dgettext( + "sudo", "Sorry, try again.") + if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): + raise errors.AnsibleError('Incorrect sudo password') + + if self.runner.su and su and self.runner.sudo_pass: + incorrect_password = gettext.dgettext( + "su", "Sorry") + if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): + raise errors.AnsibleError('Incorrect su password') + + if p.stdout in rfd: + dat = os.read(p.stdout.fileno(), 9000) + stdout += dat + if dat == '': + rpipes.remove(p.stdout) + if p.stderr in rfd: + dat = os.read(p.stderr.fileno(), 9000) + stderr += dat + if dat == '': + rpipes.remove(p.stderr) + # only break out if we've emptied the pipes, or there is nothing to + # read from and the process has finished. + if (not rpipes or not rfd) and p.poll() is not None: + break + # Calling wait while there are still pipes to read can cause a lock + elif not rpipes and p.poll() == None: + p.wait() + # the process has finished and the pipes are empty, + # if we loop and do the select it waits all the timeout + break + stdin.close() # close stdin after we read from stdout (see also issue #848) + return (p.returncode, stdout, stderr) + def not_in_host_file(self, host): if 'USER' in os.environ: user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts") @@ -203,24 +277,7 @@ class Connection(object): fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX) # create process - if in_data: - # do not use pseudo-pty - p = subprocess.Popen(ssh_cmd, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdin = p.stdin - else: - # try to use upseudo-pty - try: - # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors - master, slave = pty.openpty() - p = subprocess.Popen(ssh_cmd, stdin=slave, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdin = os.fdopen(master, 'w', 0) - os.close(slave) - except: - p = subprocess.Popen(ssh_cmd, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdin = p.stdin + (p, stdin) = self._run(ssh_cmd, in_data) self._send_password() @@ -269,56 +326,9 @@ class Connection(object): stdin.write(self.runner.sudo_pass + '\n') elif su: stdin.write(self.runner.su_pass + '\n') - fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) - fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) - # We can't use p.communicate here because the ControlMaster may have stdout open as well - stdout = '' - stderr = '' - rpipes = [p.stdout, p.stderr] - if in_data: - try: - stdin.write(in_data) - stdin.close() - except: - raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. 
Make sure this host can be reached over ssh') - while True: - rfd, wfd, efd = select.select(rpipes, [], rpipes, 1) - # fail early if the sudo/su password is wrong - if self.runner.sudo and sudoable and self.runner.sudo_pass: - incorrect_password = gettext.dgettext( - "sudo", "Sorry, try again.") - if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): - raise errors.AnsibleError('Incorrect sudo password') + (returncode, stdout, stderr) = self._communicate(p, stdin, in_data) - if self.runner.su and su and self.runner.sudo_pass: - incorrect_password = gettext.dgettext( - "su", "Sorry") - if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): - raise errors.AnsibleError('Incorrect su password') - - if p.stdout in rfd: - dat = os.read(p.stdout.fileno(), 9000) - stdout += dat - if dat == '': - rpipes.remove(p.stdout) - if p.stderr in rfd: - dat = os.read(p.stderr.fileno(), 9000) - stderr += dat - if dat == '': - rpipes.remove(p.stderr) - # only break out if we've emptied the pipes, or there is nothing to - # read from and the process has finished. - if (not rpipes or not rfd) and p.poll() is not None: - break - # Calling wait while there are still pipes to read can cause a lock - elif not rpipes and p.poll() == None: - p.wait() - # the process has finished and the pipes are empty, - # if we loop and do the select it waits all the timeout - break - stdin.close() # close stdin after we read from stdout (see also issue #848) - if C.HOST_KEY_CHECKING and not_in_host_file: # lock around the initial SSH connectivity so the user prompt about whether to add # the host to known hosts is not intermingled with multiprocess output. @@ -357,12 +367,13 @@ class Connection(object): cmd += ["sftp"] + self.common_args + [host] indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path)) - p = subprocess.Popen(cmd, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (p, stdin) = self._run(cmd, indata) + self._send_password() - stdout, stderr = p.communicate(indata) - if p.returncode != 0: + (returncode, stdout, stderr) = self._communicate(p, stdin, indata) + + if returncode != 0: raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr)) def fetch_file(self, in_path, out_path): From 38de8cc87ecabc43b7cffa1aa9992d37ce9511d2 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 26 Mar 2014 14:43:07 -0400 Subject: [PATCH 563/772] Addresses #6705 Only set the role_uuid inside the role vars --- lib/ansible/playbook/play.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 631595eaa2a..53f097f5c86 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -368,7 +368,6 @@ class Play(object): # give each role a uuid for idx, val in enumerate(roles): this_uuid = str(uuid.uuid4()) - roles[idx][0]['role_uuid'] = this_uuid roles[idx][-2]['role_uuid'] = this_uuid role_names = [] From e09313120c96c00980aef12c3105647bdc3857ca Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 26 Mar 2014 13:20:52 -0500 Subject: [PATCH 564/772] Make async jid's unique per host The jid will now also contain the PID of the async_wrapper process, and can each unique jid from each host is tracked rather than just relying on one global jid per task. 
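In outline, the new scheme amounts to the following (a sketch only; the helper name is invented here, but the jid construction mirrors the async_wrapper change below):

import os

def make_unique_jid(base_jid):
    # Appending the async_wrapper PID means two hosts launched from the
    # same task no longer share a jid, or the status file derived from it.
    return "%s.%d" % (base_jid, os.getpid())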
Fixes #5582 --- lib/ansible/runner/poller.py | 21 ++++++++++++--------- library/internal/async_wrapper | 2 +- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/lib/ansible/runner/poller.py b/lib/ansible/runner/poller.py index c69b2e76da6..5813377249d 100644 --- a/lib/ansible/runner/poller.py +++ b/lib/ansible/runner/poller.py @@ -30,18 +30,21 @@ class AsyncPoller(object): self.hosts_to_poll = [] self.completed = False - # Get job id and which hosts to poll again in the future - jid = None + # flag to determine if at least one host was contacted + self.active = False # True to work with & below skipped = True for (host, res) in results['contacted'].iteritems(): if res.get('started', False): self.hosts_to_poll.append(host) jid = res.get('ansible_job_id', None) + self.runner.setup_cache[host]['ansible_job_id'] = jid + self.active = True else: skipped = skipped & res.get('skipped', False) self.results['contacted'][host] = res for (host, res) in results['dark'].iteritems(): + self.runner.setup_cache[host]['ansible_job_id'] = '' self.results['dark'][host] = res if not skipped: @@ -49,14 +52,13 @@ class AsyncPoller(object): raise errors.AnsibleError("unexpected error: unable to determine jid") if len(self.hosts_to_poll)==0: raise errors.AnsibleError("unexpected error: no hosts to poll") - self.jid = jid def poll(self): """ Poll the job status. Returns the changes in this iteration.""" self.runner.module_name = 'async_status' - self.runner.module_args = "jid=%s" % self.jid + self.runner.module_args = "jid={{ansible_job_id}}" self.runner.pattern = "*" self.runner.background = 0 self.runner.complex_args = None @@ -75,13 +77,14 @@ class AsyncPoller(object): self.results['contacted'][host] = res poll_results['contacted'][host] = res if res.get('failed', False) or res.get('rc', 0) != 0: - self.runner.callbacks.on_async_failed(host, res, self.jid) + self.runner.callbacks.on_async_failed(host, res, self.runner.setup_cache[host]['ansible_job_id']) else: - self.runner.callbacks.on_async_ok(host, res, self.jid) + self.runner.callbacks.on_async_ok(host, res, self.runner.setup_cache[host]['ansible_job_id']) for (host, res) in results['dark'].iteritems(): self.results['dark'][host] = res poll_results['dark'][host] = res - self.runner.callbacks.on_async_failed(host, res, self.jid) + if host in self.hosts_to_poll: + self.runner.callbacks.on_async_failed(host, res, self.runner.setup_cache[host].get('ansible_job_id','XX')) self.hosts_to_poll = hosts if len(hosts)==0: @@ -92,7 +95,7 @@ class AsyncPoller(object): def wait(self, seconds, poll_interval): """ Wait a certain time for job completion, check status every poll_interval. 
""" # jid is None when all hosts were skipped - if self.jid is None: + if not self.active: return self.results clock = seconds - poll_interval @@ -103,7 +106,7 @@ class AsyncPoller(object): for (host, res) in poll_results['polled'].iteritems(): if res.get('started'): - self.runner.callbacks.on_async_poll(host, res, self.jid, clock) + self.runner.callbacks.on_async_poll(host, res, self.runner.setup_cache[host]['ansible_job_id'], clock) clock = clock - poll_interval diff --git a/library/internal/async_wrapper b/library/internal/async_wrapper index 278280ef1a8..2bc2dc21823 100644 --- a/library/internal/async_wrapper +++ b/library/internal/async_wrapper @@ -72,7 +72,7 @@ if len(sys.argv) < 3: }) sys.exit(1) -jid = sys.argv[1] +jid = "%s.%d" % (sys.argv[1], os.getpid()) time_limit = sys.argv[2] wrapped_module = sys.argv[3] argsfile = sys.argv[4] From 972a27b14e83a3e50a8c3d15dd589007295448db Mon Sep 17 00:00:00 2001 From: "Christopher H. Laco" Date: Fri, 8 Nov 2013 19:29:41 -0600 Subject: [PATCH 565/772] Add Rackspace Cloud Block Storage modules - Add rax_cbs to create/delete cloud block storage volumes - Add rax_cbs_attachments to attach/detach volumes from servers --- library/cloud/rax_cbs | 241 ++++++++++++++++++++++++++++ library/cloud/rax_cbs_attachments | 255 ++++++++++++++++++++++++++++++ 2 files changed, 496 insertions(+) create mode 100644 library/cloud/rax_cbs create mode 100644 library/cloud/rax_cbs_attachments diff --git a/library/cloud/rax_cbs b/library/cloud/rax_cbs new file mode 100644 index 00000000000..efa40c0ffc0 --- /dev/null +++ b/library/cloud/rax_cbs @@ -0,0 +1,241 @@ +#!/usr/bin/python -tt +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: rax_cbs +short_description: Manipulate Rackspace Cloud Block Storage Volumes +description: + - Manipulate Rackspace Cloud Block Storage Volumes +version_added: "1.5" +options: + api_key: + description: + - Rackspace API key (overrides C(credentials)) + credentials: + description: + - File to find the Rackspace credentials in (ignored if C(api_key) and + C(username) are provided) + default: null + aliases: ['creds_file'] + description: + description: + - Description to give the volume being created + default: null + meta: + description: + - A hash of metadata to associate with the volume + default: null + name: + description: + - Name to give the volume being created + default: null + required: true + region: + description: + - Region to create the volume in + default: DFW + size: + description: + - Size of the volume to create in Gigabytes + default: 100 + required: true + snapshot_id: + description: + - The id of the snapshot to create the volume from + default: null + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + required: true + volume_type: + description: + - Type of the volume being created + choices: ['SATA', 'SSD'] + default: SATA + required: true + username: + description: + - Rackspace username (overrides C(credentials)) + wait: + description: + - wait for the volume to be in state 'available' before returning + default: "no" + choices: [ "yes", "no" ] + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 300 +requirements: [ "pyrax" ] +author: Christopher H. Laco, Matt Martz +notes: + - The following environment variables can be used, C(RAX_USERNAME), + C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). + - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file + appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) + - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file + - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) 
+''' + +EXAMPLES = ''' +- name: Build a Block Storage Volume + gather_facts: False + hosts: local + connection: local + tasks: + - name: Storage volume create request + local_action: + module: rax_cbs + credentials: ~/.raxpub + name: my-volume + description: My Volume + volume_type: SSD + size: 150 + region: DFW + wait: yes + state: present + meta: + app: my-cool-app + register: my_volume +''' + +import sys + +from types import NoneType + +try: + import pyrax +except ImportError: + print("failed=True msg='pyrax required for this module'") + sys.exit(1) + +NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) +VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use', + 'error', 'error_deleting') + + +def cloud_block_storage(module, state, name, description, meta, size, + snapshot_id, volume_type, wait, wait_timeout): + for arg in (state, name, size, volume_type): + if not arg: + module.fail_json(msg='%s is required for rax_clb' % arg) + + if int(size) < 100: + module.fail_json(msg='"size" must be greater than or equal to 100') + + changed = False + volumes = [] + instance = {} + + cbs = pyrax.cloud_blockstorage + + for volume in cbs.list(): + if name != volume.name and name != volume.id: + continue + + volumes.append(volume) + + if len(volumes) > 1: + module.fail_json(msg='Multiple Storage Volumes were matched by name, ' + 'try using the Volume ID instead') + + if state == 'present': + if not volumes: + try: + volume = cbs.create(name, size=size, volume_type=volume_type, + description=description, + metadata=meta, + snapshot_id=snapshot_id) + changed = True + except Exception, e: + module.fail_json(msg='%s' % e.message) + else: + volume = volumes[0] + + volume.get() + for key, value in vars(volume).iteritems(): + if (isinstance(value, NON_CALLABLES) and + not key.startswith('_')): + instance[key] = value + + result = dict(changed=changed, volume=instance) + + if volume.status == 'error': + result['msg'] = '%s failed to build' % volume.id + elif wait and volume.status not in VOLUME_STATUS: + result['msg'] = 'Timeout waiting on %s' % volume.id + + if 'msg' in result: + module.fail_json(**result) + else: + module.exit_json(**result) + + elif state == 'absent': + if volumes: + volume = volumes[0] + try: + volume.delete() + changed = True + except Exception, e: + module.fail_json(msg='%s' % e.message) + + module.exit_json(changed=changed, volume=instance) + + +def main(): + argument_spec = rax_argument_spec() + argument_spec.update( + dict( + description=dict(), + meta=dict(type='dict', default={}), + name=dict(), + size=dict(type='int', default=100), + snapshot_id=dict(), + state=dict(default='present', choices=['present', 'absent']), + volume_type=dict(choices=['SSD', 'SATA'], default='SATA'), + wait=dict(type='bool'), + wait_timeout=dict(type='int', default=300) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + description = module.params.get('description') + meta = module.params.get('meta') + name = module.params.get('name') + size = module.params.get('size') + snapshot_id = module.params.get('snapshot_id') + state = module.params.get('state') + volume_type = module.params.get('volume_type') + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + + setup_rax_module(module, pyrax) + + cloud_block_storage(module, state, name, description, meta, size, + snapshot_id, volume_type, wait, wait_timeout) + +# import module snippets +from ansible.module_utils.basic 
import * +from ansible.module_utils.rax import * + +### invoke the module +main() diff --git a/library/cloud/rax_cbs_attachments b/library/cloud/rax_cbs_attachments new file mode 100644 index 00000000000..2a0ac49775e --- /dev/null +++ b/library/cloud/rax_cbs_attachments @@ -0,0 +1,255 @@ +#!/usr/bin/python -tt +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: rax_cbs_attachments +short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments +description: + - Manipulate Rackspace Cloud Block Storage Volume Attachments +version_added: "1.5" +options: + api_key: + description: + - Rackspace API key (overrides C(credentials)) + credentials: + description: + - File to find the Rackspace credentials in (ignored if C(api_key) and + C(username) are provided) + default: null + aliases: ['creds_file'] + mountpoint: + description: + - The mount point to attach the volume to + default: null + required: true + name: + description: + - Name or id of the volume to attach/detach + default: null + required: true + region: + description: + - Region the volume and server are located in + default: DFW + server: + description: + - Name or id of the server to attach/detach + default: null + required: true + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + required: true + username: + description: + - Rackspace username (overrides C(credentials)) + wait: + description: + - wait for the volume to be in 'in-use'/'available' state before returning + default: "no" + choices: [ "yes", "no" ] + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 300 +requirements: [ "pyrax" ] +author: Christopher H. Laco, Matt Martz +notes: + - The following environment variables can be used, C(RAX_USERNAME), + C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). + - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file + appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) + - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file + - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) 
+''' + +EXAMPLES = ''' +- name: Attach a Block Storage Volume + gather_facts: False + hosts: local + connection: local + tasks: + - name: Storage volume attach request + local_action: + module: rax_cbs_attachments + credentials: ~/.raxpub + name: my-volume + server: my-server + mountpoint: /dev/xvdd + region: DFW + wait: yes + state: present + register: my_volume +''' + +import sys + +from types import NoneType + +try: + import pyrax +except ImportError: + print("failed=True msg='pyrax required for this module'") + sys.exit(1) + +NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) +VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use', + 'error', 'error_deleting') + + +def cloud_block_storage_attachments(module, state, name, server, mountpoint, + wait, wait_timeout): + for arg in (state, name, server, mountpoint): + if not arg: + module.fail_json(msg='%s is required for rax_clb_attachments' % arg) + + cbs = pyrax.cloud_blockstorage + cs = pyrax.cloudservers + changed = False + volumes = [] + instance = {} + + for volume in cbs.list(): + if name == volume.display_name or name == volume.id: + volumes.append(volume) + + if len(volumes) > 1: + module.fail_json(msg='Multiple Storage Volumes were matched by name, ' + 'try using the Volume ID instead') + elif not volumes: + module.fail_json(msg='No Storage Volumes were matched by name, ' + 'try using the Volume ID instead') + + volume = volumes[0] + if state == 'present': + server = cs.servers.get(server) + + if not server: + module.fail_json(msg='No Server was matched by name, ' + 'try using the Server ID instead') + else: + if volume.attachments and volume.attachments[0]['server_id'] == server.id: + changed = False + elif volume.attachments: + module.fail_json(msg='Volume is attached to another server') + else: + try: + volume.attach_to_instance(server, mountpoint=mountpoint) + changed = True + except Exception, e: + module.fail_json(msg='%s' % e.message) + + volume.get() + + for key, value in vars(volume).iteritems(): + if (isinstance(value, NON_CALLABLES) and + not key.startswith('_')): + instance[key] = value + + result = dict(changed=changed, volume=instance) + + if volume.status == 'error': + result['msg'] = '%s failed to build' % volume.id + elif wait: + pyrax.utils.wait_until(volume, 'status', 'in-use', + interval=3, attempts=0, + verbose=False) + + if 'msg' in result: + module.fail_json(**result) + else: + module.exit_json(**result) + + elif state == 'absent': + server = cs.servers.get(server) + + if not server: + module.fail_json(msg='No Server was matched by name, ' + 'try using the Server ID instead') + else: + if volume.attachments and volume.attachments[0]['server_id'] == server.id: + try: + volume.detach() + if wait: + pyrax.utils.wait_until(volume, 'status', 'available', + interval=3, attempts=0, + verbose=False) + changed = True + except Exception, e: + module.fail_json(msg='%s' % e.message) + + volume.get() + changed = True + elif volume.attachments: + module.fail_json(msg='Volume is attached to another server') + + for key, value in vars(volume).iteritems(): + if (isinstance(value, NON_CALLABLES) and + not key.startswith('_')): + instance[key] = value + + result = dict(changed=changed, volume=instance) + + if volume.status == 'error': + result['msg'] = '%s failed to build' % volume.id + + if 'msg' in result: + module.fail_json(**result) + else: + module.exit_json(**result) + + module.exit_json(changed=changed, volume=instance) + + +def main(): + argument_spec = rax_argument_spec() + 
argument_spec.update( + dict( + mountpoint=dict(), + name=dict(), + server=dict(), + state=dict(default='present', choices=['present', 'absent']), + wait=dict(type='bool'), + wait_timeout=dict(type='int', default=300) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=rax_required_together() + ) + + mountpoint = module.params.get('mountpoint') + name = module.params.get('name') + server = module.params.get('server') + state = module.params.get('state') + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) + + setup_rax_module(module, pyrax) + + cloud_block_storage_attachments(module, state, name, server, mountpoint, + wait, wait_timeout) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.rax import * + +### invoke the module +main() From 88462729aa04c6353b6388bf06f6a39dc8397790 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 26 Mar 2014 15:55:54 -0400 Subject: [PATCH 566/772] Addresses #6708 fill in missing keywords for ssh.py _communicate --- lib/ansible/runner/connection_plugins/ssh.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index 1c0a0175ebb..876f2063848 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -138,7 +138,7 @@ class Connection(object): os.write(self.wfd, "%s\n" % self.password) os.close(self.wfd) - def _communicate(self, p, stdin, indata): + def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None): fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) # We can't use p.communicate here because the ControlMaster may have stdout open as well @@ -327,7 +327,7 @@ class Connection(object): elif su: stdin.write(self.runner.su_pass + '\n') - (returncode, stdout, stderr) = self._communicate(p, stdin, in_data) + (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable) if C.HOST_KEY_CHECKING and not_in_host_file: # lock around the initial SSH connectivity so the user prompt about whether to add From 78bdb078fea7eb7dc14a50ae2476d441f97957c5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 26 Mar 2014 15:48:42 -0500 Subject: [PATCH 567/772] Fix for bug related to async jid polling change plus a new test --- lib/ansible/playbook/__init__.py | 4 ++-- test/integration/roles/test_async/tasks/main.yml | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index f624be1b297..6b03d060c45 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -304,7 +304,7 @@ class PlayBook(object): # since these likely got killed by async_wrapper for host in poller.hosts_to_poll: reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' } - self.runner_callbacks.on_async_failed(host, reason, poller.jid) + self.runner_callbacks.on_async_failed(host, reason, poller.runner.setup_cache[host]['ansible_job_id']) results['contacted'][host] = reason return results @@ -375,7 +375,7 @@ class PlayBook(object): results = self._async_poll(poller, task.async_seconds, task.async_poll_interval) else: for (host, res) in results.get('contacted', {}).iteritems(): - self.runner_callbacks.on_async_ok(host, 
res, poller.jid) + self.runner_callbacks.on_async_ok(host, res, poller.runner.setup_cache[host]['ansible_job_id']) contacted = results.get('contacted',{}) dark = results.get('dark', {}) diff --git a/test/integration/roles/test_async/tasks/main.yml b/test/integration/roles/test_async/tasks/main.yml index 350d5ef4701..502140599fc 100644 --- a/test/integration/roles/test_async/tasks/main.yml +++ b/test/integration/roles/test_async/tasks/main.yml @@ -43,3 +43,17 @@ - "'stdout_lines' in async_result" - "async_result.rc == 0" +- name: test async without polling + command: sleep 5 + async: 30 + poll: 0 + register: async_result + +- debug: var=async_result + +- name: validate async without polling returns + assert: + that: + - "'ansible_job_id' in async_result" + - "'started' in async_result" + - "'finished' not in async_result" From 3315ba0b99eb3906714638b04dd0c6b4d42f9fd8 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Wed, 26 Mar 2014 16:40:50 -0700 Subject: [PATCH 568/772] Treat 255 errors from raw as dark host Any other module is able to detect a dark host, but raw was treating 255 as a return code from the module execution, rather from the connection attempt. This change allows 255 to be treated as a connection failure when using the raw module. --- lib/ansible/runner/connection_plugins/ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index 876f2063848..410afc97841 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -343,7 +343,7 @@ class Connection(object): if p.returncode != 0 and controlpersisterror: raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ansible_ssh_args in the config file) before running again') - if p.returncode == 255 and in_data: + if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'): raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh') return (p.returncode, '', stdout, stderr) From 7626eacc7a6eabad2c19a92d2cd235bc2922c62c Mon Sep 17 00:00:00 2001 From: Henry Finucane Date: Wed, 26 Mar 2014 17:34:04 -0700 Subject: [PATCH 569/772] Make ssh_config paramiko 1.12/1.13 compatible --- plugins/inventory/ssh_config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/inventory/ssh_config.py b/plugins/inventory/ssh_config.py index 24017e62282..7c04c8cc6da 100755 --- a/plugins/inventory/ssh_config.py +++ b/plugins/inventory/ssh_config.py @@ -63,7 +63,8 @@ def get_config(): for d in cfg._config: _copy = dict(d) del _copy['host'] - ret_dict[d['host']] = _copy + for host in d['host']: + ret_dict[host] = _copy['config'] return ret_dict From d57910b93ab92792b8a238210d01d1d8856b7dea Mon Sep 17 00:00:00 2001 From: jjshoe Date: Wed, 26 Mar 2014 09:55:29 -0500 Subject: [PATCH 570/772] Allow you to pass in arbitrary rsync options. I also added help for it, and what version it was added in. --- library/files/synchronize | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/library/files/synchronize b/library/files/synchronize index 3409c6f0300..88721b3922c 100644 --- a/library/files/synchronize +++ b/library/files/synchronize @@ -124,6 +124,10 @@ options: - put user@ for the remote paths. 
If you have a custom ssh config to define the remote user for a host that does not match the inventory user, you should set this parameter to "no". default: yes + rsync_opts: + description: + - Specify additional rsync options by passing in an array. (added in Ansible 1.6) + default: required: false notes: - Inspect the verbose output to validate the destination user/host/path @@ -173,6 +177,9 @@ synchronize: src=some/relative/path dest=/some/absolute/path rsync_path="sudo rs - var # exclude any path whose last part is 'var' - /var # exclude any path starting with 'var' starting at the source directory + /var/conf # include /var/conf even though it was previously excluded + +# Synchronize passing in extra rsync options +synchronize: src=/tmp/helloworld dest=/var/www/helloword rsync_opts=--no-motd,--exclude=.git ''' @@ -196,7 +203,8 @@ def main(): owner = dict(type='bool'), group = dict(type='bool'), set_remote_user = dict(default='yes', type='bool'), - rsync_timeout = dict(type='int', default=10) + rsync_timeout = dict(type='int', default=10), + rsync_opts = dict(type='list') ), supports_check_mode = True ) @@ -220,6 +228,7 @@ def main(): times = module.params['times'] owner = module.params['owner'] group = module.params['group'] + rsync_opts = module.params['rsync_opts'] cmd = '%s --delay-updates -FF --compress --timeout=%s' % (rsync, rsync_timeout) if module.check_mode: @@ -275,6 +284,8 @@ def main(): if rsync_path: cmd = cmd + " --rsync-path '%s'" %(rsync_path) + if rsync_opts: + cmd = cmd + " " + " ".join(rsync_opts) changed_marker = '<>' cmd = cmd + " --out-format='" + changed_marker + "%i %n%L'" From 4d55f86f95dbc3bd1aeff83bd78a5ab62bebaf57 Mon Sep 17 00:00:00 2001 From: Ahti Kitsik Date: Thu, 27 Mar 2014 17:22:39 +0200 Subject: [PATCH 571/772] Fixed ufw module doc tags, it's reset everywhere, not reseted. --- library/system/ufw | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/system/ufw b/library/system/ufw index caae2ad4672..07f51d853bc 100644 --- a/library/system/ufw +++ b/library/system/ufw @@ -38,9 +38,9 @@ options: - C(enabled) reloads firewall and enables firewall on boot. - C(disabled) unloads firewall and disables firewall on boot. - C(reloaded) reloads firewall. - - C(reseted) disables and resets firewall to installation defaults. + - C(reset) disables and resets firewall to installation defaults. required: false - choices: ['enabled', 'disabled', 'reloaded', 'reseted'] + choices: ['enabled', 'disabled', 'reloaded', 'reset'] policy: description: - Change the default policy for incoming or outgoing traffic. 
From c1f4142a295effd3a822c738c140c4fb61382e81 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 27 Mar 2014 11:47:49 -0400 Subject: [PATCH 572/772] Fixes #4109 Filter plays by tags when using --list-hosts --- bin/ansible-playbook | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/bin/ansible-playbook b/bin/ansible-playbook index f54c17d7aa7..ecfaf20b922 100755 --- a/bin/ansible-playbook +++ b/bin/ansible-playbook @@ -207,11 +207,9 @@ def main(args): play = ansible.playbook.Play(pb, play_ds, play_basedir) label = play.name hosts = pb.inventory.list_hosts(play.hosts) - if options.listhosts: - print ' play #%d (%s): host count=%d' % (playnum, label, len(hosts)) - for host in hosts: - print ' %s' % host - if options.listtasks: + + # Filter all tasks by given tags + if pb.only_tags != 'all': if options.subset and not hosts: continue matched_tags, unmatched_tags = play.compare_tags(pb.only_tags) @@ -225,9 +223,17 @@ def main(args): if unknown_tags: continue + + if options.listhosts: + print ' play #%d (%s): host count=%d' % (playnum, label, len(hosts)) + for host in hosts: + print ' %s' % host + + if options.listtasks: print ' play #%d (%s):' % (playnum, label) for task in play.tasks(): + print "tags: %s" % task.tags if (set(task.tags).intersection(pb.only_tags) and not set(task.tags).intersection(pb.skip_tags)): if getattr(task, 'name', None) is not None: From 95a283ccaa96dbe0658a40bd6340b8a310553b57 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 27 Mar 2014 11:51:22 -0400 Subject: [PATCH 573/772] Addresses #4109 Remove debug print statement --- bin/ansible-playbook | 1 - 1 file changed, 1 deletion(-) diff --git a/bin/ansible-playbook b/bin/ansible-playbook index ecfaf20b922..c365bbe7714 100755 --- a/bin/ansible-playbook +++ b/bin/ansible-playbook @@ -233,7 +233,6 @@ def main(args): print ' play #%d (%s):' % (playnum, label) for task in play.tasks(): - print "tags: %s" % task.tags if (set(task.tags).intersection(pb.only_tags) and not set(task.tags).intersection(pb.skip_tags)): if getattr(task, 'name', None) is not None: From 1942a69ed27387d48a338f755e32bc4feb01e1a1 Mon Sep 17 00:00:00 2001 From: Paul Oyston Date: Thu, 27 Mar 2014 17:06:20 +0000 Subject: [PATCH 574/772] Added template call to _executor_internal to allow the passing of arbitrary data to modules. 
--- lib/ansible/runner/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 85f4b5d0678..502df176df8 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -543,10 +543,12 @@ class Runner(object): # fireball, local, etc port = self.remote_port + module_vars = template.template(self.basedir, self.module_vars, host_variables) + inject = {} inject = utils.combine_vars(inject, self.default_vars) inject = utils.combine_vars(inject, host_variables) - inject = utils.combine_vars(inject, self.module_vars) + inject = utils.combine_vars(inject, module_vars) inject = utils.combine_vars(inject, self.setup_cache[host]) inject.setdefault('ansible_ssh_user', self.remote_user) inject['hostvars'] = HostVars(self.setup_cache, self.inventory) From bb8644352886b34922355f9c2bf69a6583d5935d Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 26 Mar 2014 16:25:46 -0500 Subject: [PATCH 575/772] Implement recommendations from #4864 --- library/cloud/rax_cbs | 117 ++++++++++----- library/cloud/rax_cbs_attachments | 231 +++++++++++++++++++----------- 2 files changed, 224 insertions(+), 124 deletions(-) diff --git a/library/cloud/rax_cbs b/library/cloud/rax_cbs index efa40c0ffc0..73106eb41ab 100644 --- a/library/cloud/rax_cbs +++ b/library/cloud/rax_cbs @@ -1,4 +1,4 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -20,17 +20,48 @@ module: rax_cbs short_description: Manipulate Rackspace Cloud Block Storage Volumes description: - Manipulate Rackspace Cloud Block Storage Volumes -version_added: "1.5" +version_added: 1.6 options: api_key: description: - - Rackspace API key (overrides C(credentials)) + - Rackspace API key (overrides I(credentials)) + aliases: + - password + auth_endpoint: + description: + - The URI of the authentication service + default: https://identity.api.rackspacecloud.com/v2.0/ credentials: description: - - File to find the Rackspace credentials in (ignored if C(api_key) and - C(username) are provided) + - File to find the Rackspace credentials in (ignored if I(api_key) and + I(username) are provided) default: null - aliases: ['creds_file'] + aliases: + - creds_file + env: + description: + - Environment as configured in ~/.pyrax.cfg, + see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration) + identity_type: + description: + - Authentication machanism to use, such as rackspace or keystone + default: rackspace + region: + description: + - Region to create an instance in + default: DFW + tenant_id: + description: + - The tenant ID used for authentication + tenant_name: + description: + - The tenant name used for authentication + username: + description: + - Rackspace username (overrides I(credentials)) + verify_ssl: + description: + - Whether or not to require SSL validation of API endpoints description: description: - Description to give the volume being created @@ -44,10 +75,6 @@ options: - Name to give the volume being created default: null required: true - region: - description: - - Region to create the volume in - default: DFW size: description: - Size of the volume to create in Gigabytes @@ -60,28 +87,32 @@ options: state: description: - Indicate desired state of the resource - choices: ['present', 'absent'] + choices: + - present + - absent default: present required: true volume_type: description: - Type of the volume being created 
- choices: ['SATA', 'SSD'] + choices: + - SATA + - SSD default: SATA required: true - username: - description: - - Rackspace username (overrides C(credentials)) wait: description: - wait for the volume to be in state 'available' before returning default: "no" - choices: [ "yes", "no" ] + choices: + - "yes" + - "no" wait_timeout: description: - how long before wait gives up, in seconds default: 300 -requirements: [ "pyrax" ] +requirements: + - pyrax author: Christopher H. Laco, Matt Martz notes: - The following environment variables can be used, C(RAX_USERNAME), @@ -116,13 +147,14 @@ EXAMPLES = ''' import sys +from uuid import UUID from types import NoneType try: import pyrax + HAS_PYRAX = True except ImportError: - print("failed=True msg='pyrax required for this module'") - sys.exit(1) + HAS_PYRAX = False NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use', @@ -133,29 +165,33 @@ def cloud_block_storage(module, state, name, description, meta, size, snapshot_id, volume_type, wait, wait_timeout): for arg in (state, name, size, volume_type): if not arg: - module.fail_json(msg='%s is required for rax_clb' % arg) + module.fail_json(msg='%s is required for rax_cbs' % arg) - if int(size) < 100: + if size < 100: module.fail_json(msg='"size" must be greater than or equal to 100') changed = False - volumes = [] + volume = None instance = {} cbs = pyrax.cloud_blockstorage - for volume in cbs.list(): - if name != volume.name and name != volume.id: - continue - - volumes.append(volume) + if cbs is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') - if len(volumes) > 1: - module.fail_json(msg='Multiple Storage Volumes were matched by name, ' - 'try using the Volume ID instead') + try: + UUID(name) + volume = cbs.get(name) + except ValueError: + try: + volume = cbs.find(name=name) + except Exception, e: + module.fail_json(msg='%s' % e) if state == 'present': - if not volumes: + if not volume: try: volume = cbs.create(name, size=size, volume_type=volume_type, description=description, @@ -164,8 +200,11 @@ def cloud_block_storage(module, state, name, description, meta, size, changed = True except Exception, e: module.fail_json(msg='%s' % e.message) - else: - volume = volumes[0] + else: + if wait: + attempts = wait_timeout / 5 + pyrax.utils.wait_for_build(volume, interval=5, + attempts=attempts) volume.get() for key, value in vars(volume).iteritems(): @@ -186,8 +225,7 @@ def cloud_block_storage(module, state, name, description, meta, size, module.exit_json(**result) elif state == 'absent': - if volumes: - volume = volumes[0] + if volume: try: volume.delete() changed = True @@ -203,12 +241,12 @@ def main(): dict( description=dict(), meta=dict(type='dict', default={}), - name=dict(), + name=dict(required=True), size=dict(type='int', default=100), snapshot_id=dict(), state=dict(default='present', choices=['present', 'absent']), volume_type=dict(choices=['SSD', 'SATA'], default='SATA'), - wait=dict(type='bool'), + wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=300) ) ) @@ -218,6 +256,9 @@ def main(): required_together=rax_required_together() ) + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + description = module.params.get('description') meta = module.params.get('meta') name = module.params.get('name') @@ -226,7 +267,7 @@ def main(): state = 
module.params.get('state') volume_type = module.params.get('volume_type') wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) + wait_timeout = module.params.get('wait_timeout') setup_rax_module(module, pyrax) diff --git a/library/cloud/rax_cbs_attachments b/library/cloud/rax_cbs_attachments index 2a0ac49775e..c20c03a69ea 100644 --- a/library/cloud/rax_cbs_attachments +++ b/library/cloud/rax_cbs_attachments @@ -1,4 +1,4 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -20,31 +20,58 @@ module: rax_cbs_attachments short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments description: - Manipulate Rackspace Cloud Block Storage Volume Attachments -version_added: "1.5" +version_added: 1.6 options: api_key: description: - - Rackspace API key (overrides C(credentials)) + - Rackspace API key (overrides I(credentials)) + aliases: + - password + auth_endpoint: + description: + - The URI of the authentication service + default: https://identity.api.rackspacecloud.com/v2.0/ credentials: description: - - File to find the Rackspace credentials in (ignored if C(api_key) and - C(username) are provided) + - File to find the Rackspace credentials in (ignored if I(api_key) and + I(username) are provided) default: null - aliases: ['creds_file'] - mountpoint: + aliases: + - creds_file + env: + description: + - Environment as configured in ~/.pyrax.cfg, + see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration) + identity_type: + description: + - Authentication mechanism to use, such as rackspace or keystone + default: rackspace + region: + description: + - Region to create an instance in + default: DFW + tenant_id: + description: + - The tenant ID used for authentication + tenant_name: description: - - The mount point to attach the volume to + - The tenant name used for authentication + username: + description: + - Rackspace username (overrides I(credentials)) + verify_ssl: + description: + - Whether or not to require SSL validation of API endpoints + device: + description: + - The device path to attach the volume to, e.g. /dev/xvde default: null required: true - name: + volume: description: - Name or id of the volume to attach/detach default: null required: true - region: - description: - - Region the volume and server are located in - default: DFW server: description: - Name or id of the server to attach/detach default: null required: true state: description: - Indicate desired state of the resource - choices: ['present', 'absent'] + choices: + - present + - absent default: present required: true - username: - description: - - Rackspace username (overrides C(credentials)) wait: description: - wait for the volume to be in 'in-use'/'available' state before returning default: "no" - choices: [ "yes", "no" ] + choices: + - "yes" + - "no" wait_timeout: description: - how long before wait gives up, in seconds default: 300 -requirements: [ "pyrax" ] +requirements: + - pyrax author: Christopher H. 
Laco, Matt Martz notes: - The following environment variables can be used, C(RAX_USERNAME), @@ -89,9 +118,9 @@ EXAMPLES = ''' local_action: module: rax_cbs_attachments credentials: ~/.raxpub - name: my-volume + volume: my-volume server: my-server - mountpoint: /dev/xvdd + device: /dev/xvdd region: DFW wait: yes state: present @@ -100,62 +129,78 @@ EXAMPLES = ''' import sys +from uuid import UUID from types import NoneType try: import pyrax + HAS_PYRAX = True except ImportError: - print("failed=True msg='pyrax required for this module'") - sys.exit(1) + HAS_PYRAX = False NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) -VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use', - 'error', 'error_deleting') -def cloud_block_storage_attachments(module, state, name, server, mountpoint, +def cloud_block_storage_attachments(module, state, volume, server, device, wait, wait_timeout): - for arg in (state, name, server, mountpoint): + for arg in (state, volume, server, device): if not arg: - module.fail_json(msg='%s is required for rax_clb_attachments' % arg) + module.fail_json(msg='%s is required for rax_cbs_attachments' % + arg) cbs = pyrax.cloud_blockstorage cs = pyrax.cloudservers + + if cbs is None or cs is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + changed = False - volumes = [] instance = {} - for volume in cbs.list(): - if name == volume.display_name or name == volume.id: - volumes.append(volume) + try: + UUID(volume) + volume = cbs.get(volume) + except ValueError: + try: + volume = cbs.find(name=volume) + except Exception, e: + module.fail_json(msg='%s' % e) - if len(volumes) > 1: - module.fail_json(msg='Multiple Storage Volumes were matched by name, ' - 'try using the Volume ID instead') - elif not volumes: - module.fail_json(msg='No Storage Volumes were matched by name, ' - 'try using the Volume ID instead') + if not volume: + module.fail_json(msg='No matching storage volumes were found') - volume = volumes[0] if state == 'present': - server = cs.servers.get(server) - - if not server: - module.fail_json(msg='No Server was matched by name, ' - 'try using the Server ID instead') + try: + UUID(server) + server = cs.servers.get(server) + except ValueError: + servers = cs.servers.list(search_opts=dict(name='^%s$' % server)) + if not servers: + module.fail_json(msg='No Server was matched by name, ' + 'try using the Server ID instead') + if len(servers) > 1: + module.fail_json(msg='Multiple servers matched by name, ' + 'try using the Server ID instead') + + # We made it this far, grab the first and hopefully only server + # in the list + server = servers[0] + + if (volume.attachments and + volume.attachments[0]['server_id'] == server.id): + changed = False + elif volume.attachments: + module.fail_json(msg='Volume is attached to another server') else: - if volume.attachments and volume.attachments[0]['server_id'] == server.id: - changed = False - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') - else: - try: - volume.attach_to_instance(server, mountpoint=mountpoint) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - volume.get() + try: + volume.attach_to_instance(server, mountpoint=device) + changed = True + except Exception, e: + module.fail_json(msg='%s' % e.message) + + volume.get() for key, value in vars(volume).iteritems(): if (isinstance(value, NON_CALLABLES) and @@ 
-167,9 +212,9 @@ def cloud_block_storage_attachments(module, state, name, server, mountpoint, if volume.status == 'error': result['msg'] = '%s failed to build' % volume.id elif wait: + attempts = wait_timeout / 5 pyrax.utils.wait_until(volume, 'status', 'in-use', - interval=3, attempts=0, - verbose=False) + interval=5, attempts=attempts) if 'msg' in result: module.fail_json(**result) @@ -177,27 +222,38 @@ def cloud_block_storage_attachments(module, state, name, server, mountpoint, module.exit_json(**result) elif state == 'absent': - server = cs.servers.get(server) - - if not server: - module.fail_json(msg='No Server was matched by name, ' - 'try using the Server ID instead') - else: - if volume.attachments and volume.attachments[0]['server_id'] == server.id: - try: - volume.detach() - if wait: - pyrax.utils.wait_until(volume, 'status', 'available', - interval=3, attempts=0, - verbose=False) - changed = True - except Exception, e: - module.fail_json(msg='%s' % e.message) - - volume.get() + try: + UUID(server) + server = cs.servers.get(server) + except ValueError: + servers = cs.servers.list(search_opts=dict(name='^%s$' % server)) + if not servers: + module.fail_json(msg='No Server was matched by name, ' + 'try using the Server ID instead') + if len(servers) > 1: + module.fail_json(msg='Multiple servers matched by name, ' + 'try using the Server ID instead') + + # We made it this far, grab the first and hopefully only server + # in the list + server = servers[0] + + if (volume.attachments and + volume.attachments[0]['server_id'] == server.id): + try: + volume.detach() + if wait: + pyrax.utils.wait_until(volume, 'status', 'available', + interval=3, attempts=0, + verbose=False) changed = True - elif volume.attachments: - module.fail_json(msg='Volume is attached to another server') + except Exception, e: + module.fail_json(msg='%s' % e.message) + + volume.get() + changed = True + elif volume.attachments: + module.fail_json(msg='Volume is attached to another server') for key, value in vars(volume).iteritems(): if (isinstance(value, NON_CALLABLES) and @@ -221,11 +277,11 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - mountpoint=dict(), - name=dict(), - server=dict(), + device=dict(required=True), + volume=dict(required=True), + server=dict(required=True), state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool'), + wait=dict(type='bool', default=False), wait_timeout=dict(type='int', default=300) ) ) @@ -235,16 +291,19 @@ def main(): required_together=rax_required_together() ) - mountpoint = module.params.get('mountpoint') - name = module.params.get('name') + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + + device = module.params.get('device') + volume = module.params.get('volume') server = module.params.get('server') state = module.params.get('state') wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) + wait_timeout = module.params.get('wait_timeout') setup_rax_module(module, pyrax) - cloud_block_storage_attachments(module, state, name, server, mountpoint, + cloud_block_storage_attachments(module, state, volume, server, device, wait, wait_timeout) # import module snippets From 1eaf85b89f31876f686103b45ee5a9a9d5c84095 Mon Sep 17 00:00:00 2001 From: Cristian Ciupitu Date: Thu, 27 Mar 2014 19:56:33 +0200 Subject: [PATCH 576/772] Micro-optimization: replace s.find(x)!=-1 with x in s timeit shows a speedup of ~3x on Python 2.7.5 x86_64. 
It also makes the code a bit shorter. --- lib/ansible/module_utils/known_hosts.py | 2 +- lib/ansible/utils/__init__.py | 4 ++-- library/cloud/gc_storage | 2 +- library/cloud/s3 | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index e6912d91846..68ef2828319 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -98,7 +98,7 @@ def not_in_host_file(self, host): host_fh.close() for line in data.split("\n"): - if line is None or line.find(" ") == -1: + if line is None or " " not in line: continue tokens = line.split() if tokens[0].find(HASHED_KEY_MAGIC) == 0: diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 972089d6a3c..2b86034bde9 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -314,7 +314,7 @@ def parse_json(raw_data): raise for t in tokens: - if t.find("=") == -1: + if "=" not in t: raise errors.AnsibleError("failed to parse: %s" % orig_data) (key,value) = t.split("=", 1) if key == 'changed' or 'failed': @@ -1035,7 +1035,7 @@ def listify_lookup_plugin_terms(terms, basedir, inject): # not sure why the "/" is in above code :) try: new_terms = template.template(basedir, "{{ %s }}" % terms, inject) - if isinstance(new_terms, basestring) and new_terms.find("{{") != -1: + if isinstance(new_terms, basestring) and "{{" in new_terms: pass else: terms = new_terms diff --git a/library/cloud/gc_storage b/library/cloud/gc_storage index cbf72aa8e92..4bbf9eabae7 100644 --- a/library/cloud/gc_storage +++ b/library/cloud/gc_storage @@ -154,7 +154,7 @@ def keysum(module, gs, bucket, obj): key_check = bucket.get_key(obj) if key_check: md5_remote = key_check.etag[1:-1] - etag_multipart = md5_remote.find('-')!=-1 #Check for multipart, etag is not md5 + etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5 if etag_multipart is True: module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.") return md5_remote diff --git a/library/cloud/s3 b/library/cloud/s3 index 6d64a3f43fe..aaa2e0f4ffb 100644 --- a/library/cloud/s3 +++ b/library/cloud/s3 @@ -147,7 +147,7 @@ def keysum(module, s3, bucket, obj): key_check = bucket.get_key(obj) if key_check: md5_remote = key_check.etag[1:-1] - etag_multipart = md5_remote.find('-')!=-1 #Check for multipart, etag is not md5 + etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5 if etag_multipart is True: module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.") return md5_remote From 96ff1444f09a95e80a5f6014a802e8afa37a0c82 Mon Sep 17 00:00:00 2001 From: Cristian Ciupitu Date: Thu, 27 Mar 2014 20:30:09 +0200 Subject: [PATCH 577/772] Micro-optimization: replace s.find(x)==0 with s.startswith(x) timeit shows a speedup of ~1.4x on Python 2.7.5 x86_64. It also makes the code a bit shorter. 
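For context, the kind of measurement behind the ~3x and ~1.4x figures in these two micro-optimization patches can be reproduced with a short snippet like the one below (an illustrative sketch, not part of either patch; absolute timings vary by interpreter and machine):

    import timeit

    setup = "s = 'some reasonably long line of output text'"

    # substring test: the 'in' operator vs. str.find()
    print(timeit.timeit("'output' in s", setup=setup))
    print(timeit.timeit("s.find('output') != -1", setup=setup))

    # prefix test: str.startswith() vs. str.find() == 0
    print(timeit.timeit("s.startswith('some')", setup=setup))
    print(timeit.timeit("s.find('some') == 0", setup=setup))

The speedups come largely from avoiding the attribute lookup and method-call overhead of find(), and, in the prefix test, from startswith() never scanning past the start of the string.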
--- lib/ansible/inventory/__init__.py | 2 +- library/system/service | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index a8cca8faaf2..830d74c01ef 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -108,7 +108,7 @@ class Inventory(object): inv_file = open(host_list) first_line = inv_file.readlines()[0] inv_file.close() - if first_line.find('#!') == 0: + if first_line.startswith('#!'): shebang_present = True except: pass diff --git a/library/system/service b/library/system/service index 97a970a9515..141b2ac418a 100644 --- a/library/system/service +++ b/library/system/service @@ -1008,7 +1008,7 @@ class SunOSService(Service): # enabled true # enabled false for line in stdout.split("\n"): - if line.find("enabled") == 0: + if line.startswith("enabled"): if "true" in line: enabled = True if "temporary" in line: From 647c92791c1abbab521d3093b5287e15398d56f1 Mon Sep 17 00:00:00 2001 From: jjshoe Date: Thu, 27 Mar 2014 15:08:20 -0500 Subject: [PATCH 578/772] Add meta to the directory layout for roles --- docsite/rst/playbooks_best_practices.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docsite/rst/playbooks_best_practices.rst b/docsite/rst/playbooks_best_practices.rst index 298b832ff0a..487262a4b75 100644 --- a/docsite/rst/playbooks_best_practices.rst +++ b/docsite/rst/playbooks_best_practices.rst @@ -51,6 +51,8 @@ The top level of the directory would contain files and directories like so:: foo.sh # <-- script files for use with the script resource vars/ # main.yml # <-- variables associated with this role + meta/ # + main.yml # <-- role dependencies webtier/ # same kind of structure as "common" was above, done for the webtier role monitoring/ # "" From 0306096feed295dabb02412e496da8267b927cac Mon Sep 17 00:00:00 2001 From: Tim Miller Date: Thu, 27 Mar 2014 13:42:40 -0700 Subject: [PATCH 579/772] Fix typo in with_first_found doc. --- docsite/rst/playbooks_loops.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_loops.rst b/docsite/rst/playbooks_loops.rst index 3917228229f..f19776396ea 100644 --- a/docsite/rst/playbooks_loops.rst +++ b/docsite/rst/playbooks_loops.rst @@ -250,7 +250,7 @@ that matches a given criteria, and some of the filenames are determined by varia - name: INTERFACES | Create Ansible header for /etc/network/interfaces template: src={{ item }} dest=/etc/foo.conf with_first_found: - - "{{ansible_virtualization_type}_foo.conf" + - "{{ansible_virtualization_type}}_foo.conf" - "default_foo.conf" This tool also has a long form version that allows for configurable search paths. Here's an example:: From 9fbef265bf2fc3ca6b72a2a7eeafaeda22964f0d Mon Sep 17 00:00:00 2001 From: "Michael J. Schultz" Date: Fri, 28 Mar 2014 08:28:37 -0500 Subject: [PATCH 580/772] Add Amazon SNS notification plugin - Supports a default message or a special message for any of SNS's protocols (email, sqs, sms, http, https) - http://docs.aws.amazon.com/sns/latest/dg/PublishTopic.html --- library/notification/sns | 185 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 185 insertions(+) create mode 100644 library/notification/sns diff --git a/library/notification/sns b/library/notification/sns new file mode 100644 index 00000000000..405759f9f2b --- /dev/null +++ b/library/notification/sns @@ -0,0 +1,185 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Michael J. 
Schultz +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: sns +short_description: Send Amazon Simple Notification Service (SNS) messages +description: + - The M(sns) module sends notifications to a topic on your Amazon SNS account +version_added: 1.6 +author: Michael J. Schultz +options: + msg: + description: + - Default message to send. + required: true + aliases: [ "default" ] + subject: + description: + - Subject line for email delivery. + required: false + topic: + description: + - The topic you want to publish to. + required: true + email: + description: + - Message to send to email-only subscription + required: false + sqs: + description: + - Message to send to SQS-only subscription + required: false + sms: + description: + - Message to send to SMS-only subscription + required: false + http: + description: + - Message to send to HTTP-only subscription + required: false + https: + description: + - Message to send to HTTPS-only subscription + required: false + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: None + aliases: ['ec2_secret_key', 'secret_key'] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: None + aliases: ['ec2_access_key', 'access_key'] + region: + description: + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. + required: false + aliases: ['aws_region', 'ec2_region'] + +requirements: [ "boto" ] +author: Michael J. Schultz +""" + +EXAMPLES = """ +- name: Send default notification message via SNS + local_action: + module: sns + msg: "{{ inventory_hostname }} has completed the play." + subject: "Deploy complete!" + topic: "deploy" + +- name: Send notification messages via SNS with short message for SMS + local_action: + module: sns + msg: "{{ inventory_hostname }} has completed the play." + sms: "deployed!" + subject: "Deploy complete!" 
+ topic: "deploy" +""" + +import sys + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +try: + import boto + import boto.sns +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + + +def arn_topic_lookup(connection, short_topic): + response = connection.get_all_topics() + result = response[u'ListTopicsResponse'][u'ListTopicsResult'] + # topic names cannot have colons, so this captures the full topic name + lookup_topic = ':{}'.format(short_topic) + for topic in result[u'Topics']: + if topic[u'TopicArn'].endswith(lookup_topic): + return topic[u'TopicArn'] + return None + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + msg=dict(type='str', required=True, aliases=['default']), + subject=dict(type='str', default=None), + topic=dict(type='str', required=True), + email=dict(type='str', default=None), + sqs=dict(type='str', default=None), + sms=dict(type='str', default=None), + http=dict(type='str', default=None), + https=dict(type='str', default=None), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + msg = module.params['msg'] + subject = module.params['subject'] + topic = module.params['topic'] + email = module.params['email'] + sqs = module.params['sqs'] + sms = module.params['sms'] + http = module.params['http'] + https = module.params['https'] + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + try: + connection = connect_to_aws(boto.sns, region, **aws_connect_params) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + # .publish() takes full ARN topic id, but I'm lazy and type shortnames + # so do a lookup (topics cannot contain ':', so thats the decider) + if ':' in topic: + arn_topic = topic + else: + arn_topic = arn_topic_lookup(connection, topic) + + if not arn_topic: + module.fail_json(msg='Could not find topic: {}'.format(topic)) + + dict_msg = {'default': msg} + if email: + dict_msg.update(email=email) + if sqs: + dict_msg.update(sqs=sqs) + if sms: + dict_msg.update(sms=sms) + if http: + dict_msg.update(http=http) + if https: + dict_msg.update(https=https) + + json_msg = json.dumps(dict_msg) + connection.publish(topic=arn_topic, subject=subject, + message_structure='json', message=json_msg) + + module.exit_json(msg="OK") + +main() From 7bba229838354b472d7fdd735a01f0e223a89750 Mon Sep 17 00:00:00 2001 From: Joshua Conner Date: Mon, 17 Mar 2014 18:16:06 -0700 Subject: [PATCH 581/772] docker bugfixes: trim whitespace around list param elements, handle list params being coerced to int or long type --- library/cloud/docker | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/library/cloud/docker b/library/cloud/docker index 0248f5992af..f043e11c4cf 100644 --- a/library/cloud/docker +++ b/library/cloud/docker @@ -395,8 +395,10 @@ class DockerManager: """ param_list = self.module.params.get(param_name) if not isinstance(param_list, list): - param_list = param_list.split(delimiter) - return param_list + # if param_list is a number, like 3333, this will fail, so we coerce to a str first + param_list = str(param_list).split(delimiter) + # whitespace in between commas will cause problems if we don't strip each param + return [param.strip() for param in param_list] def get_exposed_ports(self, expose_list): From 7a6c60b437a38e7d56d8dc1c5113c4720af231bf Mon Sep 17 00:00:00 2001 From: Joshua Conner Date: Tue, 18 Mar 2014 13:13:12 -0700 Subject: [PATCH 582/772] docker: use type instead of 
manually casting strings to lists --- library/cloud/docker | 46 +++++++++++++++----------------------------- 1 file changed, 16 insertions(+), 30 deletions(-) diff --git a/library/cloud/docker b/library/cloud/docker index f043e11c4cf..2954811483d 100644 --- a/library/cloud/docker +++ b/library/cloud/docker @@ -345,7 +345,7 @@ class DockerManager: if self.module.params.get('volumes'): self.binds = {} self.volumes = {} - vols = self.parse_list_from_param('volumes') + vols = self.module.params.get('volumes') for vol in vols: parts = vol.split(":") # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) @@ -359,48 +359,32 @@ class DockerManager: self.lxc_conf = None if self.module.params.get('lxc_conf'): self.lxc_conf = [] - options = self.parse_list_from_param('lxc_conf') + options = self.module.params.get('lxc_conf') for option in options: parts = option.split(':') self.lxc_conf.append({"Key": parts[0], "Value": parts[1]}) self.exposed_ports = None if self.module.params.get('expose'): - expose = self.parse_list_from_param('expose') - self.exposed_ports = self.get_exposed_ports(expose) + self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose')) self.port_bindings = None if self.module.params.get('ports'): - ports = self.parse_list_from_param('ports') - self.port_bindings = self.get_port_bindings(ports) + self.port_bindings = self.get_port_bindings(self.module.params.get('ports')) self.links = None if self.module.params.get('links'): - links = self.parse_list_from_param('links') - self.links = dict(map(lambda x: x.split(':'), links)) + self.links = dict(map(lambda x: x.split(':'), self.module.params.get('links'))) self.env = None if self.module.params.get('env'): - env = self.parse_list_from_param('env') - self.env = dict(map(lambda x: x.split("="), env)) + self.env = dict(map(lambda x: x.split("="), self.module.params.get('env'))) # connect to docker server docker_url = urlparse(module.params.get('docker_url')) self.client = docker.Client(base_url=docker_url.geturl()) - def parse_list_from_param(self, param_name, delimiter=','): - """ - Get a list from a module parameter, whether it's specified as a delimiter-separated string or is already in list form. - """ - param_list = self.module.params.get(param_name) - if not isinstance(param_list, list): - # if param_list is a number, like 3333, this will fail, so we coerce to a str first - param_list = str(param_list).split(delimiter) - # whitespace in between commas will cause problems if we don't strip each param - return [param.strip() for param in param_list] - - def get_exposed_ports(self, expose_list): """ Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax. 
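As background for the two docker patches above — why the str() coercion matters before splitting — here is a standalone illustration (the helper name is invented for this sketch, it is not module code):

    # A YAML value like "ports: 3333" reaches the module as an int,
    # and int has no .split(), so a naive port.split(':') raises
    # AttributeError. Coercing with str() first, as the patch does,
    # handles both cases.
    def split_port(port):
        return str(port).split(':')

    print(split_port(3333))       # -> ['3333']
    print(split_port('80:8080'))  # -> ['80', '8080']

The same reasoning motivates the module_utils change in the following patch, which lets type='list' arguments accept bare integers and floats.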
@@ -425,7 +409,9 @@ class DockerManager: """ binds = {} for port in ports: - parts = port.split(':') + # ports could potentially be an array like [80, 443], so we make sure they're strings + # before splitting + parts = str(port).split(':') container_port = parts[-1] if '/' not in container_port: container_port = int(parts[-1]) @@ -634,12 +620,12 @@ def main(): count = dict(default=1), image = dict(required=True), command = dict(required=False, default=None), - expose = dict(required=False, default=None), - ports = dict(required=False, default=None), + expose = dict(required=False, default=None, type='list'), + ports = dict(required=False, default=None, type='list'), publish_all_ports = dict(default=False, type='bool'), - volumes = dict(default=None), + volumes = dict(default=None, type='list'), volumes_from = dict(default=None), - links = dict(default=None), + links = dict(default=None, type='list'), memory_limit = dict(default=0), memory_swap = dict(default=0), docker_url = dict(default='unix://var/run/docker.sock'), @@ -647,16 +633,16 @@ def main(): username = dict(), password = dict(), email = dict(), hostname = dict(default=None), - env = dict(), + env = dict(type='list'), dns = dict(), detach = dict(default=True, type='bool'), state = dict(default='present', choices=['absent', 'present', 'stopped', 'killed', 'restarted']), debug = dict(default=False, type='bool'), privileged = dict(default=False, type='bool'), - lxc_conf = dict(default=None), - name = dict(default=None), stdin_open = dict(default=False, type='bool'), tty = dict(default=False, type='bool'), + lxc_conf = dict(default=None, type='list'), + name = dict(default=None) ) ) From a6b25e8657d73107b5333c73919befe117eb99c2 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 11:18:29 -0400 Subject: [PATCH 583/772] Allow type=list module arguments to accept integers and floats. --- lib/ansible/module_utils/basic.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 7f7054fc478..9246fd1df7b 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -685,6 +685,8 @@ class AnsibleModule(object): if not isinstance(value, list): if isinstance(value, basestring): self.params[k] = value.split(",") + elif isinstance(value, int) or isinstance(value, float): + self.params[k] = [ str(value) ] else: is_invalid = True elif wanted == 'dict': From 04292e53027be82af75743b0920a17e2766fb2d5 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 11:30:13 -0400 Subject: [PATCH 584/772] Changelog update for new modules. --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 46a0f212ef9..0cfc9d86ac2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,8 @@ New Modules: * cloud: digital_ocean_domain * cloud: digital_ocean_sshkey * cloud: rax_identity +* cloud: rax_cbs (cloud block storage) +* cloud: rax_cbs_attachments * cloud: ec2_asg (configure autoscaling groups) * cloud: ec2_scaling_policy * cloud: ec2_metric_alarm From 0e8c7b1c0356b275df32903b317ae91998420a25 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 11:41:35 -0400 Subject: [PATCH 585/772] Scrub choices=BOOLEANS from remaining core module references. Correct form in argument_spec is type='bool'. 
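The corrected pattern is illustrated by the sketch below (the parameter name is invented for illustration): rather than enumerating choices=BOOLEANS, the spec declares type='bool' and lets AnsibleModule coerce the usual 'yes'/'no'/'true'/'false' spellings:

    # deprecated form being scrubbed:
    #   wait = dict(default=True, choices=BOOLEANS)
    # corrected form:
    argument_spec = dict(
        wait=dict(default=True, type='bool'),
    )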
--- library/cloud/ec2_elb | 7 +++---- library/cloud/ec2_elb_lb | 6 ++---- library/cloud/ec2_snapshot | 2 +- library/cloud/elasticache | 4 ++-- library/cloud/gce_pd | 5 ++--- library/system/authorized_key | 2 +- library/system/sysctl | 4 ++-- library/system/ufw | 2 +- 8 files changed, 14 insertions(+), 18 deletions(-) diff --git a/library/cloud/ec2_elb b/library/cloud/ec2_elb index c7a4e0d3b3f..4fc6910691b 100644 --- a/library/cloud/ec2_elb +++ b/library/cloud/ec2_elb @@ -313,12 +313,11 @@ class ElbManager: def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( - state={'required': True, - 'choices': ['present', 'absent']}, + state={'required': True, 'type' : 'bool'}, instance_id={'required': True}, ec2_elbs={'default': None, 'required': False, 'type':'list'}, - enable_availability_zone={'default': True, 'required': False, 'choices': BOOLEANS, 'type': 'bool'}, - wait={'required': False, 'choices': BOOLEANS, 'default': True, 'type': 'bool'}, + enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, + wait={'required': False, 'default': True, 'type': 'bool'}, wait_timeout={'requred': False, 'default': 0, 'type': 'int'} ) ) diff --git a/library/cloud/ec2_elb_lb b/library/cloud/ec2_elb_lb index ca3adc31931..0737b1a087b 100644 --- a/library/cloud/ec2_elb_lb +++ b/library/cloud/ec2_elb_lb @@ -467,11 +467,9 @@ def main(): state={'required': True, 'choices': ['present', 'absent']}, name={'required': True}, listeners={'default': None, 'required': False, 'type': 'list'}, - purge_listeners={'default': True, 'required': False, - 'choices': BOOLEANS, 'type': 'bool'}, + purge_listeners={'default': True, 'required': False, 'type': 'bool'}, zones={'default': None, 'required': False, 'type': 'list'}, - purge_zones={'default': False, 'required': False, - 'choices': BOOLEANS, 'type': 'bool'}, + purge_zones={'default': False, 'required': False, 'type': 'bool'}, security_group_ids={'default': None, 'required': False, 'type': 'list'}, health_check={'default': None, 'required': False, 'type': 'dict'}, ) diff --git a/library/cloud/ec2_snapshot b/library/cloud/ec2_snapshot index 0b499e47765..8673525dfe0 100644 --- a/library/cloud/ec2_snapshot +++ b/library/cloud/ec2_snapshot @@ -124,7 +124,7 @@ def main(): ec2_url = dict(), ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True), ec2_access_key = dict(aliases=['aws_access_key', 'access_key']), - wait = dict(choices=BOOLEANS, default='true'), + wait = dict(type='bool', default='true'), wait_timeout = dict(type='number', default=0), snapshot_tags = dict(type='dict', default=dict()), ) diff --git a/library/cloud/elasticache b/library/cloud/elasticache index c506b4719c8..54bf734d204 100644 --- a/library/cloud/elasticache +++ b/library/cloud/elasticache @@ -489,8 +489,8 @@ def main(): security_group_ids={'required': False, 'default': [], 'type': 'list'}, zone={'required': False, 'default': None}, - wait={'required': False, 'choices': BOOLEANS, 'default': True}, - hard_modify={'required': False, 'choices': BOOLEANS, 'default': False} + wait={'required': False, 'type' : 'bool', 'default': True}, + hard_modify={'required': False, 'type': 'bool', 'default': False} ) ) diff --git a/library/cloud/gce_pd b/library/cloud/gce_pd index a8e631a5522..04083aa89eb 100644 --- a/library/cloud/gce_pd +++ b/library/cloud/gce_pd @@ -127,10 +127,9 @@ except ImportError: def main(): module = AnsibleModule( argument_spec = dict( - detach_only = dict(choice=BOOLEANS), + detach_only = dict(type='bool'), instance_name = dict(), - 
mode = dict(default='READ_ONLY', - choices=['READ_WRITE', 'READ_ONLY']), + mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']), name = dict(required=True), size_gb = dict(default=10), state = dict(default='present'), diff --git a/library/system/authorized_key b/library/system/authorized_key index 1a7c8b97b0e..ac81c39d896 100644 --- a/library/system/authorized_key +++ b/library/system/authorized_key @@ -165,7 +165,7 @@ def keyfile(module, user, write=False, path=None, manage_dir=True): uid = user_entry.pw_uid gid = user_entry.pw_gid - if manage_dir in BOOLEANS_TRUE: + if manage_dir: if not os.path.exists(sshdir): os.mkdir(sshdir, 0700) if module.selinux_enabled(): diff --git a/library/system/sysctl b/library/system/sysctl index ab1da5e0959..fd6960f2228 100644 --- a/library/system/sysctl +++ b/library/system/sysctl @@ -185,9 +185,9 @@ class SysctlModule(object): def _parse_value(self, value): if value is None: return '' - elif value.lower() in BOOLEANS_TRUE: + elif value.lower(): return '1' - elif value.lower() in BOOLEANS_FALSE: + elif not value.lower(): return '0' else: return value.strip() diff --git a/library/system/ufw b/library/system/ufw index 07f51d853bc..73dc9e8974b 100644 --- a/library/system/ufw +++ b/library/system/ufw @@ -165,7 +165,7 @@ def main(): default = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']), logging = dict(default=None, choises=['on', 'off', 'low', 'medium', 'high', 'full']), direction = dict(default=None, choises=['in', 'incoming', 'out', 'outgoing']), - delete = dict(default=False, choices=BOOLEANS), + delete = dict(default=False, type='bool'), insert = dict(default=None), rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']), interface = dict(default=None, aliases=['if']), From c8a810df78e2fb72985410dd9eec95c1c73e498b Mon Sep 17 00:00:00 2001 From: "Michael J. Schultz" Date: Fri, 28 Mar 2014 11:08:50 -0500 Subject: [PATCH 586/772] Add some failure handling for `region` and `publish` --- library/notification/sns | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/library/notification/sns b/library/notification/sns index 405759f9f2b..f2ed178554e 100644 --- a/library/notification/sns +++ b/library/notification/sns @@ -149,6 +149,8 @@ def main(): https = module.params['https'] region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg="region must be specified") try: connection = connect_to_aws(boto.sns, region, **aws_connect_params) except boto.exception.NoAuthHandlerFound, e: @@ -177,8 +179,11 @@ def main(): dict_msg.update(https=https) json_msg = json.dumps(dict_msg) - connection.publish(topic=arn_topic, subject=subject, - message_structure='json', message=json_msg) + try: + connection.publish(topic=arn_topic, subject=subject, + message_structure='json', message=json_msg) + except boto.exception.BotoServerError, e: + module.fail_json(msg=str(e)) module.exit_json(msg="OK") From 7f7297f9ca2c1cfc394cc927ecedc89b3fcfa9b3 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 12:53:36 -0400 Subject: [PATCH 587/772] Rename vgoptions to vg_options. --- library/system/lvg | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/library/system/lvg b/library/system/lvg index 48d1fd3d4e9..906e13d6469 100644 --- a/library/system/lvg +++ b/library/system/lvg @@ -41,12 +41,12 @@ options: - The size of the physical extent in megabytes. Must be a power of 2. 
default: 4 required: false - vgoptions: + vg_options: description: - Additional options to pass to C(vgcreate) when creating the volume group. default: null required: false - version_added: "1.5" + version_added: "1.6" state: choices: [ "present", "absent" ] default: present @@ -105,7 +105,7 @@ def main(): vg=dict(required=True), pvs=dict(type='list'), pesize=dict(type='int', default=4), - vgoptions=dict(), + vg_options=dict(), state=dict(choices=["absent", "present"], default='present'), force=dict(type='bool', default='no'), ), @@ -116,7 +116,7 @@ def main(): state = module.params['state'] force = module.boolean(module.params['force']) pesize = module.params['pesize'] - vgoptions = module.params.get('vgoptions', '').split() + vgoptions = module.params.get('vg_options', '').split() if module.params['pvs']: dev_string = ' '.join(module.params['pvs']) From 11895fa43734ce246a7bf64746df618540251c1f Mon Sep 17 00:00:00 2001 From: addshore Date: Mon, 20 Jan 2014 00:54:53 +0100 Subject: [PATCH 588/772] Add --remote to git submodule_update This simply adds --remote to the git submodule update command. This means that if a branch is defined in .gitmodules then we should track said branch when updating. --- library/source_control/git | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/source_control/git b/library/source_control/git index 29dd8489f68..8acd817b8ed 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -376,8 +376,8 @@ def submodule_update(git_path, module, dest): return (0, '', '') cmd = [ git_path, 'submodule', 'sync' ] (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ] - (rc, out, err) = module.run_command(cmd, cwd=dest) + cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ] + (rc, out, err) = module.run_command(cmd) if rc != 0: module.fail_json(msg="Failed to init/update submodules") return (rc, out, err) From 78e5f6129f7d8c5d86771a804806b821f2ff4c36 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 13:07:13 -0400 Subject: [PATCH 589/772] Documentation updates for supervisor module changes. --- library/web_infrastructure/supervisorctl | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/library/web_infrastructure/supervisorctl b/library/web_infrastructure/supervisorctl index a53a93d22ee..2d458169e76 100644 --- a/library/web_infrastructure/supervisorctl +++ b/library/web_infrastructure/supervisorctl @@ -30,48 +30,50 @@ version_added: "0.7" options: name: description: - - The name of the supervisord program/group to manage. It will be taken as group name when it end with a colon I(:). + - The name of the supervisord program or group to manage. + - The name will be taken as group name when it ends with a colon I(:) + - Group support is only available in Ansible version 1.6 or later. required: true default: null config: description: - - configuration file path, passed as -c to supervisorctl. + - The supervisor configuration file path required: false default: null version_added: "1.3" server_url: description: - - URL on which supervisord server is listening, passed as -s to supervisorctl. + - URL on which supervisord server is listening required: false default: null version_added: "1.3" username: description: - - username to use for authentication with server, passed as -u to supervisorctl. 
+ - username to use for authentication required: false default: null version_added: "1.3" password: description: - - password to use for authentication with server, passed as -p to supervisorctl. + - password to use for authentication required: false default: null version_added: "1.3" state: description: - - The desired state of program/group. Affected programs' name will be returned in I(affected) field of the result. + - The desired state of program/group. required: true default: null choices: [ "present", "started", "stopped", "restarted" ] supervisorctl_path: description: - - Path to supervisorctl executable to use. + - path to supervisorctl executable required: false default: null version_added: "1.4" notes: - - When C(state) = I(present), will call C(supervisorctl reread) then call C(supervisorctl add) if the program/group is not exists. - - When C(state) = I(restarted), will call C(supervisorctl update) then call C(supervisorctl restart). + - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist. + - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart). requirements: [ "supervisorctl" ] author: Matt Wright, Aaron Wang ''' From e1a1ac1e76320a4c7202890f62777c283e36c2c6 Mon Sep 17 00:00:00 2001 From: Bob Zoller Date: Wed, 18 Dec 2013 11:34:08 -0800 Subject: [PATCH 590/772] gem module supports prerelease (--pre) --- library/packaging/gem | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/library/packaging/gem b/library/packaging/gem index cf64fea8677..643f38d8db4 100644 --- a/library/packaging/gem +++ b/library/packaging/gem @@ -67,6 +67,12 @@ options: description: - Version of the gem to be installed/removed. required: false + prerelease: + description: + - Allow installation of prerelease versions of the gem. + required: false + default: "no" + version_added: "1.6" author: Johan Wiren ''' @@ -174,6 +180,8 @@ def install(module): cmd.append('--user-install') else: cmd.append('--no-user-install') + if module.params['prerelease']: + cmd.append('--pre') cmd.append('--no-rdoc') cmd.append('--no-ri') cmd.append(module.params['gem_source']) @@ -190,6 +198,7 @@ def main(): repository = dict(required=False, aliases=['source'], type='str'), state = dict(required=False, default='present', choices=['present','absent','latest'], type='str'), user_install = dict(required=False, default=True, type='bool'), + prerelease = dict(required=False, default=False, type='bool'), version = dict(required=False, type='str'), ), supports_check_mode = True, From 5f14b6e637ca833e32d7dfa1426d78ac502bf50e Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 13:08:46 -0400 Subject: [PATCH 591/772] Rename prerelease to pre_release. --- library/packaging/gem | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/library/packaging/gem b/library/packaging/gem index 643f38d8db4..0d1a157a1f4 100644 --- a/library/packaging/gem +++ b/library/packaging/gem @@ -67,9 +67,9 @@ options: description: - Version of the gem to be installed/removed. required: false - prerelease: + pre_release: description: - - Allow installation of prerelease versions of the gem. + - Allow installation of pre-release versions of the gem. 
required: false default: "no" version_added: "1.6" @@ -180,7 +180,7 @@ def install(module): cmd.append('--user-install') else: cmd.append('--no-user-install') - if module.params['prerelease']: + if module.params['pre_release']: cmd.append('--pre') cmd.append('--no-rdoc') cmd.append('--no-ri') @@ -198,7 +198,7 @@ def main(): repository = dict(required=False, aliases=['source'], type='str'), state = dict(required=False, default='present', choices=['present','absent','latest'], type='str'), user_install = dict(required=False, default=True, type='bool'), - prerelease = dict(required=False, default=False, type='bool'), + pre_release = dict(required=False, default=False, type='bool'), version = dict(required=False, type='str'), ), supports_check_mode = True, From a4372eafaa9a0f566930b7bf256e77818f041aac Mon Sep 17 00:00:00 2001 From: Alex Coomans Date: Mon, 9 Dec 2013 22:08:01 -0600 Subject: [PATCH 592/772] Add the dnsimple module --- library/net_infrastructure/dnsimple | 299 ++++++++++++++++++++++ 1 file changed, 299 insertions(+) create mode 100755 library/net_infrastructure/dnsimple diff --git a/library/net_infrastructure/dnsimple b/library/net_infrastructure/dnsimple new file mode 100755 index 00000000000..2d993611463 --- /dev/null +++ b/library/net_infrastructure/dnsimple @@ -0,0 +1,299 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: dnsimple +version_added: "1.6" +short_description: Interface with dnsimple.com (a DNS hosting service). +description: + - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)" +options: + account_email: + description: + - "Account email. If omitted, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)" + required: false + default: null + + account_api_token: + description: + - Account API token. See I(account_email) for info. + required: false + default: null + + domain: + description: + - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If omitted, a list of domains will be returned. + - If domain is present but the domain doesn't exist, it will be created. 
+ required: false + default: null + + record: + description: + - Record to add, if blank a record for the domain will be created, supports the wildcard (*) + required: false + default: null + + record_ids: + description: + - List of records to ensure they either exist or don't exist + required: false + default: null + + type: + description: + - The type of DNS record to create + required: false + choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ] + default: null + + ttl: + description: + - The TTL to give the new record + required: false + default: 3600 (one hour) + + value: + description: + - Record value + - "Must be specified when trying to ensure a record exists" + required: false + default: null + + priority: + description: + - Record priority + required: false + default: null + + state: + description: + - whether the record should exist or not + required: false + choices: [ 'present', 'absent' ] + default: null + + solo: + description: + - Whether the record should be the only one for that record type and record name. Only use with state=present on a record + required: false + default: null + +requirements: [ dnsimple ] +author: Alex Coomans +''' + +EXAMPLES = ''' +# authenticate using email and API token +- local_action: dnsimple account_email=test@example.com account_api_token=dummyapitoken + +# fetch all domains +- local_action: dnsimple + register: domains + +# fetch my.com domain records +- local_action: dnsimple domain=my.com state=present + register: records + +# delete a domain +- local_action: dnsimple domain=my.com state=absent + +# create a test.my.com A record to point to 127.0.0.1 +- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1 + register: record + +# and then delete it +- local_action: dnsimple domain=my.com record_ids={{ record['id'] }} + +# create a my.com CNAME record to example.com +- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present + +# change its ttl +- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present + +# and delete the record +- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=absent + +''' + +try: + from dnsimple import DNSimple + from dnsimple.dnsimple import DNSimpleException +except ImportError: + print "failed=True msg='dnsimple required for this module'" + sys.exit(1) + +def main(): + module = AnsibleModule( + argument_spec = dict( + account_email = dict(required=False), + account_api_token = dict(required=False, no_log=True), + domain = dict(required=False), + record = dict(required=False), + record_ids = dict(required=False, type='list'), + type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']), + ttl = dict(required=False, default=3600, type='int'), + value = dict(required=False), + priority = dict(required=False, type='int'), + state = dict(required=False, choices=['present', 'absent']), + solo = dict(required=False, type='bool'), + ), + required_together = ( + ['record', 'value'] + ), + supports_check_mode = True, + ) + + account_email = module.params.get('account_email') + account_api_token = module.params.get('account_api_token') + domain = module.params.get('domain') + record = module.params.get('record') + record_ids = module.params.get('record_ids') + record_type = module.params.get('type') + ttl = module.params.get('ttl') + value = 
module.params.get('value') + priority = module.params.get('priority') + state = module.params.get('state') + is_solo = module.params.get('solo') + + if account_email and account_api_token: + client = DNSimple(email=account_email, api_token=account_api_token) + else: + client = DNSimple() + + try: + # Let's figure out what operation we want to do + + # No domain, return a list + if not domain: + domains = client.domains() + module.exit_json(changed=False, result=[d['domain'] for d in domains]) + + # Domain & No record + if domain and record is None and not record_ids: + domains = [d['domain'] for d in client.domains()] + if domain.isdigit(): + dr = next((d for d in domains if d['id'] == int(domain)), None) + else: + dr = next((d for d in domains if d['name'] == domain), None) + if state == 'present': + if dr: + module.exit_json(changed=False, result=dr) + else: + if module.check_mode: + module.exit_json(changed=True) + else: + module.exit_json(changed=True, result=client.add_domain(domain)['domain']) + elif state == 'absent': + if dr: + if not module.check_mode: + client.delete(domain) + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + else: + module.fail_json(msg="'%s' is an unknown value for the state argument" % state) + + # need the not none check since record could be an empty string + if domain and record is not None: + records = [r['record'] for r in client.records(str(domain))] + + if not record_type: + module.fail_json(msg="Missing the record type") + + if not value: + module.fail_json(msg="Missing the record value") + + rr = next((r for r in records if r['name'] == record and r['record_type'] == record_type and r['content'] == value), None) + + if state == 'present': + changed = False + if is_solo: + # delete any records that have the same name and record type + same_type = [r['id'] for r in records if r['name'] == record and r['record_type'] == record_type] + if rr: + same_type = [rid for rid in same_type if rid != rr['id']] + if same_type: + if not module.check_mode: + for rid in same_type: + client.delete_record(str(domain), rid) + changed = True + if rr: + # check if we need to update + if rr['ttl'] != ttl or rr['prio'] != priority: + data = {} + if ttl: data['ttl'] = ttl + if priority: data['prio'] = priority + if module.check_mode: + module.exit_json(changed=True) + else: + module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record']) + else: + module.exit_json(changed=changed, result=rr) + else: + # create it + data = { + 'name': record, + 'record_type': record_type, + 'content': value, + } + if ttl: data['ttl'] = ttl + if priority: data['prio'] = priority + if module.check_mode: + module.exit_json(changed=True) + else: + module.exit_json(changed=True, result=client.add_record(str(domain), data)['record']) + elif state == 'absent': + if rr: + if not module.check_mode: + client.delete_record(str(domain), rr['id']) + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + else: + module.fail_json(msg="'%s' is an unknown value for the state argument" % state) + + # Make sure these record_ids either all exist or none + if domain and record_ids: + current_records = [str(r['record']['id']) for r in client.records(str(domain))] + wanted_records = [str(r) for r in record_ids] + if state == 'present': + difference = list(set(wanted_records) - set(current_records)) + if difference: + module.fail_json(msg="Missing the following records: %s" % difference) + else: + module.exit_json(changed=False) + 
elif state == 'absent': + difference = list(set(wanted_records) & set(current_records)) + if difference: + if not module.check_mode: + for rid in difference: + client.delete_record(str(domain), rid) + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + else: + module.fail_json(msg="'%s' is an unknown value for the state argument" % state) + + except DNSimpleException, e: + module.fail_json(msg="Unable to contact DNSimple: %s" % e.message) + + module.fail_json(msg="Unknown what you wanted me to do") + +# import module snippets +from ansible.module_utils.basic import * + +main() From 2de398b4175f776b087700b20c69727f3d039fd8 Mon Sep 17 00:00:00 2001 From: Alex Coomans Date: Tue, 10 Dec 2013 21:55:18 -0600 Subject: [PATCH 593/772] Allow setting env variables over .dnsimple file --- library/net_infrastructure/dnsimple | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/library/net_infrastructure/dnsimple b/library/net_infrastructure/dnsimple index 2d993611463..5bb53198945 100755 --- a/library/net_infrastructure/dnsimple +++ b/library/net_infrastructure/dnsimple @@ -24,7 +24,7 @@ options: account_email: description: - - "Account email. If omitted, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)" + - "Account email. If omitted, the env variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for. If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)" required: false default: null @@ -129,6 +129,7 @@ EXAMPLES = ''' ''' +import os try: from dnsimple import DNSimple from dnsimple.dnsimple import DNSimpleException @@ -171,6 +172,8 @@ def main(): if account_email and account_api_token: client = DNSimple(email=account_email, api_token=account_api_token) + elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'): + client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN')) else: client = DNSimple() From 40c2e7646cb05319ee1b97ec80ddf5993402e95c Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 13:13:04 -0400 Subject: [PATCH 594/772] Changelog updates --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0cfc9d86ac2..4884b525833 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ New Modules: * system: alternatives * system: capabilities * net_infrastructure: bigip_facts +* net_infrastructure: dnsimple * cloud: digital_ocean_domain * cloud: digital_ocean_sshkey * cloud: rax_identity From dfbe35d4be52cb12d6860f1563d69f07a37495ff Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 13:18:49 -0400 Subject: [PATCH 595/772] Rename apt-rpm to apt_rpm. 
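Stepping back to the dnsimple credential change in PATCH 593 above: the resulting lookup order can be summarized in a standalone sketch (not the module source; DNSimple comes from the dnsimple package the module already requires):

    import os
    from dnsimple import DNSimple

    def make_client(email=None, token=None):
        # 1. explicit module parameters win
        if email and token:
            return DNSimple(email=email, api_token=token)
        # 2. then the DNSIMPLE_EMAIL / DNSIMPLE_API_TOKEN environment variables
        if os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
            return DNSimple(email=os.environ['DNSIMPLE_EMAIL'],
                            api_token=os.environ['DNSIMPLE_API_TOKEN'])
        # 3. finally the ~/.dnsimple credentials file, handled by the library itself
        return DNSimple()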
--- CHANGELOG.md | 1 + library/packaging/{apt-rpm => apt_rpm} | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) rename library/packaging/{apt-rpm => apt_rpm} (95%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4884b525833..0f05bb279e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ New Modules: * packaging: composer (PHP) * packaging: homebrew_tap (OS X) * packaging: homebrew_cask (OS X) +* packaging: apt_rpm * notification: nexmo (SMS) * notification: twilio (SMS) * notification: slack (Slack.com) diff --git a/library/packaging/apt-rpm b/library/packaging/apt_rpm similarity index 95% rename from library/packaging/apt-rpm rename to library/packaging/apt_rpm index e8302f1bd02..0eca3132224 100755 --- a/library/packaging/apt-rpm +++ b/library/packaging/apt_rpm @@ -21,8 +21,8 @@ DOCUMENTATION = ''' --- -module: apt-rpm -short_description: apt-rpm package manager +module: apt_rpm +short_description: apt_rpm package manager description: - Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required. version_added: "1.5" @@ -50,13 +50,13 @@ notes: [] EXAMPLES = ''' # install package foo -- apt-rpm: pkg=foo state=present +- apt_rpm: pkg=foo state=present # remove package foo -- apt-rpm: pkg=foo state=absent +- apt_rpm: pkg=foo state=absent # description: remove packages foo and bar -- apt-rpm: pkg=foo,bar state=absent +- apt_rpm: pkg=foo,bar state=absent # description: update the package database and install bar (bar will be the updated if a newer version exists) -- apt-rpm: name=bar state=present update_cache=yes +- apt_rpm: name=bar state=present update_cache=yes ''' From 5424c933c5c0e400ff1848115f4191811081b986 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 13:37:43 -0400 Subject: [PATCH 596/772] Update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f05bb279e0..d242b1bb9a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ New Modules: * notification: twilio (SMS) * notification: slack (Slack.com) * notification: typetalk (Typetalk.in) +* notification: sns (Amazon) * system: debconf * system: ufw * system: locale_gen From 5b5ab78183a4df8bfcfd81079ce8f87ee4033983 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 13:51:22 -0400 Subject: [PATCH 597/772] Add version_added to docs. --- library/cloud/ec2_group | 1 + 1 file changed, 1 insertion(+) diff --git a/library/cloud/ec2_group b/library/cloud/ec2_group index bf40e7b83b7..12a49fe0d82 100644 --- a/library/cloud/ec2_group +++ b/library/cloud/ec2_group @@ -30,6 +30,7 @@ options: description: - List of firewall outbound rules to enforce in this group (see example). 
required: false + version_added: "1.6" region: description: - the EC2 region to use From f9ec53cdef636d9989c8ceb69d00ae00c6e8058a Mon Sep 17 00:00:00 2001 From: "Andrea.Mandolo" Date: Wed, 5 Mar 2014 12:17:36 +0100 Subject: [PATCH 598/772] Add "checksum" option support to 'synchronize' module --- library/files/synchronize | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/library/files/synchronize b/library/files/synchronize index 3409c6f0300..d4b174f4259 100644 --- a/library/files/synchronize +++ b/library/files/synchronize @@ -49,6 +49,12 @@ options: choices: [ 'yes', 'no' ] default: 'yes' required: false + checksum: + description: + - Skip based on checksum, not mod-time & size; Pay attention that "archive" option is enable by default: "checksum" may not work as you would like. + choices: [ 'yes', 'no' ] + default: 'no' + required: false existing_only: description: - Skip creating new files on receiver. @@ -148,6 +154,9 @@ synchronize: src=some/relative/path dest=/some/absolute/path archive=no # Synchronization with --archive options enabled except for --recursive synchronize: src=some/relative/path dest=/some/absolute/path recursive=no +# Synchronization with --archive options enabled except for --times, with --checksum option enabled +synchronize: src=some/relative/path dest=/some/absolute/path checksum=yes times=no + # Synchronization without --archive options enabled except use --links synchronize: src=some/relative/path dest=/some/absolute/path archive=no links=yes @@ -186,6 +195,7 @@ def main(): private_key = dict(default=None), rsync_path = dict(default=None), archive = dict(default='yes', type='bool'), + checksum = dict(default='no', type='bool'), existing_only = dict(default='no', type='bool'), dirs = dict(default='no', type='bool'), recursive = dict(type='bool'), @@ -210,6 +220,7 @@ def main(): rsync = module.params.get('local_rsync_path', 'rsync') rsync_timeout = module.params.get('rsync_timeout', 'rsync_timeout') archive = module.params['archive'] + checksum = module.params['checksum'] existing_only = module.params['existing_only'] dirs = module.params['dirs'] # the default of these params depends on the value of archive @@ -228,6 +239,8 @@ def main(): cmd = cmd + ' --delete-after' if existing_only: cmd = cmd + ' --existing' + if checksum: + cmd = cmd + ' --checksum' if archive: cmd = cmd + ' --archive' if recursive is False: From c039dbe300d93e55eb641fb9ce34e53d2dc942a6 Mon Sep 17 00:00:00 2001 From: "Andrea.Mandolo" Date: Mon, 10 Mar 2014 17:27:05 +0100 Subject: [PATCH 599/772] Add STDOUT lines in response of "synchronize" module --- library/files/synchronize | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/library/files/synchronize b/library/files/synchronize index d4b174f4259..29b169b870c 100644 --- a/library/files/synchronize +++ b/library/files/synchronize @@ -304,8 +304,11 @@ def main(): return module.fail_json(msg=err, rc=rc, cmd=cmdstr) else: changed = changed_marker in out - return module.exit_json(changed=changed, msg=out.replace(changed_marker,''), - rc=rc, cmd=cmdstr) + out_clean=out.replace(changed_marker,'') + out_lines=out_clean.split('\n') + while '' in out_lines: out_lines.remove('') + return module.exit_json(changed=changed, msg=out_clean, + rc=rc, cmd=cmdstr, stdout_lines=out_lines) # import module snippets from ansible.module_utils.basic import * From 838b914a0ddaabe36b4f74ba3b72565e346ff374 Mon Sep 17 00:00:00 2001 From: "Andrea.Mandolo" Date: Mon, 17 Mar 2014 09:26:44 +0100 Subject: [PATCH 600/772] correct 
documentation comment and version_added in "synchronize" module --- library/files/synchronize | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/library/files/synchronize b/library/files/synchronize index 29b169b870c..c1bb836af9d 100644 --- a/library/files/synchronize +++ b/library/files/synchronize @@ -51,10 +51,11 @@ options: required: false checksum: description: - - Skip based on checksum, not mod-time & size; Pay attention that "archive" option is enable by default: "checksum" may not work as you would like. + - Skip based on checksum, not mod-time & size; Pay attention that "archive" option is enable by default: "checksum" option will not disable it. choices: [ 'yes', 'no' ] default: 'no' required: false + version_added: "1.6" existing_only: description: - Skip creating new files on receiver. From c1fbf80f70a32823ae11bb811f0aac6c71f29517 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 14:03:24 -0400 Subject: [PATCH 601/772] Slight docs tweak, split lines according to code standards --- library/files/synchronize | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/library/files/synchronize b/library/files/synchronize index c1bb836af9d..2edae8c9e1b 100644 --- a/library/files/synchronize +++ b/library/files/synchronize @@ -51,7 +51,7 @@ options: required: false checksum: description: - - Skip based on checksum, not mod-time & size; Pay attention that "archive" option is enable by default: "checksum" option will not disable it. + - Skip based on checksum, rather than mod-time & size; Note that that "archive" option is still enabled by default: the "checksum" option will not disable it. choices: [ 'yes', 'no' ] default: 'no' required: false @@ -307,7 +307,8 @@ def main(): changed = changed_marker in out out_clean=out.replace(changed_marker,'') out_lines=out_clean.split('\n') - while '' in out_lines: out_lines.remove('') + while '' in out_lines: + out_lines.remove('') return module.exit_json(changed=changed, msg=out_clean, rc=rc, cmd=cmdstr, stdout_lines=out_lines) From e02ae3476fae17cf881fe92cd06f5a235d1601b5 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 14:56:59 -0400 Subject: [PATCH 602/772] Resolve merge. --- library/packaging/pacman | 150 +++++++++++++++++++++------------------ 1 file changed, 79 insertions(+), 71 deletions(-) diff --git a/library/packaging/pacman b/library/packaging/pacman index 46b7f4c755f..5bf2d931e6e 100644 --- a/library/packaging/pacman +++ b/library/packaging/pacman @@ -1,82 +1,82 @@ #!/usr/bin/python -tt # -*- coding: utf-8 -*- -# (c) 2012, Afterburn -# Written by Afterburn -# Based on apt module written by Matthew Williams +# (c) 2012, Afterburn +# (c) 2013, Aaron Bull Schaefer # -# This module is free software: you can redistribute it and/or modify +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # -# This software is distributed in the hope that it will be useful, +# Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this software. If not, see . - +# along with Ansible. If not, see . 
DOCUMENTATION = ''' --- module: pacman -short_description: Package manager for Archlinux +short_description: Manage packages with I(pacman) description: - - Manages Archlinux packages - + - Manage packages with the I(pacman) package manager, which is used by + Arch Linux and its variants. version_added: "1.0" +author: Afterburn +notes: [] +requirements: [] options: name: description: - - name of package to install, upgrade or remove. - required: true + - Name of the package to install, upgrade, or remove. + required: false + default: null state: description: - - desired state of the package. + - Desired state of the package. required: false - choices: [ "installed", "absent" ] + default: "present" + choices: ["present", "absent"] - update_cache: + recurse: description: - - update the package database first (pacman -Syy). + - When removing a package, also remove its dependencies, provided + that they are not required by other packages and were not + explicitly installed by a user. required: false default: "no" - choices: [ "yes", "no" ] + choices: ["yes", "no"] + version_added: "1.3" - recurse: + update_cache: description: - - remove all not explicitly installed dependencies not required - by other packages of the package to remove + - Whether or not to refresh the master package lists. This can be + run as part of a package installation or as a separate step. required: false default: "no" - choices: [ "yes", "no" ] - version_added: "1.3" - -author: Afterburn -notes: [] + choices: ["yes", "no"] ''' EXAMPLES = ''' # Install package foo -- pacman: name=foo state=installed - -# Remove package foo -- pacman: name=foo state=absent +- pacman: name=foo state=present -# Remove packages foo and bar +# Remove packages foo and bar - pacman: name=foo,bar state=absent # Recursively remove package baz - pacman: name=baz state=absent recurse=yes -# Update the package database (pacman -Syy) and install bar (bar will be the updated if a newer version exists) -- pacman: name=bar, state=installed, update_cache=yes +# Run the equivalent of "pacman -Syy" as a separate step +- pacman: update_cache=yes ''' - import json import shlex import os @@ -85,11 +85,10 @@ import sys PACMAN_PATH = "/usr/bin/pacman" -def query_package(module, name, state="installed"): - +def query_package(module, name, state="present"): # pacman -Q returns 0 if the package is installed, # 1 if it is not installed - if state == "installed": + if state == "present": cmd = "pacman -Q %s" % (name) rc, stdout, stderr = module.run_command(cmd, check_rc=False) @@ -103,16 +102,18 @@ def update_package_db(module): cmd = "pacman -Syy" rc, stdout, stderr = module.run_command(cmd, check_rc=False) - if rc != 0: + if rc == 0: + return True + else: module.fail_json(msg="could not update package db") - + def remove_packages(module, packages): if module.params["recurse"]: args = "Rs" else: args = "R" - + remove_c = 0 # Using a for loop incase of error, we can report the package that failed for package in packages: @@ -125,7 +126,7 @@ def remove_packages(module, packages): if rc != 0: module.fail_json(msg="failed to remove %s" % (package)) - + remove_c += 1 if remove_c > 0: @@ -136,7 +137,6 @@ def remove_packages(module, packages): def install_packages(module, packages, package_files): - install_c = 0 for i, package in enumerate(packages): @@ -155,7 +155,7 @@ def install_packages(module, packages, package_files): module.fail_json(msg="failed to install %s" % (package)) install_c += 1 - + if install_c > 0: module.exit_json(changed=True, msg="installed %s 
package(s)" % (install_c)) @@ -166,7 +166,7 @@ def check_packages(module, packages, state): would_be_changed = [] for package in packages: installed = query_package(module, package) - if ((state == "installed" and not installed) or + if ((state == "present" and not installed) or (state == "absent" and installed)): would_be_changed.append(package) if would_be_changed: @@ -180,42 +180,50 @@ def check_packages(module, packages, state): def main(): module = AnsibleModule( - argument_spec = dict( - state = dict(default="installed", choices=["installed","absent"]), - update_cache = dict(default="no", aliases=["update-cache"], type='bool'), - recurse = dict(default="no", type='bool'), - name = dict(aliases=["pkg"], required=True)), - supports_check_mode = True) - + argument_spec = dict( + name = dict(aliases=['pkg']), + state = dict(default='present', choices=['present', 'installed', 'absent', 'removed']), + recurse = dict(default='no', choices=BOOLEANS, type='bool'), + update_cache = dict(default='no', aliases=['update-cache'], choices=BOOLEANS, type='bool')), + required_one_of = [['name', 'update_cache']], + supports_check_mode = True) if not os.path.exists(PACMAN_PATH): module.fail_json(msg="cannot find pacman, looking for %s" % (PACMAN_PATH)) p = module.params + # normalize the state parameter + if p['state'] in ['present', 'installed']: + p['state'] = 'present' + elif p['state'] in ['absent', 'removed']: + p['state'] = 'absent' + if p["update_cache"] and not module.check_mode: update_package_db(module) - - pkgs = p["name"].split(",") - - pkg_files = [] - for i, pkg in enumerate(pkgs): - if pkg.endswith('.pkg.tar.xz'): - # The package given is a filename, extract the raw pkg name from - # it and store the filename - pkg_files.append(pkg) - pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1]) - else: - pkg_files.append(None) - - if module.check_mode: - check_packages(module, pkgs, p['state']) - - if p["state"] == "installed": - install_packages(module, pkgs, pkg_files) - - elif p["state"] == "absent": - remove_packages(module, pkgs) + if not p['name']: + module.exit_json(changed=True, msg='updated the package master lists') + + if p['name']: + pkgs = p['name'].split(',') + + pkg_files = [] + for i, pkg in enumerate(pkgs): + if pkg.endswith('.pkg.tar.xz'): + # The package given is a filename, extract the raw pkg name from + # it and store the filename + pkg_files.append(pkg) + pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1]) + else: + pkg_files.append(None) + + if module.check_mode: + check_packages(module, pkgs, p['state']) + + if p['state'] == 'present': + install_packages(module, pkgs, pkg_files) + elif p['state'] == 'absent': + remove_packages(module, pkgs) # import module snippets from ansible.module_utils.basic import * From 8baaf458c794b86096dcbbdcbf652f9cdab809ee Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 14:55:48 -0400 Subject: [PATCH 603/772] Repairs for documentation build. --- library/files/synchronize | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/files/synchronize b/library/files/synchronize index 2edae8c9e1b..628f1ab5a21 100644 --- a/library/files/synchronize +++ b/library/files/synchronize @@ -51,7 +51,7 @@ options: required: false checksum: description: - - Skip based on checksum, rather than mod-time & size; Note that that "archive" option is still enabled by default: the "checksum" option will not disable it. 
+ - Skip based on checksum, rather than mod-time & size; Note that that "archive" option is still enabled by default - the "checksum" option will not disable it. choices: [ 'yes', 'no' ] default: 'no' required: false From 9190ddfcdaf7c3d9769a17b72099a370df140e51 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 15:03:04 -0400 Subject: [PATCH 604/772] Documentation updates / changelog. --- CHANGELOG.md | 1 + library/monitoring/logentries | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d242b1bb9a4..565b49af64c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ New Modules: * packaging: homebrew_tap (OS X) * packaging: homebrew_cask (OS X) * packaging: apt_rpm +* monitoring: logentries * notification: nexmo (SMS) * notification: twilio (SMS) * notification: slack (Slack.com) diff --git a/library/monitoring/logentries b/library/monitoring/logentries index 08a2d18264c..373f4f777ff 100644 --- a/library/monitoring/logentries +++ b/library/monitoring/logentries @@ -20,10 +20,10 @@ DOCUMENTATION = ''' --- module: logentries author: Ivan Vanderbyl -short_description: Log aggregation service +short_description: Module for tracking logs via logentries.com description: - Sends logs to LogEntries in realtime -version_added: "1.0" +version_added: "1.6" options: path: description: @@ -36,7 +36,7 @@ options: required: false default: present notes: - - Requires the LogEntries agent which can be installed following the instructions at logentires.com + - Requires the LogEntries agent which can be installed following the instructions at logentries.com ''' EXAMPLES = ''' - logentries: path=/var/log/nginx/access.log state=present @@ -124,7 +124,7 @@ def main(): elif p["state"] in ["absent", "unfollowed"]: unfollow_log(module, le_path, logs) -# this is magic, see lib/ansible/module_common.py -#<> +# import module snippets +from ansible.module_utils.basic import * main() From bef544dd0fd0a0e043e400066a7f2e4562b3dfa1 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Fri, 28 Mar 2014 15:35:52 -0400 Subject: [PATCH 605/772] Did my best to rebase. Now includes the latest changes made to devel, along with my change of adding category option to module --- library/packaging/svr4pkg | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/library/packaging/svr4pkg b/library/packaging/svr4pkg index 58961631b2b..4e790b46c52 100644 --- a/library/packaging/svr4pkg +++ b/library/packaging/svr4pkg @@ -65,6 +65,12 @@ options: default: "all" choices: ["current", "all"] version_added: "1.6" + category: + description: + - Install/Remove category instead of a single package. + required: false + choices: ["true", "false"] + version_added: "1.6" ''' EXAMPLES = ''' @@ -79,15 +85,20 @@ EXAMPLES = ''' # Ensure that a package is not installed. - svr4pkg: name=SUNWgnome-sound-recorder state=absent + +# Ensure that a category is not installed. 
+- svr4pkg: name=FIREFOX state=absent category=true ''' import os import tempfile -def package_installed(module, name): +def package_installed(module, name, category): cmd = [module.get_bin_path('pkginfo', True)] cmd.append('-q') + if category: + cmd.append('-c') cmd.append(name) rc, out, err = module.run_command(' '.join(cmd)) if rc == 0: @@ -124,7 +135,7 @@ def run_command(module, cmd): cmd[0] = module.get_bin_path(progname, True) return module.run_command(cmd) -def package_install(module, name, src, proxy, response_file, zone): +def package_install(module, name, src, proxy, response_file, zone, category): adminfile = create_admin_file() cmd = [ 'pkgadd', '-n'] if zone == 'current': @@ -134,6 +145,8 @@ def package_install(module, name, src, proxy, response_file, zone): cmd += [ '-x', proxy ] if response_file is not None: cmd += [ '-r', response_file ] + if category: + cmd += [ '-Y' ] cmd.append(name) (rc, out, err) = run_command(module, cmd) os.unlink(adminfile) @@ -141,7 +154,10 @@ def package_install(module, name, src, proxy, response_file, zone): def package_uninstall(module, name, src): adminfile = create_admin_file() - cmd = [ 'pkgrm', '-na', adminfile, name] + if category: + cmd = [ 'pkgrm', '-na', adminfile, '-Y', name ] + else: + cmd = [ 'pkgrm', '-na', adminfile, name] (rc, out, err) = run_command(module, cmd) os.unlink(adminfile) return (rc, out, err) @@ -154,7 +170,8 @@ def main(): src = dict(default = None), proxy = dict(default = None), response_file = dict(default = None), - zone = dict(required=False, default = 'all', choices=['current','all']) + zone = dict(required=False, default = 'all', choices=['current','all']), + category = dict(default=False, type='bool') ), supports_check_mode=True ) @@ -164,6 +181,7 @@ def main(): proxy = module.params['proxy'] response_file = module.params['response_file'] zone = module.params['zone'] + category = module.params['category'] rc = None out = '' err = '' @@ -175,20 +193,20 @@ def main(): if src is None: module.fail_json(name=name, msg="src is required when state=present") - if not package_installed(module, name): + if not package_installed(module, name, category): if module.check_mode: module.exit_json(changed=True) - (rc, out, err) = package_install(module, name, src, proxy, response_file, zone) + (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category) # Stdout is normally empty but for some packages can be # very long and is not often useful if len(out) > 75: out = out[:75] + '...' elif state == 'absent': - if package_installed(module, name): + if package_installed(module, name, category): if module.check_mode: module.exit_json(changed=True) - (rc, out, err) = package_uninstall(module, name, src) + (rc, out, err) = package_uninstall(module, name, src, category) out = out[:75] if rc is None: From d89d2432fd77ec1bdb08c0e5280d107b500f55c0 Mon Sep 17 00:00:00 2001 From: Cristian Ciupitu Date: Fri, 28 Mar 2014 22:47:46 +0200 Subject: [PATCH 606/772] Bugfix for gc_storage and s3 Make keysum return None if not key_check (this case wasn't covered). 
--- library/cloud/gc_storage | 11 ++++++----- library/cloud/s3 | 11 ++++++----- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/library/cloud/gc_storage b/library/cloud/gc_storage index 4bbf9eabae7..8696f8e965d 100644 --- a/library/cloud/gc_storage +++ b/library/cloud/gc_storage @@ -152,11 +152,12 @@ def key_check(module, gs, bucket, obj): def keysum(module, gs, bucket, obj): bucket = gs.lookup(bucket) key_check = bucket.get_key(obj) - if key_check: - md5_remote = key_check.etag[1:-1] - etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5 - if etag_multipart is True: - module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.") + if not key_check: + return None + md5_remote = key_check.etag[1:-1] + etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5 + if etag_multipart is True: + module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.") return md5_remote def bucket_check(module, gs, bucket): diff --git a/library/cloud/s3 b/library/cloud/s3 index aaa2e0f4ffb..715c0e00ab9 100644 --- a/library/cloud/s3 +++ b/library/cloud/s3 @@ -145,11 +145,12 @@ def key_check(module, s3, bucket, obj): def keysum(module, s3, bucket, obj): bucket = s3.lookup(bucket) key_check = bucket.get_key(obj) - if key_check: - md5_remote = key_check.etag[1:-1] - etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5 - if etag_multipart is True: - module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.") + if not key_check: + return None + md5_remote = key_check.etag[1:-1] + etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5 + if etag_multipart is True: + module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.") return md5_remote def bucket_check(module, s3, bucket): From 372c4e6d3103c39eb493bb2866156aff552c68d9 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 17:01:15 -0400 Subject: [PATCH 607/772] Changelog and version updates. --- CHANGELOG.md | 1 + library/packaging/layman | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 565b49af64c..b6cb442b0ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ New Modules: * packaging: homebrew_tap (OS X) * packaging: homebrew_cask (OS X) * packaging: apt_rpm +* packaging: layman * monitoring: logentries * notification: nexmo (SMS) * notification: twilio (SMS) diff --git a/library/packaging/layman b/library/packaging/layman index 0f7b986d491..a0b12202812 100644 --- a/library/packaging/layman +++ b/library/packaging/layman @@ -26,7 +26,7 @@ DOCUMENTATION = ''' --- module: layman author: Jakub Jirutka -version_added: "1.4.5" +version_added: "1.6" short_description: Manage Gentoo overlays description: - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux. 
From cc6d4bc3f0b3d2379693a5b8ffada2ba0d357401 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 17:04:31 -0400 Subject: [PATCH 608/772] changelog and version bump --- CHANGELOG.md | 1 + library/monitoring/rollbar_deployment | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b6cb442b0ce..3f2161b68f7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ New Modules: * packaging: apt_rpm * packaging: layman * monitoring: logentries +* monitoring: rollbar_deployment * notification: nexmo (SMS) * notification: twilio (SMS) * notification: slack (Slack.com) diff --git a/library/monitoring/rollbar_deployment b/library/monitoring/rollbar_deployment index ee67dc58151..772e78fc5c2 100644 --- a/library/monitoring/rollbar_deployment +++ b/library/monitoring/rollbar_deployment @@ -21,6 +21,7 @@ DOCUMENTATION = ''' --- module: rollbar_deployment +version_added: 1.6 author: Max Riveiro short_description: Notify Rollbar about app deployments description: From c958c301ae03c71bf6ba3567fa978df3b8acf375 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Mon, 28 Oct 2013 13:22:19 +0100 Subject: [PATCH 609/772] added module to enable/disable Apache2 modules This module uses a2enmod or a2dismod to enable or disable Apache2 modules. --- library/web_infrastructure/apache2_module | 97 +++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 library/web_infrastructure/apache2_module diff --git a/library/web_infrastructure/apache2_module b/library/web_infrastructure/apache2_module new file mode 100644 index 00000000000..66058fcb641 --- /dev/null +++ b/library/web_infrastructure/apache2_module @@ -0,0 +1,97 @@ +#!/usr/bin/python +#coding: utf-8 -*- + +# (c) 2013, Christian Berendt +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +# this is magic, see lib/ansible/module.params['common.py +#<> + +DOCUMENTATION = ''' +--- +module: apache2_module +short_description: enables/disables a module of the Apache2 webserver +description: + - Enables or disables a specified module of the Apache2 webserver. 
+options: + name: + description: + - name of the module to enable/disable + required: true + state: + description: + - indicate the desired state of the resource + choices: ['present', 'absent'] + default: present + +''' + +EXAMPLES = ''' +# enables the Apache2 module "wsgi" +- apache2_module: state=present name=wsgi + +# disables the Apache2 module "wsgi" +- apache2_module: state=absent name wsgi +''' + +def _module_is_enabled(module): + name = module.params['name'] + result, stdout, stderr = module.run_command("a2enmod -q %s" % name) + return result == 0 + +def _module_is_disabled(module): + return _module_is_enabled(module) == False + +def _disable_module(module): + name = module.params['name'] + + if _module_is_disabled(module): + module.exit_json(changed = False, result = "Success") + + result, stdout, stderr = module.run_command("a2dismod %s" % name) + if result != 0: + module.fail_json(msg="Failed to disable module %s: %s" % (name, stdout)) + + module.exit_json(changed = True, result = "Disabled") + +def _enable_module(module): + name = module.params['name'] + + if _module_is_enabled(module): + module.exit_json(changed = False, result = "Success") + + result, stdout, stderr = module.run_command("a2enmod %s" % name) + if result != 0: + module.fail_json(msg="Failed to enable module %s: %s" % (name, stdout)) + + module.exit_json(changed = True, result = "Enabled") + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + state = dict(default='present', choices=['absent', 'present']) + ), + ) + + if module.params['state'] == 'present': + _enable_module(module) + + if module.params['state'] == 'absent': + _disable_module(module) + +# this is magic, see lib/ansible/module.params['common.py +#<> +main() From b8a8e9fbec76c4c1bf2e681e40ac7f9d22125b3e Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Mon, 28 Oct 2013 13:46:28 +0100 Subject: [PATCH 610/772] removed duplicated <> line --- library/web_infrastructure/apache2_module | 3 --- 1 file changed, 3 deletions(-) diff --git a/library/web_infrastructure/apache2_module b/library/web_infrastructure/apache2_module index 66058fcb641..864d95e7550 100644 --- a/library/web_infrastructure/apache2_module +++ b/library/web_infrastructure/apache2_module @@ -16,9 +16,6 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . 
-# this is magic, see lib/ansible/module.params['common.py -#<> - DOCUMENTATION = ''' --- module: apache2_module From 9a1c1d3242899147775bbb83fb182bd301bdb06b Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Mon, 25 Nov 2013 12:50:22 +0100 Subject: [PATCH 611/772] use module.get_bin_path() for a2enmod --- library/web_infrastructure/apache2_module | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/library/web_infrastructure/apache2_module b/library/web_infrastructure/apache2_module index 864d95e7550..94e33adcbbd 100644 --- a/library/web_infrastructure/apache2_module +++ b/library/web_infrastructure/apache2_module @@ -45,7 +45,8 @@ EXAMPLES = ''' def _module_is_enabled(module): name = module.params['name'] - result, stdout, stderr = module.run_command("a2enmod -q %s" % name) + a2enmod_binary = module.get_bin_path("a2enmod") + result, stdout, stderr = module.run_command("%s -q %s" % (a2enmod_binary, name)) return result == 0 def _module_is_disabled(module): @@ -69,7 +70,8 @@ def _enable_module(module): if _module_is_enabled(module): module.exit_json(changed = False, result = "Success") - result, stdout, stderr = module.run_command("a2enmod %s" % name) + a2enmod_binary = module.get_bin_path("a2enmod") + result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name)) if result != 0: module.fail_json(msg="Failed to enable module %s: %s" % (name, stdout)) From 2d116aca5e8f7df419a32ca226b40554477a4235 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 17:11:21 -0400 Subject: [PATCH 612/772] Add version_added info + changelog --- CHANGELOG.md | 1 + library/web_infrastructure/apache2_module | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f2161b68f7..c3b98a1717a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ New Modules: * system: capabilities * net_infrastructure: bigip_facts * net_infrastructure: dnssimple +* web_infrastructure: apache2_module * cloud: digital_ocean_domain * cloud: digital_ocean_sshkey * cloud: rax_identity diff --git a/library/web_infrastructure/apache2_module b/library/web_infrastructure/apache2_module index 94e33adcbbd..73a92f40434 100644 --- a/library/web_infrastructure/apache2_module +++ b/library/web_infrastructure/apache2_module @@ -19,6 +19,7 @@ DOCUMENTATION = ''' --- module: apache2_module +version_added: 1.6 short_description: enables/disables a module of the Apache2 webserver description: - Enables or disables a specified module of the Apache2 webserver. 
@@ -91,6 +92,7 @@ def main(): if module.params['state'] == 'absent': _disable_module(module) -# this is magic, see lib/ansible/module.params['common.py -#<> +# import module snippets +from ansible.module_utils.basic import * main() + From cb97599a9710e3a6f740f2f9375f5f66bf502072 Mon Sep 17 00:00:00 2001 From: Seth Edwards Date: Fri, 28 Mar 2014 09:57:35 -0700 Subject: [PATCH 613/772] add librato annotation module --- library/monitoring/librato_annotation | 171 ++++++++++++++++++++++++++ 1 file changed, 171 insertions(+) create mode 100644 library/monitoring/librato_annotation diff --git a/library/monitoring/librato_annotation b/library/monitoring/librato_annotation new file mode 100644 index 00000000000..2dc1ea7f002 --- /dev/null +++ b/library/monitoring/librato_annotation @@ -0,0 +1,171 @@ +#!/usr/bin/python + +import base64 + +DOCUMENTATION = ''' +--- +module: librato_annotation +short_description: create an annotation in librato +description: + - Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically +version_added: "1.6" +author: Seth Edwards +requirements: + - urllib2 + - base64 +options: + user: + description: + - Librato account username + required: true + default: null + aliases: [] + api_key: + description: + - Librato account api key + required: true + default: null + aliases: [] + name: + description: + - The annotation stream name + - If the annotation stream does not exist, it will be created automatically + required: false + default: null + aliases: [] + title: + description: + - The title of an annotation is a string and may contain spaces + - The title should be a short, high-level summary of the annotation e.g. v45 Deployment + required: true + default: null + aliases: [] + source: + description: + - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population + required: false + default: null + aliases: [] + description: + description: + - The description contains extra meta-data about a particular annotation + - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2: shipped new feature foo! 
+ required: false + default: null + aliases: [] + start_time: + description: + - The unix timestamp indicating the the time at which the event referenced by this annotation started + required: false + default: "the current time" + aliases: [] + end_time: + description: + - The unix timestamp indicating the the time at which the event referenced by this annotation ended + - For events that have a duration, this is a useful way to annotate the duration of the event + required: false + default: null + aliases: [] + links: + description: + - Words go here + - that describe + - this option + required: true or false + default: a string or the word null + aliases: [] + + +''' + +EXAMPLES = ''' +# Create a simple annotation event with a source +- librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXX + title: 'App Config Change' + source: 'foo.bar' + description: 'This is a detailed description of the config change' + +# Create an annotation that includes a link +- librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXXX + name: 'code.deploy' + title: 'app code deploy' + description: 'this is a detailed description of a deployment' + links: + - { rel: 'example', href: 'http://www.example.com/deploy' } + +# Create an annotation with a start_time and end_time +- librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXXX + name: 'maintenance' + title: 'Maintenance window' + description: 'This is a detailed description of maintenance' + start_time: 1395940006 + end_time: 1395954406 +''' + + +try: + import urllib2 + HAS_URLLIB2 = True +except ImportError: + HAS_URLLIB2 = False + +def post_annotation(module): + user = module.params['user'] + api_key = module.params['api_key'] + name = module.params['name'] + title = module.params['title'] + + url = 'https://metrics-api.librato.com/v1/annotations/%s' % name + params = {} + params['title'] = title + + if module.params['source'] != None: + params['source'] = module.params['source'] + if module.params['description'] != None: + params['description'] = module.params['description'] + if module.params['start_time'] != None: + params['start_time'] = module.params['start_time'] + if module.params['end_time'] != None: + params['end_time'] = module.params['end_time'] + if module.params['links'] != None: + params['links'] = module.params['links'] + + json_body = module.jsonify(params) + + headers = {} + headers['Content-Type'] = 'application/json' + headers['Authorization'] = b"Basic " + base64.b64encode(user + b":" + api_key).strip() + req = urllib2.Request(url, json_body, headers) + try: + response = urllib2.urlopen(req) + except urllib2.HTTPError as e: + module.fail_json(msg="Request Failed", reason=e.reason) + response = response.read() + module.exit_json(changed=True, annotation=response) + +def main(): + + module = AnsibleModule( + argument_spec = dict( + user = dict(required=True), + api_key = dict(required=True), + name = dict(required=False), + title = dict(required=True), + source = dict(required=False), + description = dict(required=False), + start_time = dict(required=False, default=None, type='int'), + end_time = dict(require=False, default=None, type='int'), + links = dict(type='list') + ) + ) + + post_annotation(module) + +from ansible.module_utils.basic import * +main() From c24d4acb2a80abc4ee14b9a0760ea8c2b13f312c Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 17:19:47 -0400 Subject: [PATCH 614/772] Documentation fixes + add missing license header --- 
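For reference, the module documented in these two patches posts a JSON body to the Librato annotations endpoint with HTTP basic auth. A minimal standalone sketch of the same request (Python 2 urllib2, matching the module's approach; the credentials and values shown are placeholders):

    import base64, json, urllib2

    def post_annotation(user, api_key, stream, title):
        url = 'https://metrics-api.librato.com/v1/annotations/%s' % stream
        headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Basic ' + base64.b64encode('%s:%s' % (user, api_key)),
        }
        req = urllib2.Request(url, json.dumps({'title': title}), headers)
        return urllib2.urlopen(req).read()   # urllib2.HTTPError signals a failed call

    # post_annotation('user@example.com', 'XXXX', 'code.deploy', 'v45 Deployment')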
library/monitoring/librato_annotation | 46 +++++++++++++-------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/library/monitoring/librato_annotation b/library/monitoring/librato_annotation index 2dc1ea7f002..495a2c16699 100644 --- a/library/monitoring/librato_annotation +++ b/library/monitoring/librato_annotation @@ -1,4 +1,24 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (C) Seth Edwards, 2014 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + import base64 @@ -18,64 +38,42 @@ options: description: - Librato account username required: true - default: null - aliases: [] api_key: description: - Librato account api key required: true - default: null - aliases: [] name: description: - The annotation stream name - If the annotation stream does not exist, it will be created automatically required: false - default: null - aliases: [] title: description: - The title of an annotation is a string and may contain spaces - The title should be a short, high-level summary of the annotation e.g. v45 Deployment required: true - default: null - aliases: [] source: description: - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population required: false - default: null - aliases: [] description: description: - The description contains extra meta-data about a particular annotation - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2: shipped new feature foo! required: false - default: null - aliases: [] start_time: description: - The unix timestamp indicating the the time at which the event referenced by this annotation started required: false - default: "the current time" - aliases: [] end_time: description: - The unix timestamp indicating the the time at which the event referenced by this annotation ended - For events that have a duration, this is a useful way to annotate the duration of the event required: false - default: null - aliases: [] links: description: - - Words go here - - that describe - - this option - required: true or false - default: a string or the word null - aliases: [] - - + - See examples + required: true ''' EXAMPLES = ''' From 7382e416dd2c676aaae558fb321d7fa223c80a60 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 17:20:06 -0400 Subject: [PATCH 615/772] Add module to changelog. 
--- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c3b98a1717a..8597aec3166 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ New Modules: * packaging: layman * monitoring: logentries * monitoring: rollbar_deployment +* monitoring: librato_annotation * notification: nexmo (SMS) * notification: twilio (SMS) * notification: slack (Slack.com) From 8ca70ec4872d7c148b6c7419394c1a3f7298a4c5 Mon Sep 17 00:00:00 2001 From: Andy Hill Date: Fri, 13 Dec 2013 18:44:54 -0600 Subject: [PATCH 616/772] lldp support in Ansible This PR introduces support for a lldp module. lldpd is similar to CDP and can return useful details about a server's network like ports, switches, and VLANs. --- library/net_infrastructure/lldp | 67 +++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100755 library/net_infrastructure/lldp diff --git a/library/net_infrastructure/lldp b/library/net_infrastructure/lldp new file mode 100755 index 00000000000..eb027231d84 --- /dev/null +++ b/library/net_infrastructure/lldp @@ -0,0 +1,67 @@ +#!/usr/bin/python -tt +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import subprocess + +DOCUMENTATION = ''' +--- +module: lldp +short_description: get details reported by lldp +description: + - Reads data out of lldp + +author: Andy Hill +notes: + - Requires lldpd running and lldp enabled on switches +''' + +def gather_lldp(): + cmd = ['lldpctl', '-f', 'keyvalue'] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) + (output, err) = proc.communicate() + if output: + output_dict = {} + lldp_entries = output.split("\n") + + for entry in lldp_entries: + if entry: + path, value = entry.strip().split("=", 1) + path = path.split(".") + path_components, final = path[:-1], path[-1] + + current_dict = output_dict + for path_component in path_components: + current_dict[path_component] = current_dict.get(path_component, {}) + current_dict = current_dict[path_component] + current_dict[final] = value + return output_dict + + +def main(): + module = AnsibleModule({}) + + lldp_output = gather_lldp() + try: + data = {'lldp': lldp_output['lldp']} + module.exit_json(ansible_facts=data) + except TypeError: + module.fail_json(msg="lldpctl command failed. 
is lldpd running?") + +# this is magic, see lib/ansible/module_common.py +#<> + +main() + From 5b5f2e6d45d987a97fb868d60074f36e03c9a27e Mon Sep 17 00:00:00 2001 From: Andy Hill Date: Wed, 18 Dec 2013 07:52:17 -0700 Subject: [PATCH 617/772] lldp: Added EXAMPLES --- library/net_infrastructure/lldp | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/library/net_infrastructure/lldp b/library/net_infrastructure/lldp index eb027231d84..047cd39d0b2 100755 --- a/library/net_infrastructure/lldp +++ b/library/net_infrastructure/lldp @@ -19,15 +19,31 @@ import subprocess DOCUMENTATION = ''' --- module: lldp -short_description: get details reported by lldp +short_description: get details reported by lldp description: - - Reads data out of lldp + - Reads data out of lldpctl author: Andy Hill notes: - Requires lldpd running and lldp enabled on switches ''' +EXAMPLES = ''' +# Retrieve switch/port information + - name: Gather information from lldp + lldp: + + - name: Print each switch/port + debug: msg="{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }} + with_items: lldp.keys() + +# TASK: [Print each switch/port] *********************************************************** +# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"} +# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"} +# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"} + +''' + def gather_lldp(): cmd = ['lldpctl', '-f', 'keyvalue'] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) From ae1b1838552820a6de293ab69ad1574798c1ebfc Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 28 Mar 2014 17:41:48 -0400 Subject: [PATCH 618/772] Add LLDP module --- CHANGELOG.md | 1 + library/net_infrastructure/lldp | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8597aec3166..8b27b880300 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ New Modules: * system: capabilities * net_infrastructure: bigip_facts * net_infrastructure: dnssimple +* net_infrastructure: lldp * web_infrastructure: apache2_module * cloud: digital_ocean_domain * cloud: digital_ocean_sshkey diff --git a/library/net_infrastructure/lldp b/library/net_infrastructure/lldp index 047cd39d0b2..efe4c8ab2ec 100755 --- a/library/net_infrastructure/lldp +++ b/library/net_infrastructure/lldp @@ -19,6 +19,7 @@ import subprocess DOCUMENTATION = ''' --- module: lldp +version_added: 1.6 short_description: get details reported by lldp description: - Reads data out of lldpctl @@ -76,8 +77,7 @@ def main(): except TypeError: module.fail_json(msg="lldpctl command failed. is lldpd running?") -# this is magic, see lib/ansible/module_common.py -#<> - +# import module snippets +from ansible.module_utils.basic import * main() From 6412bdf003a94a7eb60e6aa01e98315ec7a464c2 Mon Sep 17 00:00:00 2001 From: Daniel Jaouen Date: Fri, 28 Mar 2014 19:20:51 -0400 Subject: [PATCH 619/772] Update homebrew module. 
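The installed-check now parses `brew info` output instead of `brew list -m1`: a formula counts as installed when its info text mentions "Built from source" or "Poured from bottle". A condensed sketch of that check, not the module's exact method, assuming info_output holds the captured `brew info <formula>` text:

    import re

    def formula_is_installed(info_output):
        # brew info prints one of these phrases only for installed formulae.
        for line in info_output.split('\n'):
            if re.search(r'Built from source', line) or re.search(r'Poured from bottle', line):
                return True
        return False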
--- library/packaging/homebrew | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/library/packaging/homebrew b/library/packaging/homebrew index 12fbf89c0f4..249dee33f09 100644 --- a/library/packaging/homebrew +++ b/library/packaging/homebrew @@ -362,16 +362,18 @@ class Homebrew(object): cmd = [ "{brew_path}".format(brew_path=self.brew_path), - "list", - "-m1", + "info", + self.current_package, ] rc, out, err = self.module.run_command(cmd) - packages = [package for package in out.split('\n') if package] - - if rc == 0 and self.current_package in packages: - return True - else: - return False + for line in out.split('\n'): + if ( + re.search(r'Built from source', line) + or re.search(r'Poured from bottle', line) + ): + return True + + return False def _outdated_packages(self): rc, out, err = self.module.run_command([ From 7f6d2a94afd2137bf32bd2fefb3ff0657dc60c1b Mon Sep 17 00:00:00 2001 From: Daniel Jaouen Date: Fri, 28 Mar 2014 19:37:05 -0400 Subject: [PATCH 620/772] Miscellaneous fixes to homebrew module. --- library/packaging/homebrew | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/homebrew b/library/packaging/homebrew index 249dee33f09..38413fa3c4e 100644 --- a/library/packaging/homebrew +++ b/library/packaging/homebrew @@ -539,7 +539,7 @@ class Homebrew(object): cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) - if not self._current_package_is_outdated(): + if self._current_package_is_installed() and not self._current_package_is_outdated(): self.changed_count += 1 self.changed = True self.message = 'Package upgraded: {0}'.format(self.current_package) From e0f3105ef927069d61f8838e0c3635fb3593227e Mon Sep 17 00:00:00 2001 From: Ahti Kitsik Date: Sat, 29 Mar 2014 11:06:51 +0200 Subject: [PATCH 621/772] Bugfix for ufw to support "logging" properly: It's a separate parameter so updated docs and set it as mutually exclusive param. Also due to an array construction typo it was not working in any situation (ufw LOGLEVEL was passed to cmd instead of ufw logging LOGLEVEL). Also fixed doc and parameters parsing typo ("choises" should be "choices") --- library/system/ufw | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/library/system/ufw b/library/system/ufw index 73dc9e8974b..8496997b279 100644 --- a/library/system/ufw +++ b/library/system/ufw @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# (c) 2014, Ahti Kitsik # (c) 2014, Jarno Keskikangas # (c) 2013, Aleksey Ovcharenko # (c) 2013, James Martin @@ -27,7 +28,7 @@ short_description: Manage firewall with UFW description: - Manage firewall with UFW. version_added: 1.6 -author: Aleksey Ovcharenko, Jarno Keskikangas +author: Aleksey Ovcharenko, Jarno Keskikangas, Ahti Kitsik notes: - See C(man ufw) for more examples. requirements: @@ -65,12 +66,12 @@ options: description: - Add firewall rule required: false - choises: ['allow', 'deny', 'reject', 'limit'] + choices: ['allow', 'deny', 'reject', 'limit'] log: description: - Log new connections matched to this rule required: false - choises: ['yes', 'no'] + choices: ['yes', 'no'] from_ip: description: - Source IP address. @@ -111,7 +112,10 @@ options: EXAMPLES = ''' # Allow everything and enable UFW -ufw: state=enable policy=allow logging=on +ufw: state=enabled policy=allow + +# Set logging +ufw: logging=on # Sometimes it is desirable to let the sender know when traffic is # being denied, rather than simply ignoring it. 
In these cases, use @@ -163,8 +167,8 @@ def main(): argument_spec = dict( state = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reset']), default = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']), - logging = dict(default=None, choises=['on', 'off', 'low', 'medium', 'high', 'full']), - direction = dict(default=None, choises=['in', 'incoming', 'out', 'outgoing']), + logging = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']), + direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing']), delete = dict(default=False, type='bool'), insert = dict(default=None), rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']), @@ -178,13 +182,14 @@ def main(): app = dict(default=None, aliases=['name']) ), supports_check_mode = True, - mutually_exclusive = [['app', 'proto']] + mutually_exclusive = [['app', 'proto', 'logging']] ) cmds = [] def execute(cmd): cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd))) + cmds.append(cmd) (rc, out, err) = module.run_command(cmd) @@ -217,7 +222,7 @@ def main(): execute(cmd + [['-f'], [states[value]]]) elif command == 'logging': - execute(cmd + [[command, value]]) + execute(cmd + [[command], [value]]) elif command == 'default': execute(cmd + [[command], [value], [params['direction']]]) From f6abc17e8a20933611d13bb763ab626def1e7378 Mon Sep 17 00:00:00 2001 From: cgtx Date: Sat, 29 Mar 2014 18:14:56 -0500 Subject: [PATCH 622/772] backreferences should be double escaped --- library/files/lineinfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/files/lineinfile b/library/files/lineinfile index bad0cf093ce..06e946f130b 100644 --- a/library/files/lineinfile +++ b/library/files/lineinfile @@ -138,7 +138,7 @@ EXAMPLES = r""" # Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs. - lineinfile: "dest=/etc/sudoers state=present regexp='^%wheel' line='%wheel ALL=(ALL) NOPASSWD: ALL'" -- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes +- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\\1Xms${xms}m\\3' backrefs=yes # Validate a the sudoers file before saving - lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s' From 6c8d8a746fc8b8a9169221850ab1ce6f29ad8c9f Mon Sep 17 00:00:00 2001 From: James Laska Date: Sat, 29 Mar 2014 19:15:42 -0400 Subject: [PATCH 623/772] Fix YAML docstring that broke webdocs --- library/cloud/ec2_vpc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc index 66685e3cf61..50cc3dd6dd4 100644 --- a/library/cloud/ec2_vpc +++ b/library/cloud/ec2_vpc @@ -46,7 +46,7 @@ options: choices: [ "yes", "no" ] subnets: description: - - "A dictionary array of subnets to add of the form: { cidr: ..., az: ... , resource_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: resource_tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed." + - 'A dictionary array of subnets to add of the form: { cidr: ..., az: ... , resource_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: resource_tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. 
All VPC subnets not in this list will be removed.' required: false default: null aliases: [] @@ -58,7 +58,7 @@ options: aliases: [] resource_tags: description: - - A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exits, a new VPC will be created. VPC tags not on this list will be ignored. + - 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exits, a new VPC will be created. VPC tags not on this list will be ignored.' required: false default: null aliases: [] @@ -72,7 +72,7 @@ options: aliases: [] route_tables: description: - - "A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. This module is currently unable to affect the 'main' route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly." + - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. This module is currently unable to affect the 'main' route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly.' required: false default: null aliases: [] From 1a9c5947edbaafcba78474f686024c9b12858dd3 Mon Sep 17 00:00:00 2001 From: Adam Nielsen Date: Sun, 30 Mar 2014 11:46:13 +1000 Subject: [PATCH 624/772] More descriptive error message if init system is unknown --- library/system/service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/service b/library/system/service index c256b70c435..9820c28a06d 100644 --- a/library/system/service +++ b/library/system/service @@ -565,7 +565,7 @@ class LinuxService(Service): def service_enable(self): if self.enable_cmd is None: - self.module.fail_json(msg='service name not recognized') + self.module.fail_json(msg='unknown init system, cannot enable service') # FIXME: we use chkconfig or systemctl # to decide whether to run the command here but need something From 6e809c2be360b9684baf72f52861add1dd5c1a45 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 29 Mar 2014 23:47:11 -0500 Subject: [PATCH 625/772] Removing wait loop around initial state check during registration Also fixed a bug in which the state parameter was marked as a 'bool' type, which was breaking execution of the module. 
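A hedged note on the type fix: with type 'bool' in the spec, AnsibleModule runs the state argument through its boolean coercion, so the string values this module actually expects ('present'/'absent') fail validation before the module body runs; dropping the type restores plain string handling. The relevant fragment of the corrected spec, as it appears in the diff below:

    argument_spec.update(dict(
        state={'required': True},    # plain string, e.g. 'present' or 'absent'
        instance_id={'required': True},
        ec2_elbs={'default': None, 'required': False, 'type': 'list'},
    ))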
Fixes #6543 --- library/cloud/ec2_elb | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/library/cloud/ec2_elb b/library/cloud/ec2_elb index 4fc6910691b..21d771221b5 100644 --- a/library/cloud/ec2_elb +++ b/library/cloud/ec2_elb @@ -162,18 +162,7 @@ class ElbManager: """Register the instance for all ELBs and wait for the ELB to report the instance in-service""" for lb in self.lbs: - if wait: - tries = 1 - while True: - initial_state = self._get_instance_health(lb) - if initial_state: - break - time.sleep(1) - tries += 1 - # FIXME: this should be configurable, but since it didn't - # wait at all before this is at least better - if tries > 10: - self.module.fail_json(msg='failed to find the initial state of the load balancer') + initial_state = self._get_instance_health(lb) if enable_availability_zone: self._enable_availailability_zone(lb) @@ -313,7 +302,7 @@ class ElbManager: def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( - state={'required': True, 'type' : 'bool'}, + state={'required': True}, instance_id={'required': True}, ec2_elbs={'default': None, 'required': False, 'type':'list'}, enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, From b698f7a44b1480fa857656fdc5b69ce49d306be0 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 30 Mar 2014 01:34:33 -0500 Subject: [PATCH 626/772] Add option to create host_key directory if it doesn't exist Fixes #6731 --- lib/ansible/module_utils/known_hosts.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 68ef2828319..62600d7b4da 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -30,7 +30,7 @@ import hmac from hashlib import sha1 HASHED_KEY_MAGIC = "|1|" -def add_git_host_key(module, url, accept_hostkey=True): +def add_git_host_key(module, url, accept_hostkey=True, create_dir=True): """ idempotently add a git url hostkey """ @@ -40,7 +40,7 @@ def add_git_host_key(module, url, accept_hostkey=True): known_host = check_hostkey(module, fqdn) if not known_host: if accept_hostkey: - rc, out, err = add_host_key(module, fqdn) + rc, out, err = add_host_key(module, fqdn, create_dir=create_dir) if rc != 0: module.fail_json(msg="failed to add %s hostkey: %s" % (fqdn, out + err)) else: @@ -120,7 +120,7 @@ def not_in_host_file(self, host): return True -def add_host_key(module, fqdn, key_type="rsa"): +def add_host_key(module, fqdn, key_type="rsa", create_dir=False): """ use ssh-keyscan to add the hostkey """ @@ -136,7 +136,15 @@ def add_host_key(module, fqdn, key_type="rsa"): user_ssh_dir = os.path.expanduser(user_ssh_dir) if not os.path.exists(user_ssh_dir): - module.fail_json(msg="%s does not exist" % user_ssh_dir) + if create_dir: + try: + os.makedirs(user_ssh_dir, 0700) + except: + module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir) + else: + module.fail_json(msg="%s does not exist" % user_ssh_dir) + elif not os.path.isdir(user_ssh_dir): + module.fail_json(msg="%s is not a directory" % user_ssh_dir) this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn) From f96618c96704baf2a515034694d7038f9bf217e4 Mon Sep 17 00:00:00 2001 From: James Laska Date: Sun, 30 Mar 2014 14:26:18 -0400 Subject: [PATCH 627/772] Use proper YAML quotes to fix webdocs --- library/cloud/ec2_vpc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2_vpc 
b/library/cloud/ec2_vpc index 50cc3dd6dd4..44d207b3896 100644 --- a/library/cloud/ec2_vpc +++ b/library/cloud/ec2_vpc @@ -72,7 +72,7 @@ options: aliases: [] route_tables: description: - - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. This module is currently unable to affect the 'main' route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly.' + - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly.' required: false default: null aliases: [] From 034ac93ca209ff5a5affeccbe91e7e20209edb05 Mon Sep 17 00:00:00 2001 From: Matt Way Date: Sun, 30 Mar 2014 15:27:29 -0400 Subject: [PATCH 628/772] don't parse empty stream chunks --- library/cloud/docker_image | 3 +++ 1 file changed, 3 insertions(+) diff --git a/library/cloud/docker_image b/library/cloud/docker_image index 5fcdfad573c..c2ae423fe9a 100644 --- a/library/cloud/docker_image +++ b/library/cloud/docker_image @@ -137,6 +137,9 @@ class DockerImageManager: self.changed = True for chunk in stream: + if not chunk: + continue + chunk_json = json.loads(chunk) if 'error' in chunk_json: From 2dc315333060cd8c002c4b633f0d35a7ce4429f9 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sun, 30 Mar 2014 22:33:24 +0200 Subject: [PATCH 629/772] fix typo in the capabilities module description --- library/system/capabilities | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/capabilities b/library/system/capabilities index 872473001c2..f4a9f62c0d0 100644 --- a/library/system/capabilities +++ b/library/system/capabilities @@ -24,7 +24,7 @@ DOCUMENTATION = ''' module: capabilities short_description: Manage Linux capabilities description: - - This module manipulates files' priviliges using the Linux capabilities(7) system. + - This module manipulates files privileges using the Linux capabilities(7) system. 
version_added: "1.6" options: path: From 9309b6b0e469f721717b74e673a601a57f1e9879 Mon Sep 17 00:00:00 2001 From: Eric Dahl Date: Sun, 30 Mar 2014 18:45:55 -0500 Subject: [PATCH 630/772] Documentation: fix various small typos --- docsite/rst/faq.rst | 2 +- docsite/rst/guide_rax.rst | 2 +- docsite/rst/guide_rolling_upgrade.rst | 6 +++--- docsite/rst/guide_vagrant.rst | 2 +- docsite/rst/guides.rst | 2 +- docsite/rst/guru.rst | 2 +- docsite/rst/playbooks_variables.rst | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index 13ab9437cdb..af9d4930600 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -246,7 +246,7 @@ Great question! Documentation for Ansible is kept in the main project git repos How do I keep secret data in my playbook? +++++++++++++++++++++++++++++++++++++++++ -If you would like to keep secret data in your Ansible content and still share it publically or keep things in source control, see :doc:`playbooks_vault`. +If you would like to keep secret data in your Ansible content and still share it publicly or keep things in source control, see :doc:`playbooks_vault`. .. _i_dont_see_my_question: diff --git a/docsite/rst/guide_rax.rst b/docsite/rst/guide_rax.rst index 37ca6b796c6..66381bbf843 100644 --- a/docsite/rst/guide_rax.rst +++ b/docsite/rst/guide_rax.rst @@ -579,7 +579,7 @@ Autoscaling with Tower :doc:`tower` also contains a very nice feature for auto-scaling use cases. In this mode, a simple curl script can call a defined URL and the server will "dial out" to the requester -and configure an instance that is spinning up. This can be a great way to reconfigure ephmeral nodes. +and configure an instance that is spinning up. This can be a great way to reconfigure ephemeral nodes. See the Tower documentation for more details. A benefit of using the callback in Tower over pull mode is that job results are still centrally recorded diff --git a/docsite/rst/guide_rolling_upgrade.rst b/docsite/rst/guide_rolling_upgrade.rst index b464ef11a42..f730e8d7899 100644 --- a/docsite/rst/guide_rolling_upgrade.rst +++ b/docsite/rst/guide_rolling_upgrade.rst @@ -172,7 +172,7 @@ Here's another example, from the same template:: {% endfor %} This loops over all of the hosts in the group called ``monitoring``, and adds an ACCEPT line for -each monitoring hosts's default IPV4 address to the current machine's iptables configuration, so that Nagios can monitor those hosts. +each monitoring hosts' default IPV4 address to the current machine's iptables configuration, so that Nagios can monitor those hosts. You can learn a lot more about Jinja2 and its capabilities `here `_, and you can read more about Ansible variables in general in the :doc:`playbooks_variables` section. @@ -184,7 +184,7 @@ The Rolling Upgrade Now you have a fully-deployed site with web servers, a load balancer, and monitoring. How do you update it? This is where Ansible's orchestration features come into play. While some applications use the term 'orchestration' to mean basic ordering or command-blasting, Ansible -referes to orchestration as 'conducting machines like an orchestra', and has a pretty sophisticated engine for it. +refers to orchestration as 'conducting machines like an orchestra', and has a pretty sophisticated engine for it. Ansible has the capability to do operations on multi-tier applications in a coordinated way, making it easy to orchestrate a sophisticated zero-downtime rolling upgrade of our web application. 
This is implemented in a separate playbook, called ``rolling_upgrade.yml``. @@ -201,7 +201,7 @@ The next part is the update play. The first part looks like this:: user: root serial: 1 -This is just a normal play definition, operating on the ``webservers`` group. The ``serial`` keyword tells Ansible how many servers to operate on at once. If it's not specified, Ansible will paralleize these operations up to the default "forks" limit specified in the configuration file. But for a zero-downtime rolling upgrade, you may not want to operate on that many hosts at once. If you had just a handful of webservers, you may want to set ``serial`` to 1, for one host at a time. If you have 100, maybe you could set ``serial`` to 10, for ten at a time. +This is just a normal play definition, operating on the ``webservers`` group. The ``serial`` keyword tells Ansible how many servers to operate on at once. If it's not specified, Ansible will parallelize these operations up to the default "forks" limit specified in the configuration file. But for a zero-downtime rolling upgrade, you may not want to operate on that many hosts at once. If you had just a handful of webservers, you may want to set ``serial`` to 1, for one host at a time. If you have 100, maybe you could set ``serial`` to 10, for ten at a time. Here is the next part of the update play:: diff --git a/docsite/rst/guide_vagrant.rst b/docsite/rst/guide_vagrant.rst index 4fb40d569f2..9472b74dd2f 100644 --- a/docsite/rst/guide_vagrant.rst +++ b/docsite/rst/guide_vagrant.rst @@ -7,7 +7,7 @@ Introduction ```````````` Vagrant is a tool to manage virtual machine environments, and allows you to -configure and use reproducable work environments on top of various +configure and use reproducible work environments on top of various virtualization and cloud platforms. It also has integration with Ansible as a provisioner for these virtual machines, and the two tools work together well. diff --git a/docsite/rst/guides.rst b/docsite/rst/guides.rst index 0585d966097..cf9c821bdbb 100644 --- a/docsite/rst/guides.rst +++ b/docsite/rst/guides.rst @@ -12,5 +12,5 @@ This section is new and evolving. The idea here is explore particular use cases guide_vagrant guide_rolling_upgrade -Pending topics may include: Docker, Jenkins, Google Compute Engine, Linode/Digital Ocean, Continous Deployment, and more. +Pending topics may include: Docker, Jenkins, Google Compute Engine, Linode/Digital Ocean, Continuous Deployment, and more. diff --git a/docsite/rst/guru.rst b/docsite/rst/guru.rst index 4267396c94a..e4f07fd3478 100644 --- a/docsite/rst/guru.rst +++ b/docsite/rst/guru.rst @@ -3,7 +3,7 @@ Ansible Guru While many users should be able to get on fine with the documentation, mailing list, and IRC, sometimes you want a bit more. -`Ansible Guru `_ is an offering from Ansible, Inc that helps users who would like more dedicated help with Ansible, including building playbooks, best practices, architecture suggestions, and more -- all from our awesome support and services team. It also includes some useful discounts and also some free T-shirts, though you shoudn't get it just for the free shirts! It's a great way to train up to becoming an Ansible expert. +`Ansible Guru `_ is an offering from Ansible, Inc that helps users who would like more dedicated help with Ansible, including building playbooks, best practices, architecture suggestions, and more -- all from our awesome support and services team. 
It also includes some useful discounts and also some free T-shirts, though you shouldn't get it just for the free shirts! It's a great way to train up to becoming an Ansible expert. For those interested, click through the link above. You can sign up in minutes! diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 0ab668135cf..3b5907e3301 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -880,7 +880,7 @@ See :doc:`playbooks_roles` for more info about this:: --- # file: roles/x/defaults/main.yml - # if not overriden in inventory or as a parameter, this is the value that will be used + # if not overridden in inventory or as a parameter, this is the value that will be used http_port: 80 if you are writing a role and want to ensure the value in the role is absolutely used in that role, and is not going to be overridden From 6792c76c50c38c50b0da3d22e5503f0ffdcbb3ca Mon Sep 17 00:00:00 2001 From: James Tanner Date: Sun, 30 Mar 2014 22:28:31 -0400 Subject: [PATCH 631/772] Fix librato_annotation docstring --- library/monitoring/librato_annotation | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/monitoring/librato_annotation b/library/monitoring/librato_annotation index 495a2c16699..63979f41bfb 100644 --- a/library/monitoring/librato_annotation +++ b/library/monitoring/librato_annotation @@ -59,7 +59,7 @@ options: description: description: - The description contains extra meta-data about a particular annotation - - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2: shipped new feature foo! + - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo! required: false start_time: description: From 48dc27ea7424fb16b3277e4b0736fc1a730b8d9b Mon Sep 17 00:00:00 2001 From: Jim Yeh Date: Mon, 31 Mar 2014 11:05:31 +0800 Subject: [PATCH 632/772] Fix the default url of get_xml function. 
* This enables a user to configure the uri in the get_xml function --- library/cloud/virt | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/library/cloud/virt b/library/cloud/virt index 78d2aa1ab91..f1d36fc1964 100644 --- a/library/cloud/virt +++ b/library/cloud/virt @@ -108,7 +108,7 @@ VIRT_STATE_NAME_MAP = { 6 : "crashed" } -class VMNotFound(Exception): +class VMNotFound(Exception): pass class LibvirtConnection(object): @@ -197,6 +197,10 @@ class LibvirtConnection(object): def get_type(self): return self.conn.getType() + def get_xml(self, vmid): + vm = self.conn.lookupByName(vmid) + return vm.XMLDesc(0) + def get_maxVcpus(self, vmid): vm = self.conn.lookupByName(vmid) return vm.maxVcpus() @@ -361,14 +365,8 @@ class Virt(object): Return an xml describing vm config returned by a libvirt call """ - conn = libvirt.openReadOnly(None) - if not conn: - return (-1,'Failed to open connection to the hypervisor') - try: - domV = conn.lookupByName(vmid) - except: - return (-1,'Failed to find the main domain') - return domV.XMLDesc(0) + self.__get_conn() + return self.conn.get_xml(vmid) def get_maxVcpus(self, vmid): """ From 225eca6311cc559c3cc9eb0cb3e64b184929cec5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 30 Mar 2014 23:08:53 -0500 Subject: [PATCH 633/772] Adding an options section to the docstring in the lldp module --- library/net_infrastructure/lldp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/net_infrastructure/lldp b/library/net_infrastructure/lldp index efe4c8ab2ec..6b8836852f6 100755 --- a/library/net_infrastructure/lldp +++ b/library/net_infrastructure/lldp @@ -23,7 +23,7 @@ version_added: 1.6 short_description: get details reported by lldp description: - Reads data out of lldpctl - +options: {} author: Andy Hill notes: - Requires lldpd running and lldp enabled on switches From d6943e46ef4b4009f1ed029499c9ac7f03013e29 Mon Sep 17 00:00:00 2001 From: Sven Schliesing Date: Mon, 31 Mar 2014 14:32:07 +0200 Subject: [PATCH 634/772] Example for queueing a build in Jenkins --- library/network/uri | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/library/network/uri b/library/network/uri index 71954fc20f3..6f5ab725307 100644 --- a/library/network/uri +++ b/library/network/uri @@ -160,6 +160,14 @@ EXAMPLES = ''' # access the app in later tasks. - action: uri url=https://your.form.based.auth.example.com/dashboard.php method=GET return_content=yes HEADER_Cookie="{{login.set_cookie}}" + +# Queue build of a project in Jenkins: +- action: uri url=http://{{jenkins.host}}/job/{{jenkins.job}}/build?token={{jenkins.token}} + method=GET user={{jenkins.user}} password={{jenkins.password}} force_basic_auth=yes status_code=201 + +# Call Jenkins from host where you run ansible.
Handy if the Henkins host isn't reachable from the target machine +- local_action: uri url=http://{{jenkins.host}}/job/{{jenkins.job}}/build?token={{jenkins.token}} + method=GET user={{jenkins.user}} password={{jenkins.password}} force_basic_auth=yes status_code=201 ''' HAS_HTTPLIB2 = True From fb10ca73dc223afb98ba65f1384aaea8b62843e2 Mon Sep 17 00:00:00 2001 From: Sven Schliesing Date: Mon, 31 Mar 2014 14:32:56 +0200 Subject: [PATCH 635/772] Update uri --- library/network/uri | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/network/uri b/library/network/uri index 6f5ab725307..3098c5f3902 100644 --- a/library/network/uri +++ b/library/network/uri @@ -165,7 +165,7 @@ EXAMPLES = ''' - action: uri url=http://{{jenkins.host}}/job/{{jenkins.job}}/build?token={{jenkins.token}} method=GET user={{jenkins.user}} password={{jenkins.password}} force_basic_auth=yes status_code=201 -# Call Jenkins from host where you run ansible. Handy if the Henkins host isn't reachable from the target machine +# Call Jenkins from host where you run ansible. Handy if the Jenkins host isn't reachable from the target machine: - local_action: uri url=http://{{jenkins.host}}/job/{{jenkins.job}}/build?token={{jenkins.token}} method=GET user={{jenkins.user}} password={{jenkins.password}} force_basic_auth=yes status_code=201 ''' From b07113f358710008fdace2026b2373592608ef0c Mon Sep 17 00:00:00 2001 From: Till Maas Date: Mon, 31 Mar 2014 13:20:00 +0200 Subject: [PATCH 636/772] ssh: Properly check for wrong su password --- lib/ansible/runner/connection_plugins/ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index 876f2063848..4b9ea6307a3 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -161,7 +161,7 @@ class Connection(object): if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): raise errors.AnsibleError('Incorrect sudo password') - if self.runner.su and su and self.runner.sudo_pass: + if self.runner.su and su and self.runner.su_pass: incorrect_password = gettext.dgettext( "su", "Sorry") if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): From 2492c2714a8dfe5dc8de083580a5dcafd61ddb47 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 31 Mar 2014 12:25:40 -0400 Subject: [PATCH 637/772] Fixes #5307 set relative paths to the files dir for roles and the inventory dir for non-role tasks --- .../runner/action_plugins/synchronize.py | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/lib/ansible/runner/action_plugins/synchronize.py b/lib/ansible/runner/action_plugins/synchronize.py index 8bd0bcd0f5f..63331760e44 100644 --- a/lib/ansible/runner/action_plugins/synchronize.py +++ b/lib/ansible/runner/action_plugins/synchronize.py @@ -26,6 +26,19 @@ class ActionModule(object): def __init__(self, runner): self.runner = runner + self.inject = None + + def _get_absolute_path(self, path=None): + if 'vars' in self.inject: + if '_original_file' in self.inject['vars']: + # roles + path = utils.path_dwim_relative(self.inject['_original_file'], 'files', path, self.runner.basedir) + elif 'inventory_dir' in self.inject['vars']: + # non-roles + abs_dir = os.path.abspath(self.inject['vars']['inventory_dir']) + path = os.path.join(abs_dir, path) + + return path def _process_origin(self, host, path, user): @@ -35,6 +48,9 @@ class ActionModule(object): else: return '%s:%s' % (host, path) else: + if not ':' 
in path: + if not path.startswith('/'): + path = self._get_absolute_path(path=path) return path def _process_remote(self, host, path, user): @@ -48,10 +64,16 @@ class ActionModule(object): else: return_data = path + if not ':' in return_data: + if not return_data.startswith('/'): + return_data = self._get_absolute_path(path=return_data) + return return_data def setup(self, module_name, inject): ''' Always default to localhost as delegate if None defined ''' + + self.inject = inject # Store original transport and sudo values. self.original_transport = inject.get('ansible_connection', self.runner.transport) @@ -71,6 +93,8 @@ class ActionModule(object): ''' generates params and passes them on to the rsync module ''' + self.inject = inject + # load up options options = {} if complex_args: From 811553930aa86ca600286775617f1185aad269b4 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 31 Mar 2014 13:02:55 -0400 Subject: [PATCH 638/772] Update changelog for synchronize path changes --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b27b880300..c177f545ba1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,7 @@ Other notable changes: * s3 module can specify metadata * security token additions to ec2 modules * setup module code moved into module_utils/, facts now accessible by other modules +* synchronize module sets relative dirs based on inventory or role path * misc bugfixes and other parameters ## 1.5.3 "Love Walks In" - March 13, 2014 From 7f7e2a6941aad3d7fab7f07a096fdc0f1b8f2331 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 31 Mar 2014 13:17:49 -0400 Subject: [PATCH 639/772] Update changelog for #5910 apt now has deb parameter --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c177f545ba1..8a597124c84 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ Major features/changes: - multiple users can connect with different keys, when `accelerate_multi_key = yes` is specified in the ansible.cfg. - daemon lifetime is now based on the time from the last activity, not the time from the daemon's launch. * ansible-playbook now accepts --force-handlers to run handlers even if tasks result in failures +* apt module now accepts "deb" parameter to install local dpkg files New Modules: From 98b1cb8297ec46d832b8fb33ae91fa3e4943bc24 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 31 Mar 2014 13:51:21 -0400 Subject: [PATCH 640/772] Update changelog for #5958 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a597124c84..e32a4471df8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ Major features/changes: - daemon lifetime is now based on the time from the last activity, not the time from the daemon's launch. 
* ansible-playbook now accepts --force-handlers to run handlers even if tasks result in failures * apt module now accepts "deb" parameter to install local dpkg files +* regex_replace filter plugin added New Modules: From 06920b2e47668898f7b4ec13a2f012afa7fece94 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 31 Mar 2014 14:12:16 -0400 Subject: [PATCH 641/772] Add docs examples for regex_replace --- docsite/rst/playbooks_variables.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 3b5907e3301..44e9f1edca8 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -240,6 +240,14 @@ doesn't know it is a boolean value:: - debug: msg=test when: some_string_value | bool +To replace text in a string with regex, use the "regex_replace" filter:: + + # convert "ansible" to "able" + {{ 'ansible' | regex_replace('^a.*i(.*)$', 'a\\1') }} + + # convert "foobar" to "bar" + {{ 'foobar' | regex_replace('^f.*o(.*)$', '\\1') }} + A few useful filters are typically added with each new Ansible release. The development documentation shows how to extend Ansible filters by writing your own as plugins, though in general, we encourage new ones to be added to core so everyone can make use of them. From f23bb344aa01264bc3f7768c0e81b240f12fe6b3 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 31 Mar 2014 13:37:43 -0500 Subject: [PATCH 642/772] Catch errors when duping stdin Fixes #6657 --- lib/ansible/runner/__init__.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 502df176df8..8227083c55a 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -507,10 +507,15 @@ class Runner(object): fileno = None try: + self._new_stdin = new_stdin if not new_stdin and fileno is not None: - self._new_stdin = os.fdopen(os.dup(fileno)) - else: - self._new_stdin = new_stdin + try: + self._new_stdin = os.fdopen(os.dup(fileno)) + except: + # couldn't dupe stdin, most likely because it's + # not a valid file descriptor, so we just rely on + # using the one that was passed in + pass exec_rc = self._executor_internal(host, new_stdin) if type(exec_rc) != ReturnData: @@ -1094,10 +1099,15 @@ class Runner(object): workers = [] for i in range(self.forks): + new_stdin = None if fileno is not None: - new_stdin = os.fdopen(os.dup(fileno)) - else: - new_stdin = None + try: + new_stdin = os.fdopen(os.dup(fileno)) + except: + # couldn't dupe stdin, most likely because it's + # not a valid file descriptor, so we just rely on + # using the one that was passed in + pass prc = multiprocessing.Process(target=_executor_hook, args=(job_queue, result_queue, new_stdin)) prc.start() From b9c044b8c9a92e88a4870e5f53fc0f56e9e5d72f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 31 Mar 2014 13:50:23 -0500 Subject: [PATCH 643/772] Making the exception caught during os.dup of stdin explicit Related to f23bb34 --- lib/ansible/runner/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 8227083c55a..f6b607e1d2a 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -511,7 +511,7 @@ class Runner(object): if not new_stdin and fileno is not None: try: self._new_stdin = os.fdopen(os.dup(fileno)) - except: + except OSError, e: # couldn't dupe stdin, most likely 
because it's # not a valid file descriptor, so we just rely on # using the one that was passed in @@ -1103,7 +1103,7 @@ class Runner(object): if fileno is not None: try: new_stdin = os.fdopen(os.dup(fileno)) - except: + except OSError, e: # couldn't dupe stdin, most likely because it's # not a valid file descriptor, so we just rely on # using the one that was passed in From 612a1a64f07e8724c08f8b438d5297b58fd8821d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 31 Mar 2014 15:31:10 -0500 Subject: [PATCH 644/772] Apply extra vars after all other vars have been merged in a play Fixes #6677 --- lib/ansible/playbook/play.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 53f097f5c86..155994ef19c 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -93,6 +93,10 @@ class Play(object): self._update_vars_files_for_host(None) + # apply any extra_vars specified on the command line now + if type(self.playbook.extra_vars) == dict: + self.vars = utils.combine_vars(self.vars, self.playbook.extra_vars) + # template everything to be efficient, but do not pre-mature template # tasks/handlers as they may have inventory scope overrides _tasks = ds.pop('tasks', []) @@ -684,9 +688,6 @@ class Play(object): else: raise errors.AnsibleError("'vars_prompt' section is malformed, see docs") - if type(self.playbook.extra_vars) == dict: - vars = utils.combine_vars(vars, self.playbook.extra_vars) - return vars # ************************************************* From 33eec0dc55760e82f2fb884bf5517146869ccc63 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 31 Mar 2014 16:49:03 -0400 Subject: [PATCH 645/772] Fixes #6398 Add missing parameters to ansible-playbook manpage --- docs/man/man1/ansible-playbook.1 | 67 ++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/docs/man/man1/ansible-playbook.1 b/docs/man/man1/ansible-playbook.1 index 2d221946a61..f435627f798 100644 --- a/docs/man/man1/ansible-playbook.1 +++ b/docs/man/man1/ansible-playbook.1 @@ -91,6 +91,66 @@ Prompt for the password to use for playbook plays that request sudo access, if a Desired sudo user (default=root)\&. .RE .PP +\fB\-S\fR, \fB\-\-su\fR +.RS 4 +run operations with su\&. +.RE +.PP +\fB\-\-ask\-su\-pass\fR +.RS 4 +Prompt for the password to use for playbook plays that request su access, if any\&. +.RE +.PP +\fB\-R\fR, \fISU_USER\fR, \fB\-\-sudo\-user=\fR\fISU_USER\fR +.RS 4 +Desired su user (default=root)\&. +.RE +.PP +\fB\-\-ask\-vault\-pass\fR +.RS 4 +Ask for vault password\&. +.RE +.PP +\fB\-\-vault\-password\-file=\fR\fIVAULT_PASSWORD_FILE\fR +.RS 4 +Vault password file\&. +.RE +.PP +\fB\-\-force\-handlers\fR +.RS 4 +Run play handlers even if a task fails\&. +.RE +.PP +\fB\-\-list\-hosts\fR +.RS 4 +Outputs a list of matching hosts without executing anything else\&. +.RE +.PP +\fB\-\-list\-tasks\fR +.RS 4 +List all tasks that would be executed\&. +.RE +.PP +\fB\-\-start\-at\-task=\fR\fISTART_AT\fR +.RS 4 +Start the playbook at the task matching this name\&. +.RE +.PP +\fB\-\-step\fR +.RS 4 +one-step-at-a-time: confirm each task before running\&. +.RE +.PP +\fB\-\-syntax\-check\fR +.RS 4 +Perform a syntax check on the playbook, but do not execute it\&. +.RE +.PP +\fB\-\-private\-key\fR +.RS 4 +Use this file to authenticate the connection\&. +.RE +.PP \fB\-t\fR, \fITAGS\fR, \fB\fI\-\-tags=\fR\fR\fB\*(AqTAGS\fR .RS 4 Only run plays and tasks tagged with these values\&. 
@@ -147,6 +207,13 @@ is mostly useful for crontab or kickstarts\&. .RS 4 Further limits the selected host/group patterns\&. .RE + +.PP +\fB\-\-version\fR +.RS 4 +Show program's version number and exit\&. +.RE + .SH "ENVIRONMENT" .sp The following environment variables may be specified\&. From 57acf104c661da111d6d749118b090e69eb3994b Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 31 Mar 2014 18:48:28 -0400 Subject: [PATCH 646/772] Update CONTRIBUTING.md Disclosure policy should really be up here, so fixing that. --- CONTRIBUTING.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 76aa8dc5167..ca27dda2d4f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -89,6 +89,8 @@ required. You're now live! Reporting A Bug --------------- +Ansible practices responsible disclosure - if this is a security related bug, email security@ansible.com instead of filing a ticket or posting to the Google Group and you will recieve a prompt response. + Bugs should be reported to [github.com/ansible/ansible](http://github.com/ansible/ansible) after signing up for a free github account. Before reporting a bug, please use the bug/issue search to see if the issue has already been reported. From 2cae7472ad1845ac10531e7e5c098dc8d3a5c693 Mon Sep 17 00:00:00 2001 From: Till Maas Date: Tue, 1 Apr 2014 09:23:52 +0200 Subject: [PATCH 647/772] ssh: propagate prompt to _communicate() _communicate() uses the prompt variable to detect wrong passwords early, therefore it needs to be passed to it from exec_command(). --- lib/ansible/runner/connection_plugins/ssh.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index 876f2063848..5c60f884675 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -258,6 +258,7 @@ class Connection(object): sudocmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd) ssh_cmd.append(sudocmd) elif not self.runner.sudo or not sudoable: + prompt = None if executable: ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd)) else: @@ -327,7 +328,7 @@ class Connection(object): elif su: stdin.write(self.runner.su_pass + '\n') - (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable) + (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt) if C.HOST_KEY_CHECKING and not_in_host_file: # lock around the initial SSH connectivity so the user prompt about whether to add From e6cb32f284bb689d2128f4ac8486d8286b6061ea Mon Sep 17 00:00:00 2001 From: Till Maas Date: Wed, 26 Feb 2014 00:02:42 +0100 Subject: [PATCH 648/772] ssh connection plugin: Make comments more verbose Explain _communicate() more. --- lib/ansible/runner/connection_plugins/ssh.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index 876f2063848..2526f560ab4 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -151,6 +151,7 @@ class Connection(object): stdin.close() except: raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. 
Make sure this host can be reached over ssh') + # Read stdout/stderr from process while True: rfd, wfd, efd = select.select(rpipes, [], rpipes, 1) @@ -177,17 +178,22 @@ class Connection(object): stderr += dat if dat == '': rpipes.remove(p.stderr) - # only break out if we've emptied the pipes, or there is nothing to - # read from and the process has finished. + # only break out if no pipes are left to read or + # the pipes are completely read and + # the process is terminated if (not rpipes or not rfd) and p.poll() is not None: break - # Calling wait while there are still pipes to read can cause a lock + # No pipes are left to read but process is not yet terminated + # Only then it is safe to wait for the process to be finished + # NOTE: Actually p.poll() is always None here if rpipes is empty elif not rpipes and p.poll() == None: p.wait() - # the process has finished and the pipes are empty, - # if we loop and do the select it waits all the timeout + # The process is terminated. Since no pipes to read from are + # left, there is no need to call select() again. break - stdin.close() # close stdin after we read from stdout (see also issue #848) + # close stdin after process is terminated and stdout/stderr are read + # completely (see also issue #848) + stdin.close() return (p.returncode, stdout, stderr) def not_in_host_file(self, host): From c8e80b2aa497199e7ada772837bb4bdc8c3a924a Mon Sep 17 00:00:00 2001 From: Sven Schliesing Date: Tue, 1 Apr 2014 13:55:58 +0200 Subject: [PATCH 649/772] Output URL for debugging --- library/packaging/apt_key | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/apt_key b/library/packaging/apt_key index f0d7f8ba81f..c45e76bf5fc 100644 --- a/library/packaging/apt_key +++ b/library/packaging/apt_key @@ -148,7 +148,7 @@ def download_key(module, url): rsp, info = fetch_url(module, url) return rsp.read() except Exception: - module.fail_json(msg="error getting key id from url", traceback=format_exc()) + module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc()) def import_key(module, keyserver, key_id): cmd = "apt-key adv --keyserver %s --recv %s" % (keyserver, key_id) From 2387f044e40d13b9bb78d09069f023c4059ceacd Mon Sep 17 00:00:00 2001 From: jjshoe Date: Tue, 1 Apr 2014 07:49:51 -0500 Subject: [PATCH 650/772] module_retention isn't defined anywhere, including the help. --- library/cloud/rds | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/rds b/library/cloud/rds index 0e2c8ff2131..6c79b800fe6 100644 --- a/library/cloud/rds +++ b/library/cloud/rds @@ -343,7 +343,7 @@ def main(): maint_window = module.params.get('maint_window') subnet = module.params.get('subnet') backup_window = module.params.get('backup_window') - backup_retention = module.params.get('module_retention') + backup_retention = module.params.get('backup_retention') region = module.params.get('region') zone = module.params.get('zone') aws_secret_key = module.params.get('aws_secret_key') From 134c449cf6953187014af5d91d3b80f0f7ce82de Mon Sep 17 00:00:00 2001 From: Chao Luan Date: Tue, 1 Apr 2014 23:44:38 +1100 Subject: [PATCH 651/772] Remove extra quote from the mysqldump password argument The mysqldb Ansible module will fail if the state specified is import or dump with a '1045: Access Denied' mysql error for complex passwords. This is caused by the extra quote around the '--password' argument to mysqldump, as pipes.quote already quotes the password string.
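With the literal quotes dropped, pipes.quote alone yields a correctly quoted argument (an illustrative sketch; the broken forms produced by the old code are reproduced below):

>>> "--password=%s" % pipes.quote('c0mplexp@ssword!')
"--password='c0mplexp@ssword!'"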
>>> "--password='%s'" % pipes.quote('simple') "--password='simple'" >>> "--password='%s'" % pipes.quote('c0mplexp@ssword!') "--password=''c0mplexp@ssword!''" >>> "--password='%s'" % pipes.quote('password with space') "--password=''password with space''" --- library/database/mysql_db | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/database/mysql_db b/library/database/mysql_db index c9fd5b4e087..8eec1005893 100644 --- a/library/database/mysql_db +++ b/library/database/mysql_db @@ -124,7 +124,7 @@ def db_delete(cursor, db): def db_dump(module, host, user, password, db_name, target, port, socket=None): cmd = module.get_bin_path('mysqldump', True) - cmd += " --quick --user=%s --password='%s'" % (pipes.quote(user), pipes.quote(password)) + cmd += " --quick --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password)) if socket is not None: cmd += " --socket=%s" % pipes.quote(socket) else: @@ -141,7 +141,7 @@ def db_import(module, host, user, password, db_name, target, port, socket=None): cmd = module.get_bin_path('mysql', True) - cmd += " --user=%s --password='%s'" % (pipes.quote(user), pipes.quote(password)) + cmd += " --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password)) if socket is not None: cmd += " --socket=%s" % pipes.quote(socket) else: From 4be010b09bc7a3ed1bb3ceec0d7af7effb5f6146 Mon Sep 17 00:00:00 2001 From: follower Date: Wed, 2 Apr 2014 02:25:14 +1300 Subject: [PATCH 652/772] Clarify the port value always defaults to 3306 While the [boto docs](https://github.com/boto/boto/blob/develop/boto/rds/__init__.py#L253) make it seem like the default value of `port` is changed depending on the engine chosen, AFAICT from looking at the code the default value is never changed from 3306. I think the docs are intended to be read as "the default value used by the chosen engine is the port listed, so you should change `port` to that value". If you don't specify the port value and choose the database engine as PostgreSQL you'll end up with a PostgreSQL instance running on port 3306. --- library/cloud/rds | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/rds b/library/cloud/rds index 0e2c8ff2131..7c5b8e0d441 100644 --- a/library/cloud/rds +++ b/library/cloud/rds @@ -131,7 +131,7 @@ options: aliases: [] port: description: - - Port number that the DB instance uses for connections. Defaults to 3306 for mysql, 1521 for Oracle, 1443 for SQL Server. Used only when command=create or command=replicate. + - Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1443 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate. required: false default: null aliases: [] From c92ec68fa35d5dd68bbeb4aeadcc6f35fb13bd23 Mon Sep 17 00:00:00 2001 From: Andrew Resch Date: Tue, 1 Apr 2014 11:22:29 -0700 Subject: [PATCH 653/772] redhat_subscription calls AnsibleModule() without argument_spec The Rhsm object requires an AnsibleModule but it isn't created with an argument_spec and fails. Since the rhn.module is set directly after, setting None for the required argument of Rhsm fixes the module.
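Roughly, the flow this relies on looks like the following sketch (the rhn.module assignment lives in the surrounding module code, which the hunk below does not show):

    rhn = Rhsm(None)                                   # no AnsibleModule exists yet
    module = AnsibleModule(argument_spec=dict(...))    # built with a real argument_spec
    rhn.module = module                                # attached before any Rhsm method runs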
--- library/packaging/redhat_subscription | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/redhat_subscription b/library/packaging/redhat_subscription index 0e5ce0856d2..f9918ada4b0 100644 --- a/library/packaging/redhat_subscription +++ b/library/packaging/redhat_subscription @@ -327,7 +327,7 @@ class RhsmPools(object): def main(): # Load RHSM configuration from file - rhn = Rhsm(AnsibleModule()) + rhn = Rhsm(None) module = AnsibleModule( argument_spec = dict( From b4de76373a1066c1a4e14b8e2d481d1a851bddba Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 1 Apr 2014 13:54:37 -0500 Subject: [PATCH 654/772] Use /usr/bin/python instead of /usr/bin/env python --- library/cloud/docker_image | 2 +- library/database/riak | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/library/cloud/docker_image b/library/cloud/docker_image index 5fcdfad573c..612b503b4c1 100644 --- a/library/cloud/docker_image +++ b/library/cloud/docker_image @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python # # (c) 2014, Pavel Antonov diff --git a/library/database/riak b/library/database/riak index 7ba4df3ea3a..b30e7dc485d 100644 --- a/library/database/riak +++ b/library/database/riak @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, James Martin , Drew Kerrigan From f89262c74f7ec1c50ba81b51ae05eaa9fabef54b Mon Sep 17 00:00:00 2001 From: John Barker Date: Tue, 1 Apr 2014 21:51:12 +0100 Subject: [PATCH 655/772] debconf detail requirements and fix typos --- library/system/debconf | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/library/system/debconf b/library/system/debconf index 1dade71f8ad..90a5177880f 100644 --- a/library/system/debconf +++ b/library/system/debconf @@ -34,6 +34,7 @@ notes: - A number of questions have to be answered (depending on the package). Use 'debconf-show ' on any Debian or derivative with the package installed to see questions/settings available. 
+requirements: [ debconf, debconf-utils ] options: name: description: @@ -127,10 +128,10 @@ def main(): supports_check_mode=True, ) - #TODO: enable passing array of optionas and/or debconf file from get-selections dump + #TODO: enable passing array of options and/or debconf file from get-selections dump pkg = module.params["name"] question = module.params["question"] - vtype = module.params["vtype"] + vtype = module.params["vtype"] value = module.params["value"] unseen = module.params["unseen"] @@ -142,7 +143,7 @@ def main(): if question is not None: if vtype is None or value is None: - module.fail_json(msg="when supliying a question you must supply a valide vtype and value") + module.fail_json(msg="when supplying a question you must supply a valid vtype and value") if not question in prev or prev[question] != value: changed = True From a4df906fc9bdcdaf93c8942d33dd366879b661e2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 31 Mar 2014 17:33:40 -0500 Subject: [PATCH 656/772] Fixes to safe_eval --- lib/ansible/utils/__init__.py | 99 +++++++++++++++++++++++++---------- 1 file changed, 72 insertions(+), 27 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 2b86034bde9..4abcd2cd82b 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -29,6 +29,7 @@ from ansible.utils.plugins import * from ansible.utils import template from ansible.callbacks import display import ansible.constants as C +import ast import time import StringIO import stat @@ -974,51 +975,95 @@ def is_list_of_strings(items): return False return True -def safe_eval(str, locals=None, include_exceptions=False): +def safe_eval(expr, locals={}, include_exceptions=False): ''' this is intended for allowing things like: with_items: a_list_variable where Jinja2 would return a string but we do not want to allow it to call functions (outside of Jinja2, where the env is constrained) + + Based on: + http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe ''' - # FIXME: is there a more native way to do this? - def is_set(var): - return not var.startswith("$") and not '{{' in var + # this is the whitelist of AST nodes we are going to + # allow in the evaluation. Any node type other than + # those listed here will raise an exception in our custom + # visitor class defined below. 
+ SAFE_NODES = set( + ( + ast.Expression, + ast.Compare, + ast.Str, + ast.List, + ast.Tuple, + ast.Dict, + ast.Call, + ast.Load, + ast.BinOp, + ast.UnaryOp, + ast.Num, + ast.Name, + ast.Add, + ast.Sub, + ast.Mult, + ast.Div, + ) + ) + + # AST node types were expanded after 2.6 + if not sys.version.startswith('2.6'): + SAFE_NODES.union( + set( + (ast.Set,) + ) + ) - def is_unset(var): - return var.startswith("$") or '{{' in var + # builtin functions that are not safe to call + INVALID_CALLS = ( + 'classmethod', 'compile', 'delattr', 'eval', 'execfile', 'file', + 'filter', 'help', 'input', 'object', 'open', 'raw_input', 'reduce', + 'reload', 'repr', 'setattr', 'staticmethod', 'super', 'type', + ) - # do not allow method calls to modules - if not isinstance(str, basestring): + class CleansingNodeVisitor(ast.NodeVisitor): + def generic_visit(self, node): + if type(node) not in SAFE_NODES: + #raise Exception("invalid expression (%s) type=%s" % (expr, type(node))) + raise Exception("invalid expression (%s)" % expr) + super(CleansingNodeVisitor, self).generic_visit(node) + def visit_Call(self, call): + if call.func.id in INVALID_CALLS: + raise Exception("invalid function: %s" % call.func.id) + + if not isinstance(expr, basestring): # already templated to a datastructure, perhaps? if include_exceptions: - return (str, None) - return str - if re.search(r'\w\.\w+\(', str): - if include_exceptions: - return (str, None) - return str - # do not allow imports - if re.search(r'import \w+', str): - if include_exceptions: - return (str, None) - return str + return (expr, None) + return expr + try: - result = None - if not locals: - result = eval(str) - else: - result = eval(str, None, locals) + parsed_tree = ast.parse(expr, mode='eval') + cnv = CleansingNodeVisitor() + cnv.visit(parsed_tree) + compiled = compile(parsed_tree, expr, 'eval') + result = eval(compiled, {}, locals) + if include_exceptions: return (result, None) else: return result + except SyntaxError, e: + # special handling for syntax errors, we just return + # the expression string back as-is + if include_exceptions: + return (expr, None) + return expr except Exception, e: if include_exceptions: - return (str, e) - return str + return (expr, e) + return expr def listify_lookup_plugin_terms(terms, basedir, inject): @@ -1030,7 +1075,7 @@ def listify_lookup_plugin_terms(terms, basedir, inject): # with_items: {{ alist }} stripped = terms.strip() - if not (stripped.startswith('{') or stripped.startswith('[')) and not stripped.startswith("/"): + if not (stripped.startswith('{') or stripped.startswith('[')) and not stripped.startswith("/") and not stripped.startswith('set(['): # if not already a list, get ready to evaluate with Jinja2 # not sure why the "/" is in above code :) try: From e2d86e4f430ae809dfca79f4ca097aaeb544a718 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 1 Apr 2014 09:48:14 -0500 Subject: [PATCH 657/772] Splitting SETUP_CACHE into two caches, one for host vars and one for setup facts --- docsite/rst/playbooks_variables.rst | 3 +++ lib/ansible/playbook/__init__.py | 13 +++++++++---- lib/ansible/playbook/play.py | 14 +++++++++----- lib/ansible/runner/__init__.py | 20 +++++++++++++------- lib/ansible/runner/poller.py | 12 ++++++------ 5 files changed, 40 insertions(+), 22 deletions(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 44e9f1edca8..908eaa2fccb 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -845,8 +845,11 @@ If multiple 
variables of the same name are defined in different places, they win * -e variables always win * then comes "most everything else" * then comes variables defined in inventory + * then comes facts discovered about a system * then "role defaults", which are the most "defaulty" and lose in priority to everything. +.. note:: In versions prior to 1.5.4, facts discovered about a system were in the "most everything else" category above. + That seems a little theoretical. Let's show some examples and where you would choose to put what based on the kind of control you might want over values. diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 6b03d060c45..935828e4439 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -29,7 +29,11 @@ from play import Play import StringIO import pipes +# the setup cache stores all variables about a host +# gathered during the setup step, while the vars cache +# holds all other variables about a host SETUP_CACHE = collections.defaultdict(dict) +VARS_CACHE = collections.defaultdict(dict) class PlayBook(object): ''' @@ -98,6 +102,7 @@ class PlayBook(object): """ self.SETUP_CACHE = SETUP_CACHE + self.VARS_CACHE = VARS_CACHE arguments = [] if playbook is None: @@ -304,7 +309,7 @@ class PlayBook(object): # since these likely got killed by async_wrapper for host in poller.hosts_to_poll: reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' } - self.runner_callbacks.on_async_failed(host, reason, poller.runner.setup_cache[host]['ansible_job_id']) + self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id']) results['contacted'][host] = reason return results @@ -339,6 +344,7 @@ class PlayBook(object): default_vars=task.default_vars, private_key_file=self.private_key_file, setup_cache=self.SETUP_CACHE, + vars_cache=self.VARS_CACHE, basedir=task.play.basedir, conditional=task.when, callbacks=self.runner_callbacks, @@ -375,7 +381,7 @@ class PlayBook(object): results = self._async_poll(poller, task.async_seconds, task.async_poll_interval) else: for (host, res) in results.get('contacted', {}).iteritems(): - self.runner_callbacks.on_async_ok(host, res, poller.runner.setup_cache[host]['ansible_job_id']) + self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id']) contacted = results.get('contacted',{}) dark = results.get('dark', {}) @@ -434,8 +440,6 @@ class PlayBook(object): else: facts = result.get('ansible_facts', {}) self.SETUP_CACHE[host].update(facts) - # extra vars need to always trump - so update again following the facts - self.SETUP_CACHE[host].update(self.extra_vars) if task.register: if 'stdout' in result and 'stdout_lines' not in result: result['stdout_lines'] = result['stdout'].splitlines() @@ -512,6 +516,7 @@ class PlayBook(object): remote_port=play.remote_port, private_key_file=self.private_key_file, setup_cache=self.SETUP_CACHE, + vars_cache=self.VARS_CACHE, callbacks=self.runner_callbacks, sudo=play.sudo, sudo_user=play.sudo_user, diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 155994ef19c..402ae0d5fd7 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -766,7 +766,7 @@ class Play(object): if host is not None: inject = {} inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password)) - inject.update(self.playbook.SETUP_CACHE[host]) + inject.update(self.playbook.VARS_CACHE[host]) for filename in self.vars_files: @@ -790,9 +790,9 @@ 
class Play(object): if host is not None: if self._has_vars_in(filename2) and not self._has_vars_in(filename3): # this filename has variables in it that were fact specific - # so it needs to be loaded into the per host SETUP_CACHE + # so it needs to be loaded into the per host VARS_CACHE data = utils.combine_vars(inject, data) - self.playbook.SETUP_CACHE[host].update(data) + self.playbook.VARS_CACHE[host].update(data) self.playbook.callbacks.on_import_for_host(host, filename4) elif not self._has_vars_in(filename4): # found a non-host specific variable, load into vars and NOT @@ -825,9 +825,13 @@ class Play(object): # running a host specific pass and has host specific variables # load into setup cache new_vars = utils.combine_vars(inject, new_vars) - self.playbook.SETUP_CACHE[host] = utils.combine_vars( - self.playbook.SETUP_CACHE[host], new_vars) + self.playbook.VARS_CACHE[host] = utils.combine_vars( + self.playbook.VARS_CACHE[host], new_vars) self.playbook.callbacks.on_import_for_host(host, filename4) elif host is None: # running a non-host specific pass and we can update the global vars instead self.vars = utils.combine_vars(self.vars, new_vars) + + # finally, update the VARS_CACHE for the host, if it is set + if host is not None: + self.playbook.VARS_CACHE[host].update(self.playbook.extra_vars) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index f6b607e1d2a..bb10ec1bb2b 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -80,18 +80,18 @@ def _executor_hook(job_queue, result_queue, new_stdin): traceback.print_exc() class HostVars(dict): - ''' A special view of setup_cache that adds values from the inventory when needed. ''' + ''' A special view of vars_cache that adds values from the inventory when needed. 
''' - def __init__(self, setup_cache, inventory): - self.setup_cache = setup_cache + def __init__(self, vars_cache, inventory): + self.vars_cache = vars_cache self.inventory = inventory self.lookup = dict() - self.update(setup_cache) + self.update(vars_cache) def __getitem__(self, host): if host not in self.lookup: result = self.inventory.get_variables(host) - result.update(self.setup_cache.get(host, {})) + result.update(self.vars_cache.get(host, {})) self.lookup[host] = result return self.lookup[host] @@ -117,6 +117,7 @@ class Runner(object): background=0, # async poll every X seconds, else 0 for non-async basedir=None, # directory of playbook, if applicable setup_cache=None, # used to share fact data w/ other tasks + vars_cache=None, # used to store variables about hosts transport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local' conditional='True', # run only if this fact expression evals to true callbacks=None, # used for output @@ -154,6 +155,7 @@ class Runner(object): self.check = check self.diff = diff self.setup_cache = utils.default(setup_cache, lambda: collections.defaultdict(dict)) + self.vars_cache = utils.default(vars_cache, lambda: collections.defaultdict(dict)) self.basedir = utils.default(basedir, lambda: os.getcwd()) self.callbacks = utils.default(callbacks, lambda: DefaultRunnerCallbacks()) self.generated_jid = str(random.randint(0, 999999999999)) @@ -550,13 +552,17 @@ class Runner(object): module_vars = template.template(self.basedir, self.module_vars, host_variables) + # merge the VARS and SETUP caches for this host + combined_cache = self.setup_cache.copy() + combined_cache.get(host, {}).update(self.vars_cache.get(host, {})) + inject = {} inject = utils.combine_vars(inject, self.default_vars) inject = utils.combine_vars(inject, host_variables) inject = utils.combine_vars(inject, module_vars) - inject = utils.combine_vars(inject, self.setup_cache[host]) + inject = utils.combine_vars(inject, combined_cache.get(host, {})) inject.setdefault('ansible_ssh_user', self.remote_user) - inject['hostvars'] = HostVars(self.setup_cache, self.inventory) + inject['hostvars'] = HostVars(combined_cache, self.inventory) inject['group_names'] = host_variables.get('group_names', []) inject['groups'] = self.inventory.groups_list() inject['vars'] = self.module_vars diff --git a/lib/ansible/runner/poller.py b/lib/ansible/runner/poller.py index 5813377249d..cb2da738b1f 100644 --- a/lib/ansible/runner/poller.py +++ b/lib/ansible/runner/poller.py @@ -38,13 +38,13 @@ class AsyncPoller(object): if res.get('started', False): self.hosts_to_poll.append(host) jid = res.get('ansible_job_id', None) - self.runner.setup_cache[host]['ansible_job_id'] = jid + self.runner.vars_cache[host]['ansible_job_id'] = jid self.active = True else: skipped = skipped & res.get('skipped', False) self.results['contacted'][host] = res for (host, res) in results['dark'].iteritems(): - self.runner.setup_cache[host]['ansible_job_id'] = '' + self.runner.vars_cache[host]['ansible_job_id'] = '' self.results['dark'][host] = res if not skipped: @@ -77,14 +77,14 @@ class AsyncPoller(object): self.results['contacted'][host] = res poll_results['contacted'][host] = res if res.get('failed', False) or res.get('rc', 0) != 0: - self.runner.callbacks.on_async_failed(host, res, self.runner.setup_cache[host]['ansible_job_id']) + self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host]['ansible_job_id']) else: - self.runner.callbacks.on_async_ok(host, res, self.runner.setup_cache[host]['ansible_job_id']) + 
self.runner.callbacks.on_async_ok(host, res, self.runner.vars_cache[host]['ansible_job_id']) for (host, res) in results['dark'].iteritems(): self.results['dark'][host] = res poll_results['dark'][host] = res if host in self.hosts_to_poll: - self.runner.callbacks.on_async_failed(host, res, self.runner.setup_cache[host].get('ansible_job_id','XX')) + self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host].get('ansible_job_id','XX')) self.hosts_to_poll = hosts if len(hosts)==0: @@ -106,7 +106,7 @@ class AsyncPoller(object): for (host, res) in poll_results['polled'].iteritems(): if res.get('started'): - self.runner.callbacks.on_async_poll(host, res, self.runner.setup_cache[host]['ansible_job_id'], clock) + self.runner.callbacks.on_async_poll(host, res, self.runner.vars_cache[host]['ansible_job_id'], clock) clock = clock - poll_interval From ba4838cde57b71a205ac4601e360f8697e32f1fb Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 1 Apr 2014 22:14:35 -0500 Subject: [PATCH 658/772] Adding wait/wait_timeout parameters to the ec2_key module Fixes #6455 --- library/cloud/ec2_key | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/library/cloud/ec2_key b/library/cloud/ec2_key index 289deb6c9d6..9e94d22a0b3 100644 --- a/library/cloud/ec2_key +++ b/library/cloud/ec2_key @@ -70,6 +70,20 @@ options: default: null aliases: [] version_added: "1.6" + wait: + description: + - Wait for the specified action to complete before returning. + required: false + default: false + aliases: [] + version_added: "1.6" + wait_timeout: + description: + - How long before wait gives up, in seconds + required: false + default: 300 + aliases: [] + version_added: "1.6" requirements: [ "boto" ] author: Vincent Viallet @@ -124,6 +138,8 @@ def main(): name=dict(required=True), key_material=dict(required=False), state = dict(default='present', choices=['present', 'absent']), + wait = dict(type='bool', default=False), + wait_timeout = dict(default=300), ) ) module = AnsibleModule( @@ -134,6 +150,8 @@ def main(): name = module.params['name'] state = module.params.get('state') key_material = module.params.get('key_material') + wait = module.params.get('wait') + wait_timeout = int(module.params.get('wait_timeout')) changed = False @@ -148,6 +166,16 @@ def main(): '''found a match, delete it''' try: key.delete() + if wait: + start = time.time() + action_complete = False + while (time.time() - start) < wait_timeout: + if not ec2.get_key_pair(name): + action_complete = True + break + time.sleep(1) + if not action_complete: + module.fail_json(msg="timed out while waiting for the key to be removed") except Exception, e: module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e)) else: @@ -178,6 +206,18 @@ def main(): retrieve the private key ''' key = ec2.create_key_pair(name) + + if wait: + start = time.time() + action_complete = False + while (time.time() - start) < wait_timeout: + if ec2.get_key_pair(name): + action_complete = True + break + time.sleep(1) + if not action_complete: + module.fail_json(msg="timed out while waiting for the key to be created") + changed = True if key: From fe88fcb6d230e3848009f56773859d1bc9236912 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 1 Apr 2014 22:16:40 -0500 Subject: [PATCH 659/772] Updating CHANGELOG for ec2_key wait addition --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e32a4471df8..1d215e95793 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -62,6 +62,7 
@@ Other notable changes: * setup module code moved into module_utils/, facts now accessible by other modules * synchronize module sets relative dirs based on inventory or role path * misc bugfixes and other parameters +* the ec2_key module now has wait/wait_timeout parameters ## 1.5.3 "Love Walks In" - March 13, 2014 From 933fb349f15b9ae93dabe8a2f1a4cbe0f5dd3844 Mon Sep 17 00:00:00 2001 From: "Oleg A. Mamontov" Date: Wed, 2 Apr 2014 13:21:26 +0400 Subject: [PATCH 660/772] Fixed cwd for submodules update --- library/source_control/git | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/source_control/git b/library/source_control/git index 63539f624fa..8fffdbfaf89 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -389,7 +389,7 @@ def submodule_update(git_path, module, dest): cmd = [ git_path, 'submodule', 'sync' ] (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ] - (rc, out, err) = module.run_command(cmd) + (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: module.fail_json(msg="Failed to init/update submodules") return (rc, out, err) From 579579654657da277bb1dd8e5a1c79aeb8f15f2f Mon Sep 17 00:00:00 2001 From: James Tanner Date: Wed, 2 Apr 2014 13:33:11 -0400 Subject: [PATCH 661/772] Remove limitation on number of files for encrypt subcommand in ansible-vault --- bin/ansible-vault | 3 --- 1 file changed, 3 deletions(-) diff --git a/bin/ansible-vault b/bin/ansible-vault index 0784c9cec81..1c2e48a0634 100755 --- a/bin/ansible-vault +++ b/bin/ansible-vault @@ -160,9 +160,6 @@ def execute_edit(args, options, parser): def execute_encrypt(args, options, parser): - if len(args) > 1: - raise errors.AnsibleError("'create' does not accept more than one filename") - if not options.password_file: password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True) else: From b8efbb1cb3c94da9b8402c7e361cf89a8f9b1501 Mon Sep 17 00:00:00 2001 From: John Barker Date: Wed, 2 Apr 2014 20:43:12 +0100 Subject: [PATCH 662/772] debconf correctly quote strings --- library/system/debconf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/debconf b/library/system/debconf index 1dade71f8ad..fded0b0e0b4 100644 --- a/library/system/debconf +++ b/library/system/debconf @@ -107,7 +107,7 @@ def set_selection(module, pkg, question, vtype, value, unseen): data = ' '.join([ question, vtype, value ]) setsel = module.get_bin_path('debconf-set-selections', True) - cmd = ["echo '%s %s' |" % (pipes.quote(pkg), pipes.quote(data)), setsel] + cmd = ["echo %s %s |" % (pipes.quote(pkg), pipes.quote(data)), setsel] if unseen: cmd.append('-u') From 684d46b1707f9ff7cda6007b7399d1a614c6b9fd Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 2 Apr 2014 14:25:24 -0500 Subject: [PATCH 663/772] Fixing some parsing issues in authorized_key module Also adds an integration test for authorized_key for future validation. 
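
For review purposes, the comma-splitting idea at the heart of this change
can be sketched in isolation (a rough standalone snippet, not part of the
patch itself; the sample option string is invented):

    import re

    # Split an option string on commas, but leave commas that fall inside
    # quotes untouched.  Because the pattern captures the kept runs,
    # re.split() returns them interleaved with the ',' separators.
    regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
    parts = regex.split('no-port-forwarding,command="echo foo,bar"')[1:-1]
    # parts == ['no-port-forwarding', ',', 'command="echo foo,bar"']
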
Fixes #6700 --- library/system/authorized_key | 44 ++-- test/integration/non_destructive.yml | 1 + .../test_authorized_key/defaults/main.yml | 15 ++ .../roles/test_authorized_key/meta/main.yml | 2 + .../roles/test_authorized_key/tasks/main.yml | 244 ++++++++++++++++++ 5 files changed, 277 insertions(+), 29 deletions(-) create mode 100644 test/integration/roles/test_authorized_key/defaults/main.yml create mode 100644 test/integration/roles/test_authorized_key/meta/main.yml create mode 100644 test/integration/roles/test_authorized_key/tasks/main.yml diff --git a/library/system/authorized_key b/library/system/authorized_key index ac81c39d896..cebbcc3ca16 100644 --- a/library/system/authorized_key +++ b/library/system/authorized_key @@ -199,33 +199,19 @@ def parseoptions(module, options): ''' options_dict = keydict() #ordered dict if options: - token_exp = [ - # matches separator - (r',+', False), - # matches option with value, e.g. from="x,y" - (r'([a-z0-9-]+)="((?:[^"\\]|\\.)*)"', True), - # matches single option, e.g. no-agent-forwarding - (r'[a-z0-9-]+', True) - ] - - pos = 0 - while pos < len(options): - match = None - for pattern, is_valid_option in token_exp: - regex = re.compile(pattern, re.IGNORECASE) - match = regex.match(options, pos) - if match: - text = match.group(0) - if is_valid_option: - if len(match.groups()) == 2: - options_dict[match.group(1)] = match.group(2) - else: - options_dict[text] = None - break - if not match: - module.fail_json(msg="invalid option string: %s" % options) - else: - pos = match.end(0) + try: + # the following regex will split on commas while + # ignoring those commas that fall within quotes + regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''') + parts = regex.split(options)[1:-1] + for part in parts: + if "=" in part: + (key, value) = part.split("=", 1) + options_dict[key] = value + elif part != ",": + options_dict[part] = None + except: + module.fail_json(msg="invalid option string: %s" % options) return options_dict @@ -254,7 +240,7 @@ def parsekey(module, raw_key): # split key safely lex = shlex.shlex(raw_key) - lex.quotes = ["'", '"'] + lex.quotes = [] lex.commenters = '' #keep comment hashes lex.whitespace_split = True key_parts = list(lex) @@ -315,7 +301,7 @@ def writekeys(module, filename, keys): option_strings = [] for option_key in options.keys(): if options[option_key]: - option_strings.append("%s=\"%s\"" % (option_key, options[option_key])) + option_strings.append("%s=%s" % (option_key, options[option_key])) else: option_strings.append("%s" % option_key) diff --git a/test/integration/non_destructive.yml b/test/integration/non_destructive.yml index f8c6772ee9f..c8d836896aa 100644 --- a/test/integration/non_destructive.yml +++ b/test/integration/non_destructive.yml @@ -36,3 +36,4 @@ - { role: test_command_shell, tags: test_command_shell } - { role: test_failed_when, tags: test_failed_when } - { role: test_script, tags: test_script } + - { role: test_authorized_key, tags: test_authorized_key } diff --git a/test/integration/roles/test_authorized_key/defaults/main.yml b/test/integration/roles/test_authorized_key/defaults/main.yml new file mode 100644 index 00000000000..e3a7606e01b --- /dev/null +++ b/test/integration/roles/test_authorized_key/defaults/main.yml @@ -0,0 +1,15 @@ +--- +dss_key_basic: > + ssh-dss DATA_BASIC root@testing +dss_key_unquoted_option: > + idle-timeout=5m ssh-dss DATA_UNQUOTED_OPTION root@testing +dss_key_command: > + command="/bin/true" ssh-dss DATA_COMMAND root@testing +dss_key_complex_command: > + command="echo 
foo 'bar baz'" ssh-dss DATA_COMPLEX_COMMAND root@testing +dss_key_command_single_option: > + no-port-forwarding,command="/bin/true" ssh-dss DATA_COMMAND_SINGLE_OPTIONS root@testing +dss_key_command_multiple_options: > + no-port-forwarding,idle-timeout=5m,command="/bin/true" ssh-dss DATA_COMMAND_MULTIPLE_OPTIONS root@testing +dss_key_trailing: > + ssh-dss DATA_TRAILING root@testing foo bar baz diff --git a/test/integration/roles/test_authorized_key/meta/main.yml b/test/integration/roles/test_authorized_key/meta/main.yml new file mode 100644 index 00000000000..145d4f7ca1f --- /dev/null +++ b/test/integration/roles/test_authorized_key/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/roles/test_authorized_key/tasks/main.yml b/test/integration/roles/test_authorized_key/tasks/main.yml new file mode 100644 index 00000000000..20f369e509c --- /dev/null +++ b/test/integration/roles/test_authorized_key/tasks/main.yml @@ -0,0 +1,244 @@ +# test code for the authorized_key module +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +# ------------------------------------------------------------- +# Setup steps + +- name: touch the authorized_keys file + file: dest="{{output_dir}}/authorized_keys" state=touch + register: result + +- name: assert that the authorized_keys file was created + assert: + that: + - ['result.changed == True'] + - ['result.state == "file"'] + +# ------------------------------------------------------------- +# basic ssh-dss key + +- name: add basic ssh-dss key + authorized_key: user=root key="{{ dss_key_basic }}" state=present path="{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - ['result.changed == True'] + - ['result.key == dss_key_basic'] + - ['result.key_options == None'] + +- name: re-add basic ssh-dss key + authorized_key: user=root key="{{ dss_key_basic }}" state=present path="{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - ['result.changed == False'] + +# ------------------------------------------------------------- +# ssh-dss key with an unquoted option + +- name: add ssh-dss key with an unquoted option + authorized_key: + user: root + key: "{{ dss_key_unquoted_option }}" + state: present + path: "{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - ['result.changed == True'] + - ['result.key == dss_key_unquoted_option'] + - ['result.key_options == None'] + +- name: re-add ssh-dss key with an unquoted option + authorized_key: + user: root + key: "{{ dss_key_unquoted_option }}" + state: present + path: "{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - ['result.changed == False'] + +# 
------------------------------------------------------------- +# ssh-dss key with a leading command="/bin/foo" + +- name: add ssh-dss key with a leading command + authorized_key: + user: root + key: "{{ dss_key_command }}" + state: present + path: "{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - ['result.changed == True'] + - ['result.key == dss_key_command'] + - ['result.key_options == None'] + +- name: re-add ssh-dss key with a leading command + authorized_key: + user: root + key: "{{ dss_key_command }}" + state: present + path: "{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - ['result.changed == False'] + +# ------------------------------------------------------------- +# ssh-dss key with a complex quoted leading command +# ie. command="/bin/echo foo 'bar baz'" + +- name: add ssh-dss key with a complex quoted leading command + authorized_key: + user: root + key: "{{ dss_key_complex_command }}" + state: present + path: "{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - ['result.changed == True'] + - ['result.key == dss_key_complex_command'] + - ['result.key_options == None'] + +- name: re-add ssh-dss key with a complex quoted leading command + authorized_key: + user: root + key: "{{ dss_key_complex_command }}" + state: present + path: "{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - ['result.changed == False'] + +# ------------------------------------------------------------- +# ssh-dss key with a command and a single option, which are +# in a comma-separated list + +- name: add ssh-dss key with a command and a single option + authorized_key: + user: root + key: "{{ dss_key_command_single_option }}" + state: present + path: "{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - ['result.changed == True'] + - ['result.key == dss_key_command_single_option'] + - ['result.key_options == None'] + +- name: re-add ssh-dss key with a command and a single option + authorized_key: + user: root + key: "{{ dss_key_command_single_option }}" + state: present + path: "{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - ['result.changed == False'] + +# ------------------------------------------------------------- +# ssh-dss key with a command and multiple other options + +- name: add ssh-dss key with a command and multiple options + authorized_key: + user: root + key: "{{ dss_key_command_multiple_options }}" + state: present + path: "{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - ['result.changed == True'] + - ['result.key == dss_key_command_multiple_options'] + - ['result.key_options == None'] + +- name: re-add ssh-dss key with a command and multiple options + authorized_key: + user: root + key: "{{ dss_key_command_multiple_options }}" + state: present + path: "{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - ['result.changed == False'] + +# ------------------------------------------------------------- +# ssh-dss key with multiple trailing parts, which are space- +# separated and not quoted in any way + +- name: add 
ssh-dss key with trailing parts + authorized_key: + user: root + key: "{{ dss_key_trailing }}" + state: present + path: "{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that the key was added + assert: + that: + - ['result.changed == True'] + - ['result.key == dss_key_trailing'] + - ['result.key_options == None'] + +- name: re-add ssh-dss key with trailing parts + authorized_key: + user: root + key: "{{ dss_key_trailing }}" + state: present + path: "{{output_dir|expanduser}}/authorized_keys" + register: result + +- name: assert that nothing changed + assert: + that: + - ['result.changed == False'] + From eb15d2f6fe067df1b215dab92575418292721dbe Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 2 Apr 2014 15:25:17 -0500 Subject: [PATCH 664/772] Fix issue with sysctl parameter parsing introduced by 0e8c7b1 Fixes #6806 --- library/system/sysctl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/system/sysctl b/library/system/sysctl index fd6960f2228..be379798252 100644 --- a/library/system/sysctl +++ b/library/system/sysctl @@ -185,9 +185,9 @@ class SysctlModule(object): def _parse_value(self, value): if value is None: return '' - elif value.lower(): + elif value.lower() in BOOLEANS_TRUE: return '1' - elif not value.lower(): + elif not value.lower() in BOOLEANS_FALSE: return '0' else: return value.strip() From 6f34a6336fa405498420f232436a71f97c5d698b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 2 Apr 2014 15:52:08 -0500 Subject: [PATCH 665/772] Differentiate decryption failures from empty data files in ansible-vault Fixes #6822 --- lib/ansible/utils/vault.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 0a71bd49cb2..b4d79a50388 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -123,7 +123,7 @@ class VaultLib(object): # try to unencrypt data data = this_cipher.decrypt(data, self.password) - if not data: + if data is None: raise errors.AnsibleError("Decryption failed") return data @@ -210,7 +210,7 @@ class VaultEditor(object): this_vault = VaultLib(self.password) if this_vault.is_encrypted(tmpdata): dec_data = this_vault.decrypt(tmpdata) - if not dec_data: + if dec_data is None: raise errors.AnsibleError("Decryption failed") else: self.write_data(dec_data, self.filename) From a6a4680e5c4aede0ee3ba617a7c0a12a2f62fa90 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 2 Apr 2014 16:13:22 -0500 Subject: [PATCH 666/772] Fix variable naming issue in _load_tasks() Fixes #6800 --- lib/ansible/playbook/play.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 402ae0d5fd7..93af7e6a292 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -585,9 +585,9 @@ class Play(object): include_filename = utils.path_dwim(dirname, include_file) data = utils.parse_yaml_from_file(include_filename, vault_password=self.vault_password) if 'role_name' in x and data is not None: - for x in data: - if 'include' in x: - x['role_name'] = new_role + for y in data: + if isinstance(y, dict) and 'include' in y: + y['role_name'] = new_role loaded = self._load_tasks(data, mv, default_vars, included_sudo_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role) results += loaded elif type(x) == dict: From 3ba01cac3c4a70b7697453040ebfe74a0418dcd2 Mon Sep 17 00:00:00 2001 From: Michael 
DeHaan
Date: Wed, 2 Apr 2014 17:17:52 -0400
Subject: [PATCH 667/772] Note that keyserver was added in 1.6

---
 library/packaging/apt_key | 1 +
 1 file changed, 1 insertion(+)

diff --git a/library/packaging/apt_key b/library/packaging/apt_key
index f0d7f8ba81f..6eab86bb887 100644
--- a/library/packaging/apt_key
+++ b/library/packaging/apt_key
@@ -59,6 +59,7 @@ options:
     description:
       - url to retrieve key from.
   keyserver:
+    version_added: "1.6"
    required: false
    default: none
    description:

From 10adf9f2cc0d1d85deb66573f1763794debdfa42 Mon Sep 17 00:00:00 2001
From: Michael DeHaan
Date: Wed, 2 Apr 2014 17:29:05 -0400
Subject: [PATCH 668/772] Denote unit test requirements in README.md

---
 test/README.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/test/README.md b/test/README.md
index 526b448e087..3e746062cd1 100644
--- a/test/README.md
+++ b/test/README.md
@@ -2,6 +2,7 @@ Ansible Test System
 ===================
 
 Folders
+=======
 
 unit
 ----
@@ -11,6 +12,8 @@ mock interfaces rather than producing side effects.
 
 Playbook engine code is better suited for integration tests.
 
+Requirements: sudo pip install paramiko PyYAML jinja2 httplib2 passlib
+
 integration
 -----------

From 18b713fd74e0ecfd6c8d96ad05c45c0586d100c8 Mon Sep 17 00:00:00 2001
From: Michael DeHaan
Date: Wed, 2 Apr 2014 17:32:44 -0400
Subject: [PATCH 669/772] Example cleanup.

---
 library/network/uri | 38 ++++++++++++++++++--------------------
 1 file changed, 18 insertions(+), 20 deletions(-)

diff --git a/library/network/uri b/library/network/uri
index 3098c5f3902..b8b9b04ab9c 100644
--- a/library/network/uri
+++ b/library/network/uri
@@ -143,31 +143,29 @@ EXAMPLES = '''
     when: 'AWESOME' not in "{{ webpage.content }}"
 
-# Create a JIRA issue.
-- action: >
-        uri url=https://your.jira.example.com/rest/api/2/issue/
-        method=POST user=your_username password=your_pass
-        body="{{ lookup('file','issue.json') }}" force_basic_auth=yes
-        status_code=201 HEADER_Content-Type="application/json"
-
-- action: >
-        uri url=https://your.form.based.auth.examle.com/index.php
-        method=POST body="name=your_username&password=your_password&enter=Sign%20in"
-        status_code=302 HEADER_Content-Type="application/x-www-form-urlencoded"
-  register: login
+# Create a JIRA issue
+
+- uri: url=https://your.jira.example.com/rest/api/2/issue/
+       method=POST user=your_username password=your_pass
+       body="{{ lookup('file','issue.json') }}" force_basic_auth=yes
+       status_code=201 HEADER_Content-Type="application/json"
 
 # Login to a form based webpage, then use the returned cookie to
-# access the app in later tasks.
-- action: uri url=https://your.form.based.auth.example.com/dashboard.php
-      method=GET return_content=yes HEADER_Cookie="{{login.set_cookie}}"
+# access the app in later tasks
+
+- uri: url=https://your.form.based.auth.example.com/index.php
+       method=POST body="name=your_username&password=your_password&enter=Sign%20in"
+       status_code=302 HEADER_Content-Type="application/x-www-form-urlencoded"
+  register: login
+
+- uri: url=https://your.form.based.auth.example.com/dashboard.php
+       method=GET return_content=yes HEADER_Cookie="{{login.set_cookie}}"
 
 # Queue build of a project in Jenkins:
-- action: uri url=http://{{jenkins.host}}/job/{{jenkins.job}}/build?token={{jenkins.token}}
-      method=GET user={{jenkins.user}} password={{jenkins.password}} force_basic_auth=yes status_code=201
-# Call Jenkins from host where you run ansible.
Handy if the Jenkins host isn't reachable from the target machine: -- local_action: uri url=http://{{jenkins.host}}/job/{{jenkins.job}}/build?token={{jenkins.token}} - method=GET user={{jenkins.user}} password={{jenkins.password}} force_basic_auth=yes status_code=201 +- uri: url=http://{{jenkins.host}}/job/{{jenkins.job}}/build?token={{jenkins.token}} + method=GET user={{jenkins.user}} password={{jenkins.password}} force_basic_auth=yes status_code=201 + ''' HAS_HTTPLIB2 = True From a4bce09ad1a5795b5f92913244570145c5f2565c Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 2 Apr 2014 17:36:47 -0400 Subject: [PATCH 670/772] Clarify docs message. --- library/packaging/apt_repository | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/apt_repository b/library/packaging/apt_repository index a9e6e6dcf29..af51618e682 100644 --- a/library/packaging/apt_repository +++ b/library/packaging/apt_repository @@ -45,7 +45,7 @@ options: - A source string state. update_cache: description: - - Run the equivalent of C(apt-get update) if has changed. + - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes. required: false default: "yes" choices: [ "yes", "no" ] From 876b8085aa9a0a13de9a2e61a45f2b0ee66f1939 Mon Sep 17 00:00:00 2001 From: Harlan Lieberman-Berg Date: Wed, 2 Apr 2014 17:41:11 -0400 Subject: [PATCH 671/772] Add directory_mode to common file parameters to prevent errors. --- lib/ansible/module_utils/basic.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 9246fd1df7b..654042370b5 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -115,6 +115,7 @@ FILE_COMMON_ARGUMENTS=dict( backup = dict(), force = dict(), remote_src = dict(), # used by assemble + directory_mode = dict(), # used by copy ) From 90c137dea3c853209de084bda1b01ce7526eda82 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 2 Apr 2014 17:43:40 -0400 Subject: [PATCH 672/772] Slightly revise wording. --- library/system/authorized_key | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/library/system/authorized_key b/library/system/authorized_key index 053fd60eb4a..c40edb1f162 100644 --- a/library/system/authorized_key +++ b/library/system/authorized_key @@ -48,10 +48,11 @@ options: version_added: "1.2" manage_dir: description: - - Whether this module should manage (create it, change owner and - permissions) the directory of the authorized_keys file. Make sure to + - Whether this module should manage the directory of the authorized key file. If + set, the module will create the directory, as well as set the owner and permissions + of an existing directory. Be sure to set C(manage_dir=no) if you are using an alternate directory for - authorized_keys set with C(path), since you could lock yourself out of + authorized_keys, as set with C(path), since you could lock yourself out of SSH access. See the example below. required: false choices: [ "yes", "no" ] From 5a075e3b93f00511cd3983c0673ed26cc09410db Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 2 Apr 2014 17:47:01 -0400 Subject: [PATCH 673/772] Clarify docs section slightly. 
--- docsite/rst/guide_rax.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docsite/rst/guide_rax.rst b/docsite/rst/guide_rax.rst index 515736edb82..ae145c96f10 100644 --- a/docsite/rst/guide_rax.rst +++ b/docsite/rst/guide_rax.rst @@ -66,7 +66,9 @@ https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authentic Running from a Python Virtual Environment (Optional) ++++++++++++++++++++++++++++++++++++++++++++++++++++ -There are special considerations when Ansible is installed to a Python virtualenv, rather than the default of installing at a global scope. Ansible assumes, unless otherwise instructed, that the python binary will live at /usr/bin/python. This is done so via the interpret line in modules, however when instructed using ansible_python_interpreter, Ansible will use this specified path instead to find Python. **This is a common cause of module failures**, because the user assumes that modules running on 'localhost', or perhaps running via 'local_action' are using the virtualenv Python interpreter. By setting this line in the inventory, the modules will execute in the virtualenv interpreter and have available the virtualenv packages, specifically pyrax. If using virtualenv, you may wish to modify your localhost inventory definition to find this location as follows: +Most users will not be using virtualenv, but some users, particularly Python developers sometimes like to. + +There are special considerations when Ansible is installed to a Python virtualenv, rather than the default of installing at a global scope. Ansible assumes, unless otherwise instructed, that the python binary will live at /usr/bin/python. This is done via the interpreter line in modules, however when instructed by setting the inventory variable 'ansible_python_interpreter', Ansible will use this specified path instead to find Python. This can be a cause of confusion as one may assume that modules running on 'localhost', or perhaps running via 'local_action', are using the virtualenv Python interpreter. By setting this line in the inventory, the modules will execute in the virtualenv interpreter and have available the virtualenv packages, specifically pyrax. If using virtualenv, you may wish to modify your localhost inventory definition to find this location as follows: .. code-block:: ini From 79c245f31f49a181067fc74eb1195ed76a3bf3f0 Mon Sep 17 00:00:00 2001 From: Joshua Lund Date: Wed, 2 Apr 2014 16:41:09 -0600 Subject: [PATCH 674/772] Update cron module documentation. The 'name' parameter is required. --- library/system/cron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/cron b/library/system/cron index 15c21fb157d..be17ede845d 100644 --- a/library/system/cron +++ b/library/system/cron @@ -44,7 +44,7 @@ options: name: description: - Description of a crontab entry. - required: false + required: true default: null user: description: From cd12d8a917ee744c85b37f9d4ee5527899e53779 Mon Sep 17 00:00:00 2001 From: Andrew Resch Date: Wed, 2 Apr 2014 15:59:05 -0700 Subject: [PATCH 675/772] Fix _parse_value always returning 0 when not true, false or None. 
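
For reviewers, the corrected truth table is easier to see standalone. A
minimal sketch, assuming the BOOLEANS_TRUE/BOOLEANS_FALSE word lists mirror
the ones in module_utils/basic.py:

    # Assumed stand-ins for the lists defined in module_utils/basic.py.
    BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1]
    BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0]

    def parse_value(value):
        # None becomes '', boolean words are normalized to '1'/'0', and
        # any other value is passed through stripped.
        if value is None:
            return ''
        elif value.lower() in BOOLEANS_TRUE:
            return '1'
        elif value.lower() in BOOLEANS_FALSE:
            return '0'
        else:
            return value.strip()

    assert parse_value('On') == '1'
    assert parse_value('1500') == '1500'   # no longer collapses to '0'
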
--- library/system/sysctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/sysctl b/library/system/sysctl index be379798252..ab1da5e0959 100644 --- a/library/system/sysctl +++ b/library/system/sysctl @@ -187,7 +187,7 @@ class SysctlModule(object): return '' elif value.lower() in BOOLEANS_TRUE: return '1' - elif not value.lower() in BOOLEANS_FALSE: + elif value.lower() in BOOLEANS_FALSE: return '0' else: return value.strip() From 3763b32f86c5b61be92d0a9803a139286e7f2da7 Mon Sep 17 00:00:00 2001 From: Paul Durivage Date: Thu, 13 Mar 2014 16:19:28 -0500 Subject: [PATCH 676/772] Fix issue where a container is created but is not indicating that the state was changed --- library/cloud/rax_files | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/cloud/rax_files b/library/cloud/rax_files index 720ea53e191..bfd1a0c3d13 100644 --- a/library/cloud/rax_files +++ b/library/cloud/rax_files @@ -237,6 +237,7 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, pri except Exception, e: module.fail_json(msg=e.message) else: + EXIT_DICT['changed'] = True EXIT_DICT['created'] = True else: module.fail_json(msg=e.message) @@ -312,8 +313,7 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, pri _locals = locals().keys() - if ('cont_created' in _locals - or 'cont_deleted' in _locals + if ('cont_deleted' in _locals or 'meta_set' in _locals or 'cont_public' in _locals or 'cont_private' in _locals From 317c2f4bc03b3d800a9c983ae6141d48d15ad65f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 2 Apr 2014 19:46:51 -0500 Subject: [PATCH 677/772] Fixes to variable issues introduced by recent changes Fixes #6801 Fixes #6832 --- lib/ansible/playbook/play.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 93af7e6a292..13fd0e471bf 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -688,6 +688,9 @@ class Play(object): else: raise errors.AnsibleError("'vars_prompt' section is malformed, see docs") + if type(self.playbook.extra_vars) == dict: + vars = utils.combine_vars(vars, self.playbook.extra_vars) + return vars # ************************************************* @@ -766,7 +769,8 @@ class Play(object): if host is not None: inject = {} inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password)) - inject.update(self.playbook.VARS_CACHE[host]) + inject.update(self.playbook.SETUP_CACHE.get(host, {})) + inject.update(self.playbook.VARS_CACHE.get(host, {})) for filename in self.vars_files: From 5770428e916ab62c6814e6dff3f1136a6f7689f2 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 19 Mar 2014 11:27:40 -0500 Subject: [PATCH 678/772] Add version_compare filter --- docsite/rst/playbooks_variables.rst | 26 ++++++++++++++++++ lib/ansible/runner/filter_plugins/core.py | 32 +++++++++++++++++++++++ test/units/TestFilters.py | 23 ++++++++++++++++ 3 files changed, 81 insertions(+) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 908eaa2fccb..18aa31dcf30 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -208,6 +208,32 @@ To get the symmetric difference of 2 lists (items exclusive to each list):: {{ list1 | symmetric_difference(list2) }} +.. _version_comparison_filters: + +Version Comparison Filters +-------------------------- + +.. 
versionadded:: 1.6
+To compare a version number, such as checking if the ``ansible_distribution_version``
+version is greater than or equal to '12.04', you can use the ``version_compare`` filter.
+For example, to evaluate the ``ansible_distribution_version`` fact::
+
+    {{ ansible_distribution_version | version_compare('12.04', '>=') }}
+
+If ``ansible_distribution_version`` is greater than or equal to '12.04', this filter will return True, otherwise
+it will return False.
+
+The ``version_compare`` filter accepts the following operators::
+
+    <, lt, <=, le, >, gt, >=, ge, ==, =, eq, !=, <>, ne
+
+This filter also accepts a 3rd parameter, ``strict``, which defines if strict version parsing should
+be used.  The default is ``False``, and if set as ``True`` will use more strict version parsing::
+
+    {{ sample_version_var | version_compare('1.0', operator='lt', strict=True) }}
+
 .. _other_useful_filters:
 
 Other Useful Filters
diff --git a/lib/ansible/runner/filter_plugins/core.py b/lib/ansible/runner/filter_plugins/core.py
index a511b18bcb1..8bad776cbe8 100644
--- a/lib/ansible/runner/filter_plugins/core.py
+++ b/lib/ansible/runner/filter_plugins/core.py
@@ -23,8 +23,10 @@ import types
 import pipes
 import glob
 import re
+import operator as py_operator
 from ansible import errors
 from ansible.utils import md5s
+from distutils.version import LooseVersion, StrictVersion
 
 def to_nice_yaml(*a, **kw):
     '''Make verbose, human readable yaml'''
@@ -151,6 +153,33 @@ def symmetric_difference(a, b):
 def union(a, b):
     return set(a).union(b)
 
+def version_compare(value, version, operator='eq', strict=False):
+    ''' Perform a version comparison on a value '''
+    op_map = {
+        '==': 'eq', '=': 'eq', 'eq': 'eq',
+        '<': 'lt', 'lt': 'lt',
+        '<=': 'le', 'le': 'le',
+        '>': 'gt', 'gt': 'gt',
+        '>=': 'ge', 'ge': 'ge',
+        '!=': 'ne', '<>': 'ne', 'ne': 'ne'
+    }
+
+    if strict:
+        Version = StrictVersion
+    else:
+        Version = LooseVersion
+
+    if operator in op_map:
+        operator = op_map[operator]
+    else:
+        raise errors.AnsibleFilterError('Invalid operator type')
+
+    try:
+        method = getattr(py_operator, operator)
+        return method(Version(str(value)), Version(str(version)))
+    except Exception, e:
+        raise errors.AnsibleFilterError('Version comparison: %s' % e)
+
 class FilterModule(object):
     ''' Ansible core jinja2 filters '''
 
@@ -213,5 +242,8 @@ class FilterModule(object):
         'difference': difference,
         'symmetric_difference': symmetric_difference,
         'union': union,
+
+        # version comparison
+        'version_compare': version_compare,
     }
 
diff --git a/test/units/TestFilters.py b/test/units/TestFilters.py
index e79d4c8970d..9389147516c 100644
--- a/test/units/TestFilters.py
+++ b/test/units/TestFilters.py
@@ -152,3 +152,26 @@ class TestFilters(unittest.TestCase):
 
         #out = open(dest).read()
         #self.assertEqual(DEST, out)
+    def test_version_compare(self):
+        self.assertTrue(ansible.runner.filter_plugins.core.version_compare(0, 1.1, 'lt', False))
+        self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.2, '<'))
+
+        self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, '=='))
+        self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, '='))
+        self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, 'eq'))
+
+
+        self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, 'gt'))
+        self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '>'))
+
+        self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, 
'ne')) + self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '!=')) + self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '<>')) + + self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.1, 'ge')) + self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.1, '>=')) + + self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.1, 'le')) + self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.0, 1.1, '<=')) + + self.assertTrue(ansible.runner.filter_plugins.core.version_compare('12.04', 12, 'ge')) From b0bcef06010449ec611bd1e5d7a466608b4d735d Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 3 Apr 2014 08:25:01 -0400 Subject: [PATCH 679/772] Update README.md Add counter --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 853025911f9..b6521eca4c2 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ -[![PyPI version](https://badge.fury.io/py/ansible.png)](http://badge.fury.io/py/ansible) +[![PyPI version](https://badge.fury.io/py/ansible.png)](http://badge.fury.io/py/ansible) [![PyPI downloads](https://pypip.in/d/ansible/badge.png)[https://pypi.python.org/pypi/ansible) + Ansible ======= From 31cf5b39f01832cb05f3ebde17abbd480bd94590 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 3 Apr 2014 08:25:26 -0400 Subject: [PATCH 680/772] Update README.md formatting --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b6521eca4c2..5c6ecdecb22 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![PyPI version](https://badge.fury.io/py/ansible.png)](http://badge.fury.io/py/ansible) [![PyPI downloads](https://pypip.in/d/ansible/badge.png)[https://pypi.python.org/pypi/ansible) +[![PyPI version](https://badge.fury.io/py/ansible.png)](http://badge.fury.io/py/ansible) [![PyPI downloads](https://pypip.in/d/ansible/badge.png)](https://pypi.python.org/pypi/ansible) Ansible From 1fc4b89e64dc75dd0bd8abe94d8302a726d07cfb Mon Sep 17 00:00:00 2001 From: Gael Pasgrimaud Date: Thu, 3 Apr 2014 14:34:43 +0200 Subject: [PATCH 681/772] Fix a typo in the subversion module documentation --- library/source_control/subversion | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/source_control/subversion b/library/source_control/subversion index 80b77b17180..bda8a8620a1 100644 --- a/library/source_control/subversion +++ b/library/source_control/subversion @@ -27,7 +27,7 @@ description: version_added: "0.7" author: Dane Summers, njharman@gmail.com notes: - - Requres I(svn) to be installed on the client. + - Requires I(svn) to be installed on the client. 
+  - Requires I(svn) to be installed on the client.
 requirements: []
 options:
   repo:

From cdc0819939cdab92515bd814a44f6a06d55e4d68 Mon Sep 17 00:00:00 2001
From: James Tanner
Date: Thu, 3 Apr 2014 12:02:40 -0400
Subject: [PATCH 682/772] Fixes #6088 turn off sudo and su if the connection is local and the sudo user matches the current user

---
 lib/ansible/runner/__init__.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index bb10ec1bb2b..9324bfd5f40 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -31,6 +31,7 @@ import sys
 import pipes
 import jinja2
 import subprocess
+import getpass
 
 import ansible.constants as C
 import ansible.inventory
@@ -931,6 +932,12 @@ class Runner(object):
             if conn.user == sudo_user or conn.user == su_user:
                 sudoable = False
                 su = False
+            else:
+                # assume connection type is local if no user attribute
+                this_user = getpass.getuser()
+                if this_user == sudo_user or this_user == su_user:
+                    sudoable = False
+                    su = False
 
         if su:
             rc, stdin, stdout, stderr = conn.exec_command(cmd,

From b66006280186ab4295dc7a7fdee467076993c322 Mon Sep 17 00:00:00 2001
From: James Tanner
Date: Thu, 3 Apr 2014 13:53:43 -0400
Subject: [PATCH 683/772] Fixes #6454 verify ec2 key fingerprints

---
 library/cloud/ec2_key | 47 +++++++++++++++++++++++++++++++++++++++----
 1 file changed, 43 insertions(+), 4 deletions(-)

diff --git a/library/cloud/ec2_key b/library/cloud/ec2_key
index 9e94d22a0b3..99ea5bcc3e0 100644
--- a/library/cloud/ec2_key
+++ b/library/cloud/ec2_key
@@ -132,6 +132,10 @@ except ImportError:
     print "failed=True msg='boto required for this module'"
     sys.exit(1)
 
+import random
+import string
+
+
 def main():
     argument_spec = ec2_argument_spec()
     argument_spec.update(dict(
@@ -187,10 +191,45 @@ def main():
     # Ensure requested key is present
     elif state == 'present':
         if key:
-            '''existing key found'''
-            # Should check if the fingerprint is the same - but lack of info
-            # and different fingerprint provided (pub or private) depending if
-            # the key has been created of imported.
+            # existing key found
+            if key_material:
+                # EC2's fingerprints are non-trivial to generate, so push this key
+                # to a temporary name and make ec2 calculate the fingerprint for us.
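+                # (AWS computes the fingerprint of an imported key from the
+                # public key material itself -- reportedly the MD5 digest of
+                # the key in DER form -- so importing under a throwaway name
+                # yields a fingerprint that can be compared directly.)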
+ # + # http://blog.jbrowne.com/?p=23 + # https://forums.aws.amazon.com/thread.jspa?messageID=352828 + + # find an unused name + test = 'empty' + while test: + randomchars = [random.choice(string.ascii_letters + string.digits) for x in range(0,10)] + tmpkeyname = "ansible-" + ''.join(randomchars) + test = ec2.get_key_pair(tmpkeyname) + + # create tmp key + tmpkey = ec2.import_key_pair(tmpkeyname, key_material) + # get tmp key fingerprint + tmpfingerprint = tmpkey.fingerprint + # delete tmp key + tmpkey.delete() + + if key.fingerprint != tmpfingerprint: + if not module.check_mode: + key.delete() + key = ec2.import_key_pair(name, key_material) + + if wait: + start = time.time() + action_complete = False + while (time.time() - start) < wait_timeout: + if ec2.get_key_pair(name): + action_complete = True + break + time.sleep(1) + if not action_complete: + module.fail_json(msg="timed out while waiting for the key to be re-created") + + changed = True pass # if the key doesn't exist, create it now From cca028187e08ca6acc8e2b1ffe16a1b8b99f7265 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 3 Apr 2014 15:21:54 -0400 Subject: [PATCH 684/772] Fixes #6750 Check git subcommand for --remote and expose errors --- library/source_control/git | 39 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/library/source_control/git b/library/source_control/git index 8fffdbfaf89..d75ee137bc6 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -151,6 +151,34 @@ EXAMPLES = ''' import re import tempfile +def get_submodule_update_params(module, git_path, cwd): + + #or: git submodule [--quiet] update [--init] [-N|--no-fetch] + #[-f|--force] [--rebase] [--reference ] [--merge] + #[--recursive] [--] [...] 
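+    #
+    # The code below shells out to a deliberately bad submodule command,
+    # captures git's usage message from stderr, strips the '[', ']' and '|'
+    # punctuation from the 'git submodule [--quiet] update ...' line, and
+    # keeps each token that starts with '--' (minus the dashes) as a
+    # supported option name.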
+ + params = [] + + # run a bad submodule command to get valid params + cmd = "%s submodule update -" % (git_path) + rc, stdout, stderr = module.run_command(cmd, cwd=cwd) + lines = stderr.split('\n') + update_line = None + for line in lines: + if 'git submodule [--quiet] update ' in line: + update_line = line + if update_line: + update_line = update_line.replace('[','') + update_line = update_line.replace(']','') + update_line = update_line.replace('|',' ') + parts = shlex.split(update_line) + for part in parts: + if part.startswith('--'): + part = part.replace('--', '') + params.append(part) + + return params + def write_ssh_wrapper(): module_dir = get_module_path() fd, wrapper_path = tempfile.mkstemp(prefix=module_dir + '/') @@ -383,15 +411,22 @@ def fetch(git_path, module, repo, dest, version, remote, bare): def submodule_update(git_path, module, dest): ''' init and update any submodules ''' + + # get the valid submodule params + params = get_submodule_update_params(module, git_path, dest) + # skip submodule commands if .gitmodules is not present if not os.path.exists(os.path.join(dest, '.gitmodules')): return (0, '', '') cmd = [ git_path, 'submodule', 'sync' ] (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ] + if 'remote' in params: + cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ] + else: + cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ] (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: - module.fail_json(msg="Failed to init/update submodules") + module.fail_json(msg="Failed to init/update submodules: %s" % out + err) return (rc, out, err) def switch_version(git_path, module, dest, remote, version): From 7321e23b31c1cfb5fe7c329c82d471e19c13e24f Mon Sep 17 00:00:00 2001 From: Atlas Health Date: Thu, 3 Apr 2014 02:40:38 -0700 Subject: [PATCH 685/772] fixed error related to type being defined as a number --- library/cloud/ec2_snapshot | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2_snapshot b/library/cloud/ec2_snapshot index 8673525dfe0..075fe143f84 100644 --- a/library/cloud/ec2_snapshot +++ b/library/cloud/ec2_snapshot @@ -125,7 +125,7 @@ def main(): ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True), ec2_access_key = dict(aliases=['aws_access_key', 'access_key']), wait = dict(type='bool', default='true'), - wait_timeout = dict(type='number', default=0), + wait_timeout = dict(default=0), snapshot_tags = dict(type='dict', default=dict()), ) ) From 1c9950678a8a226020c6a1a32d6a60f942495072 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 3 Apr 2014 15:26:48 -0400 Subject: [PATCH 686/772] Addresses #6750 Use --help instead of - --- library/source_control/git | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/source_control/git b/library/source_control/git index d75ee137bc6..968b763b1a4 100644 --- a/library/source_control/git +++ b/library/source_control/git @@ -160,7 +160,7 @@ def get_submodule_update_params(module, git_path, cwd): params = [] # run a bad submodule command to get valid params - cmd = "%s submodule update -" % (git_path) + cmd = "%s submodule update --help" % (git_path) rc, stdout, stderr = module.run_command(cmd, cwd=cwd) lines = stderr.split('\n') update_line = None From 2657bbcefa7c1e6f634db12c07f9b2fdf09c81aa Mon Sep 17 00:00:00 2001 From: Atlas Health Date: Thu, 3 Apr 2014 02:45:47 -0700 Subject: [PATCH 687/772] ec2: 
added ability to set EBS optimized instances added version --- library/cloud/ec2 | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/library/cloud/ec2 b/library/cloud/ec2 index a6bd32d58a4..50496d60ba3 100644 --- a/library/cloud/ec2 +++ b/library/cloud/ec2 @@ -217,6 +217,12 @@ options: required: false default: null aliases: [] + ebs_optimized: + version_added: "1.6" + description: + - whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) + required: false + default: false exact_count: version_added: "1.5" description: @@ -604,7 +610,8 @@ def get_instance_info(inst): 'root_device_type': inst.root_device_type, 'root_device_name': inst.root_device_name, 'state': inst.state, - 'hypervisor': inst.hypervisor} + 'hypervisor': inst.hypervisor, + 'ebs_optimized': inst.ebs_optimized} try: instance_info['virtualization_type'] = getattr(inst,'virtualization_type') except AttributeError: @@ -767,6 +774,7 @@ def create_instances(module, ec2, override_count=None): private_ip = module.params.get('private_ip') instance_profile_name = module.params.get('instance_profile_name') volumes = module.params.get('volumes') + ebs_optimized = module.params.get('ebs_optimized') exact_count = module.params.get('exact_count') count_tag = module.params.get('count_tag') source_dest_check = module.boolean(module.params.get('source_dest_check')) @@ -825,6 +833,7 @@ def create_instances(module, ec2, override_count=None): 'instance_type': instance_type, 'kernel_id': kernel, 'ramdisk_id': ramdisk, + 'ebs_optimized': ebs_optimized, 'user_data': user_data} if boto_supports_profile_name_arg(ec2): @@ -1144,6 +1153,7 @@ def main(): exact_count = dict(type='int', default=None), count_tag = dict(), volumes = dict(type='list'), + ebs_optimized = dict(), ) ) From ee0a0b492b5536e0cc8c8e561875254698416eb4 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 3 Apr 2014 15:26:02 -0500 Subject: [PATCH 688/772] Allow isprintable() util function to work with unicode Fixes #6842 --- lib/ansible/utils/string_functions.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/ansible/utils/string_functions.py b/lib/ansible/utils/string_functions.py index 4972cc07625..3b452718f74 100644 --- a/lib/ansible/utils/string_functions.py +++ b/lib/ansible/utils/string_functions.py @@ -1,9 +1,12 @@ def isprintable(instring): - #http://stackoverflow.com/a/3637294 - import string - printset = set(string.printable) - isprintable = set(instring).issubset(printset) - return isprintable + if isinstance(instring, str): + #http://stackoverflow.com/a/3637294 + import string + printset = set(string.printable) + isprintable = set(instring).issubset(printset) + return isprintable + else: + return True def count_newlines_from_end(str): i = len(str) From bb6f7a267a4ed6b9afa87b8052026adf43e597c6 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 24 Feb 2014 12:47:42 -0600 Subject: [PATCH 689/772] Add support for shared module documentation fragments --- lib/ansible/utils/module_docs.py | 20 ++++ lib/ansible/utils/module_docs_fragments.py | 116 +++++++++++++++++++++ library/cloud/rax | 74 +++---------- library/cloud/rax_clb | 64 ++++++------ library/cloud/rax_clb_nodes | 45 +++----- library/cloud/rax_dns | 25 +---- library/cloud/rax_dns_record | 34 +++--- library/cloud/rax_facts | 55 +--------- library/cloud/rax_files | 31 ++---- library/cloud/rax_files_objects | 47 +++------ library/cloud/rax_keypair | 58 +---------- 
library/cloud/rax_network | 29 +----- library/cloud/rax_queue | 30 +----- 13 files changed, 253 insertions(+), 375 deletions(-) create mode 100644 lib/ansible/utils/module_docs_fragments.py diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index 3a5d0782961..c356c973501 100644 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -23,6 +23,9 @@ import ast import yaml import traceback +from ansible.utils import module_docs_fragments as fragments + + # modules that are ok that they do not have documentation strings BLACKLIST_MODULES = [ 'async_wrapper', 'accelerate', 'async_status' @@ -46,6 +49,23 @@ def get_docstring(filename, verbose=False): if isinstance(child, ast.Assign): if 'DOCUMENTATION' in (t.id for t in child.targets): doc = yaml.safe_load(child.value.s) + fragment_name = doc.get('extends_documentation_fragment', + 'DOESNOTEXIST').upper() + fragment_yaml = getattr(fragments, fragment_name, None) + if fragment_yaml: + fragment = yaml.safe_load(fragment_yaml) + if fragment.has_key('notes'): + notes = fragment.pop('notes') + if notes: + if not doc.has_key('notes'): + doc['notes'] = [] + doc['notes'].extend(notes) + for key, value in fragment.items(): + if not doc.has_key(key): + doc[key] = value + else: + doc[key].update(value) + if 'EXAMPLES' in (t.id for t in child.targets): plainexamples = child.value.s[1:] # Skip first empty line except: diff --git a/lib/ansible/utils/module_docs_fragments.py b/lib/ansible/utils/module_docs_fragments.py new file mode 100644 index 00000000000..23300ba85e9 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments.py @@ -0,0 +1,116 @@ +# (c) 2012, Matt Martz +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
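+
+# A module opts in to a fragment by naming it in its DOCUMENTATION string,
+# e.g. 'extends_documentation_fragment: RACKSPACE'; get_docstring() in
+# module_docs.py then merges the fragment's options and notes into that
+# module's own documentation.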
+ +RACKSPACE_AND_OPENSTACK = """ +options: + api_key: + description: + - Rackspace API key (overrides I(credentials)) + aliases: + - password + auth_endpoint: + description: + - The URI of the authentication service + default: https://identity.api.rackspacecloud.com/v2.0/ + version_added: 1.5 + credentials: + description: + - File to find the Rackspace credentials in (ignored if I(api_key) and + I(username) are provided) + default: null + aliases: + - creds_file + env: + description: + - Environment as configured in ~/.pyrax.cfg, + see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration) + version_added: 1.5 + identity_type: + description: + - Authentication machanism to use, such as rackspace or keystone + default: rackspace + version_added: 1.5 + region: + description: + - Region to create an instance in + default: DFW + tenant_id: + description: + - The tenant ID used for authentication + version_added: 1.5 + tenant_name: + description: + - The tenant name used for authentication + version_added: 1.5 + username: + description: + - Rackspace username (overrides I(credentials)) + verify_ssl: + description: + - Whether or not to require SSL validation of API endpoints + version_added: 1.5 +requirements: + - pyrax +notes: + - The following environment variables can be used, C(RAX_USERNAME), + C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). + - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file + appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) + - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file + - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +""" + +RACKSPACE = """ +options: + api_key: + description: + - Rackspace API key (overrides I(credentials)) + aliases: + - password + credentials: + description: + - File to find the Rackspace credentials in (ignored if I(api_key) and + I(username) are provided) + default: null + aliases: + - creds_file + env: + description: + - Environment as configured in ~/.pyrax.cfg, + see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration) + version_added: 1.5 + region: + description: + - Region to create an instance in + default: DFW + username: + description: + - Rackspace username (overrides I(credentials)) + verify_ssl: + description: + - Whether or not to require SSL validation of API endpoints + version_added: 1.5 +requirements: + - pyrax +notes: + - The following environment variables can be used, C(RAX_USERNAME), + C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). + - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file + appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) + - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file + - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +""" diff --git a/library/cloud/rax b/library/cloud/rax index 0798ce4fd94..387640753cd 100644 --- a/library/cloud/rax +++ b/library/cloud/rax @@ -23,52 +23,6 @@ description: waits for it to be 'running'. 
version_added: "1.2" options: - api_key: - description: - - Rackspace API key (overrides I(credentials)) - aliases: - - password - auth_endpoint: - description: - - The URI of the authentication service - default: https://identity.api.rackspacecloud.com/v2.0/ - version_added: 1.5 - credentials: - description: - - File to find the Rackspace credentials in (ignored if I(api_key) and - I(username) are provided) - default: null - aliases: - - creds_file - env: - description: - - Environment as configured in ~/.pyrax.cfg, - see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration) - version_added: 1.5 - identity_type: - description: - - Authentication machanism to use, such as rackspace or keystone - default: rackspace - version_added: 1.5 - region: - description: - - Region to create an instance in - default: DFW - tenant_id: - description: - - The tenant ID used for authentication - version_added: 1.5 - tenant_name: - description: - - The tenant name used for authentication - version_added: 1.5 - username: - description: - - Rackspace username (overrides I(credentials)) - verify_ssl: - description: - - Whether or not to require SSL validation of API endpoints - version_added: 1.5 auto_increment: description: - Whether or not to increment a single number with the name of the @@ -89,7 +43,9 @@ options: disk_config: description: - Disk partitioning strategy - choices: ['auto', 'manual'] + choices: + - auto + - manual version_added: '1.4' default: auto exact_count: @@ -135,7 +91,8 @@ options: description: - key pair to use on the instance default: null - aliases: ['keypair'] + aliases: + - keypair meta: description: - A hash of metadata to associate with the instance @@ -149,31 +106,30 @@ options: - The network to attach to the instances. If specified, you must include ALL networks including the public and private interfaces. Can be C(id) or C(label). - default: ['public', 'private'] + default: + - public + - private version_added: 1.4 state: description: - Indicate desired state of the resource - choices: ['present', 'absent'] + choices: + - present + - absent default: present wait: description: - wait for the instance to be in state 'running' before returning default: "no" - choices: [ "yes", "no" ] + choices: + - "yes" + - "no" wait_timeout: description: - how long before wait gives up, in seconds default: 300 -requirements: [ "pyrax" ] author: Jesse Keating, Matt Martz -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) 
+extends_documentation_fragment: RACKSPACE_AND_OPENSTACK ''' EXAMPLES = ''' diff --git a/library/cloud/rax_clb b/library/cloud/rax_clb index bd653eff8e8..b462908d540 100644 --- a/library/cloud/rax_clb +++ b/library/cloud/rax_clb @@ -25,17 +25,13 @@ options: algorithm: description: - algorithm for the balancer being created - choices: ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN', 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN'] + choices: + - RANDOM + - LEAST_CONNECTIONS + - ROUND_ROBIN + - WEIGHTED_LEAST_CONNECTIONS + - WEIGHTED_ROUND_ROBIN default: LEAST_CONNECTIONS - api_key: - description: - - Rackspace API key (overrides C(credentials)) - credentials: - description: - - File to find the Rackspace credentials in (ignored if C(api_key) and - C(username) are provided) - default: null - aliases: ['creds_file'] meta: description: - A hash of metadata to associate with the instance @@ -51,16 +47,32 @@ options: protocol: description: - Protocol for the balancer being created - choices: ['DNS_TCP', 'DNS_UDP' ,'FTP', 'HTTP', 'HTTPS', 'IMAPS', 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP', 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP'] + choices: + - DNS_TCP + - DNS_UDP + - FTP + - HTTP + - HTTPS + - IMAPS + - IMAPv4 + - LDAP + - LDAPS + - MYSQL + - POP3 + - POP3S + - SMTP + - TCP + - TCP_CLIENT_FIRST + - UDP + - UDP_STREAM + - SFTP default: HTTP - region: - description: - - Region to create the load balancer in - default: DFW state: description: - Indicate desired state of the resource - choices: ['present', 'absent'] + choices: + - present + - absent default: present timeout: description: @@ -69,11 +81,10 @@ options: type: description: - type of interface for the balancer being created - choices: ['PUBLIC', 'SERVICENET'] + choices: + - PUBLIC + - SERVICENET default: PUBLIC - username: - description: - - Rackspace username (overrides C(credentials)) vip_id: description: - Virtual IP ID to use when creating the load balancer for purposes of @@ -83,20 +94,15 @@ options: description: - wait for the balancer to be in state 'running' before returning default: "no" - choices: [ "yes", "no" ] + choices: + - "yes" + - "no" wait_timeout: description: - how long before wait gives up, in seconds default: 300 -requirements: [ "pyrax" ] author: Christopher H. Laco, Matt Martz -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) 
+extends_documentation_fragment: RACKSPACE ''' EXAMPLES = ''' diff --git a/library/cloud/rax_clb_nodes b/library/cloud/rax_clb_nodes index f34fe6dde83..38b4d752676 100644 --- a/library/cloud/rax_clb_nodes +++ b/library/cloud/rax_clb_nodes @@ -26,21 +26,15 @@ options: required: false description: - IP address or domain name of the node - api_key: - required: false - description: - - Rackspace API key (overrides C(credentials)) condition: required: false - choices: [ "enabled", "disabled", "draining" ] + choices: + - enabled + - disabled + - draining description: - Condition for the node, which determines its role within the load balancer - credentials: - required: false - description: - - File to find the Rackspace credentials in (ignored if C(api_key) and - C(username) are provided) load_balancer_id: required: true type: integer @@ -56,35 +50,27 @@ options: type: integer description: - Port number of the load balanced service on the node - region: - required: false - description: - - Region to authenticate in state: required: false default: "present" - choices: [ "present", "absent" ] + choices: + - present + - absent description: - Indicate desired state of the node type: required: false - choices: [ "primary", "secondary" ] + choices: + - primary + - secondary description: - Type of node - username: - required: false - description: - - Rackspace username (overrides C(credentials)) - virtualenv: - required: false - description: - - Path to a virtualenv that should be activated before doing anything. - The virtualenv has to already exist. Useful if installing pyrax - globally is not an option. wait: required: false default: "no" - choices: [ "yes", "no" ] + choices: + - "yes" + - "no" description: - Wait for the load balancer to become active before returning wait_timeout: @@ -97,11 +83,8 @@ options: required: false description: - Weight of node -requirements: [ "pyrax" ] author: Lukasz Kawczynski -notes: - - "The following environment variables can be used: C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDENTIALS) and C(RAX_REGION)." +extends_documentation_fragment: RACKSPACE ''' EXAMPLES = ''' diff --git a/library/cloud/rax_dns b/library/cloud/rax_dns index 4c47d55fbbf..d63a9aeaa09 100644 --- a/library/cloud/rax_dns +++ b/library/cloud/rax_dns @@ -22,18 +22,9 @@ description: - Manage domains on Rackspace Cloud DNS version_added: 1.5 options: - api_key: - description: - - Rackspace API key (overrides C(credentials)) comment: description: - Brief description of the domain. Maximum length of 160 characters - credentials: - description: - - File to find the Rackspace credentials in (ignored if C(api_key) and - C(username) are provided) - default: null - aliases: ['creds_file'] email: desctiption: - Email address of the domain administrator @@ -43,24 +34,16 @@ options: state: description: - Indicate desired state of the resource - choices: ['present', 'absent'] + choices: + - present + - absent default: present ttl: description: - Time to live of domain in seconds default: 3600 - username: - description: - - Rackspace username (overrides C(credentials)) -requirements: [ "pyrax" ] author: Matt Martz -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. 
See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +extends_documentation_fragment: RACKSPACE ''' EXAMPLES = ''' diff --git a/library/cloud/rax_dns_record b/library/cloud/rax_dns_record index 3e7f37f0def..ca5b24de1e8 100644 --- a/library/cloud/rax_dns_record +++ b/library/cloud/rax_dns_record @@ -22,18 +22,9 @@ description: - Manage DNS records on Rackspace Cloud DNS version_added: 1.5 options: - api_key: - description: - - Rackspace API key (overrides C(credentials)) comment: description: - Brief description of the domain. Maximum length of 160 characters - credentials: - description: - - File to find the Rackspace credentials in (ignored if C(api_key) and - C(username) are provided) - default: null - aliases: ['creds_file'] data: description: - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for @@ -54,7 +45,9 @@ options: state: description: - Indicate desired state of the resource - choices: ['present', 'absent'] + choices: + - present + - absent default: present ttl: description: @@ -63,20 +56,17 @@ options: type: description: - DNS record type - choices: ['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'] + choices: + - A + - AAAA + - CNAME + - MX + - NS + - SRV + - TXT default: A - username: - description: - - Rackspace username (overrides C(credentials)) -requirements: [ "pyrax" ] author: Matt Martz -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +extends_documentation_fragment: RACKSPACE ''' EXAMPLES = ''' diff --git a/library/cloud/rax_facts b/library/cloud/rax_facts index ca117a665a1..655b1bbf199 100644 --- a/library/cloud/rax_facts +++ b/library/cloud/rax_facts @@ -22,52 +22,6 @@ description: - Gather facts for Rackspace Cloud Servers. 
version_added: "1.4" options: - api_key: - description: - - Rackspace API key (overrides I(credentials)) - aliases: - - password - auth_endpoint: - description: - - The URI of the authentication service - default: https://identity.api.rackspacecloud.com/v2.0/ - version_added: 1.5 - credentials: - description: - - File to find the Rackspace credentials in (ignored if I(api_key) and - I(username) are provided) - default: null - aliases: - - creds_file - env: - description: - - Environment as configured in ~/.pyrax.cfg, - see https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration - version_added: 1.5 - identity_type: - description: - - Authentication machanism to use, such as rackspace or keystone - default: rackspace - version_added: 1.5 - region: - description: - - Region to create an instance in - default: DFW - tenant_id: - description: - - The tenant ID used for authentication - version_added: 1.5 - tenant_name: - description: - - The tenant name used for authentication - version_added: 1.5 - username: - description: - - Rackspace username (overrides I(credentials)) - verify_ssl: - description: - - Whether or not to require SSL validation of API endpoints - version_added: 1.5 address: description: - Server IP address to retrieve facts for, will match any IP assigned to @@ -79,15 +33,8 @@ options: description: - Server name to retrieve facts for default: null -requirements: [ "pyrax" ] author: Matt Martz -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +extends_documentation_fragment: RACKSPACE_AND_OPENSTACK ''' EXAMPLES = ''' diff --git a/library/cloud/rax_files b/library/cloud/rax_files index bfd1a0c3d13..66dc1b91be5 100644 --- a/library/cloud/rax_files +++ b/library/cloud/rax_files @@ -25,25 +25,18 @@ description: - Manipulate Rackspace Cloud Files Containers version_added: "1.5" options: - api_key: - description: - - Rackspace API key (overrides I(credentials)) clear_meta: description: - Optionally clear existing metadata when applying metadata to existing containers. Selecting this option is only appropriate when setting type=meta - choices: ["yes", "no"] + choices: + - "yes" + - "no" default: "no" container: description: - The container to use for container or metadata operations. required: true - credentials: - description: - - File to find the Rackspace credentials in (ignored if I(api_key) and - I(username) are provided) - default: null - aliases: ['creds_file'] meta: description: - A hash of items to set as metadata values on a container @@ -71,26 +64,18 @@ options: type: description: - Type of object to do work on, i.e. 
metadata object or a container object - choices: ["file", "meta"] - default: "file" - username: - description: - - Rackspace username (overrides I(credentials)) + choices: + - file + - meta + default: file web_error: description: - Sets an object to be presented as the HTTP error page when accessed by the CDN URL web_index: description: - Sets an object to be presented as the HTTP index page when accessed by the CDN URL -requirements: [ "pyrax" ] author: Paul Durivage -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +extends_documentation_fragment: RACKSPACE ''' EXAMPLES = ''' diff --git a/library/cloud/rax_files_objects b/library/cloud/rax_files_objects index 1c200b65c88..ef229d7a95b 100644 --- a/library/cloud/rax_files_objects +++ b/library/cloud/rax_files_objects @@ -25,26 +25,19 @@ description: - Upload, download, and delete objects in Rackspace Cloud Files version_added: "1.5" options: - api_key: - description: - - Rackspace API key (overrides I(credentials)) - default: null clear_meta: description: - Optionally clear existing metadata when applying metadata to existing objects. Selecting this option is only appropriate when setting type=meta - choices: ["yes", "no"] + choices: + - "yes" + - "no" default: "no" container: description: - The container to use for file object operations. required: true default: null - credentials: - description: - - File to find the Rackspace credentials in (ignored if I(api_key) and I(username) are provided) - default: null - aliases: ['creds_file'] dest: description: - The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder". @@ -64,12 +57,11 @@ options: - The method of operation to be performed. For example, put to upload files to Cloud Files, get to download files from Cloud Files or delete to delete remote objects in Cloud Files - choices: ["get", "put", "delete"] - default: "get" - region: - description: - - Region in which to work. Maps to a Rackspace Cloud region, i.e. DFW, ORD, IAD, SYD, LON - default: DFW + choices: + - get + - put + - delete + default: get src: description: - Source from which to upload files. Used to specify a remote object as a source for @@ -81,7 +73,9 @@ options: - Used to specify whether to maintain nested directory structure when downloading objects from Cloud Files. Setting to false downloads the contents of a container to a single, flat directory - choices: ["yes", "no"] + choices: + - yes + - "no" default: "yes" state: description: @@ -92,21 +86,12 @@ options: description: - Type of object to do work on - Metadata object or a file object - choices: ["file", "meta"] - default: "file" - username: - description: - - Rackspace username (overrides I(credentials)) - default: null -requirements: [ "pyrax" ] + choices: + - file + - meta + default: file author: Paul Durivage -notes: - - The following environment variables can be used, C(RAX_USERNAME), C(RAX_API_KEY), - C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file appropriate - for pyrax. 
See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +extends_documentation_fragment: RACKSPACE ''' EXAMPLES = ''' diff --git a/library/cloud/rax_keypair b/library/cloud/rax_keypair index bd5270b9e3d..fa195fc0d59 100644 --- a/library/cloud/rax_keypair +++ b/library/cloud/rax_keypair @@ -22,52 +22,6 @@ description: - Create a keypair for use with Rackspace Cloud Servers version_added: 1.5 options: - api_key: - description: - - Rackspace API key (overrides I(credentials)) - aliases: - - password - auth_endpoint: - description: - - The URI of the authentication service - default: https://identity.api.rackspacecloud.com/v2.0/ - version_added: 1.5 - credentials: - description: - - File to find the Rackspace credentials in (ignored if I(api_key) and - I(username) are provided) - default: null - aliases: - - creds_file - env: - description: - - Environment as configured in ~/.pyrax.cfg, - see https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration - version_added: 1.5 - identity_type: - description: - - Authentication machanism to use, such as rackspace or keystone - default: rackspace - version_added: 1.5 - region: - description: - - Region to create an instance in - default: DFW - tenant_id: - description: - - The tenant ID used for authentication - version_added: 1.5 - tenant_name: - description: - - The tenant name used for authentication - version_added: 1.5 - username: - description: - - Rackspace username (overrides I(credentials)) - verify_ssl: - description: - - Whether or not to require SSL validation of API endpoints - version_added: 1.5 name: description: - Name of keypair @@ -79,19 +33,15 @@ options: state: description: - Indicate desired state of the resource - choices: ['present', 'absent'] + choices: + - present + - absent default: present -requirements: [ "pyrax" ] author: Matt Martz notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) - Keypairs cannot be manipulated, only created and deleted. To "update" a keypair you must first delete and then recreate. 
+extends_documentation_fragment: RACKSPACE_AND_OPENSTACK ''' EXAMPLES = ''' diff --git a/library/cloud/rax_network b/library/cloud/rax_network index 05f3f554e36..566016dde64 100644 --- a/library/cloud/rax_network +++ b/library/cloud/rax_network @@ -25,20 +25,10 @@ options: state: description: - Indicate desired state of the resource - choices: ['present', 'absent'] + choices: + - present + - absent default: present - credentials: - description: - - File to find the Rackspace credentials in (ignored if C(api_key) and - C(username) are provided) - default: null - aliases: ['creds_file'] - api_key: - description: - - Rackspace API key (overrides C(credentials)) - username: - description: - - Rackspace username (overrides C(credentials)) label: description: - Label (name) to give the network @@ -47,19 +37,8 @@ options: description: - cidr of the network being created default: null - region: - description: - - Region to create the network in - default: DFW -requirements: [ "pyrax" ] author: Christopher H. Laco, Jesse Keating -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS) points to a credentials file - appropriate for pyrax - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +extends_documentation_fragment: RACKSPACE_AND_OPENSTACK ''' EXAMPLES = ''' diff --git a/library/cloud/rax_queue b/library/cloud/rax_queue index ee873739a34..0faceb128d4 100644 --- a/library/cloud/rax_queue +++ b/library/cloud/rax_queue @@ -22,40 +22,19 @@ description: - creates / deletes a Rackspace Public Cloud queue. version_added: "1.5" options: - api_key: - description: - - Rackspace API key (overrides C(credentials)) - credentials: - description: - - File to find the Rackspace credentials in (ignored if C(api_key) and - C(username) are provided) - default: null - aliases: ['creds_file'] name: description: - Name to give the queue default: null - region: - description: - - Region to create the load balancer in - default: DFW state: description: - Indicate desired state of the resource - choices: ['present', 'absent'] + choices: + - present + - absent default: present - username: - description: - - Rackspace username (overrides C(credentials)) -requirements: [ "pyrax" ] author: Christopher H. Laco, Matt Martz -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) 
+extends_documentation_fragment: RACKSPACE ''' EXAMPLES = ''' @@ -68,7 +47,6 @@ EXAMPLES = ''' local_action: module: rax_queue credentials: ~/.raxpub - client_id: unique-client-name name: my-queue region: DFW state: present From 7b5f89ec7c06ed9b24fcb5c11334e58fc62f7410 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 24 Feb 2014 20:48:15 -0600 Subject: [PATCH 690/772] Use PluginLoader for module docs fragments --- lib/ansible/utils/module_docs.py | 25 +++++++--- .../utils/module_docs_fragments/__init__.py | 0 .../rackspace.py} | 48 +++++++++++-------- lib/ansible/utils/plugins.py | 7 ++- library/cloud/rax | 4 +- library/cloud/rax_clb | 4 +- library/cloud/rax_clb_nodes | 4 +- library/cloud/rax_dns | 4 +- library/cloud/rax_dns_record | 4 +- library/cloud/rax_facts | 4 +- library/cloud/rax_files | 4 +- library/cloud/rax_files_objects | 4 +- library/cloud/rax_keypair | 4 +- library/cloud/rax_network | 4 +- library/cloud/rax_queue | 4 +- 15 files changed, 85 insertions(+), 39 deletions(-) create mode 100644 lib/ansible/utils/module_docs_fragments/__init__.py rename lib/ansible/utils/{module_docs_fragments.py => module_docs_fragments/rackspace.py} (92%) diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index c356c973501..140b3217caf 100644 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -23,8 +23,7 @@ import ast import yaml import traceback -from ansible.utils import module_docs_fragments as fragments - +from ansible import utils # modules that are ok that they do not have documentation strings BLACKLIST_MODULES = [ @@ -37,6 +36,10 @@ def get_docstring(filename, verbose=False): in the given file. Parse DOCUMENTATION from YAML and return the YAML doc or None together with EXAMPLES, as plain text. + + DOCUMENTATION can be extended using documentation fragments + loaded by the PluginLoader from the module_docs_fragments + directory. """ doc = None @@ -49,10 +52,20 @@ def get_docstring(filename, verbose=False): if isinstance(child, ast.Assign): if 'DOCUMENTATION' in (t.id for t in child.targets): doc = yaml.safe_load(child.value.s) - fragment_name = doc.get('extends_documentation_fragment', - 'DOESNOTEXIST').upper() - fragment_yaml = getattr(fragments, fragment_name, None) - if fragment_yaml: + fragment_slug = doc.get('extends_documentation_fragment', + 'doesnotexist').lower() + + # Allow the module to specify a var other than DOCUMENTATION + # to pull the fragment from, using dot notation as a separator + if '.' 
in fragment_slug: + fragment_name, fragment_var = fragment_slug.split('.', 1) + fragment_var = fragment_var.upper() + else: + fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION' + + fragment_class = utils.plugins.fragment_loader.get(fragment_name) + if fragment_class: + fragment_yaml = getattr(fragment_class, fragment_var, '{}') fragment = yaml.safe_load(fragment_yaml) if fragment.has_key('notes'): notes = fragment.pop('notes') diff --git a/lib/ansible/utils/module_docs_fragments/__init__.py b/lib/ansible/utils/module_docs_fragments/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/lib/ansible/utils/module_docs_fragments.py b/lib/ansible/utils/module_docs_fragments/rackspace.py similarity index 92% rename from lib/ansible/utils/module_docs_fragments.py rename to lib/ansible/utils/module_docs_fragments/rackspace.py index 23300ba85e9..a49202c500f 100644 --- a/lib/ansible/utils/module_docs_fragments.py +++ b/lib/ansible/utils/module_docs_fragments/rackspace.py @@ -1,4 +1,4 @@ -# (c) 2012, Matt Martz +# (c) 2014, Matt Martz # # This file is part of Ansible # @@ -15,18 +15,17 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -RACKSPACE_AND_OPENSTACK = """ + +class ModuleDocFragment(object): + + # Standard Rackspace only documentation fragment + DOCUMENTATION = """ options: api_key: description: - Rackspace API key (overrides I(credentials)) aliases: - password - auth_endpoint: - description: - - The URI of the authentication service - default: https://identity.api.rackspacecloud.com/v2.0/ - version_added: 1.5 credentials: description: - File to find the Rackspace credentials in (ignored if I(api_key) and @@ -39,23 +38,10 @@ options: - Environment as configured in ~/.pyrax.cfg, see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration) version_added: 1.5 - identity_type: - description: - - Authentication machanism to use, such as rackspace or keystone - default: rackspace - version_added: 1.5 region: description: - Region to create an instance in default: DFW - tenant_id: - description: - - The tenant ID used for authentication - version_added: 1.5 - tenant_name: - description: - - The tenant name used for authentication - version_added: 1.5 username: description: - Rackspace username (overrides I(credentials)) @@ -74,13 +60,20 @@ notes: - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) """ -RACKSPACE = """ + # Documentation fragment including attributes to enable communication + # of other OpenStack clouds. Not all rax modules support this. 
+ OPENSTACK = """ options: api_key: description: - Rackspace API key (overrides I(credentials)) aliases: - password + auth_endpoint: + description: + - The URI of the authentication service + default: https://identity.api.rackspacecloud.com/v2.0/ + version_added: 1.5 credentials: description: - File to find the Rackspace credentials in (ignored if I(api_key) and @@ -93,10 +86,23 @@ options: - Environment as configured in ~/.pyrax.cfg, see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration) version_added: 1.5 + identity_type: + description: + - Authentication machanism to use, such as rackspace or keystone + default: rackspace + version_added: 1.5 region: description: - Region to create an instance in default: DFW + tenant_id: + description: + - The tenant ID used for authentication + version_added: 1.5 + tenant_name: + description: + - The tenant name used for authentication + version_added: 1.5 username: description: - Rackspace username (overrides I(credentials)) diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/utils/plugins.py index b1d0117e613..22d74c185a3 100644 --- a/lib/ansible/utils/plugins.py +++ b/lib/ansible/utils/plugins.py @@ -240,4 +240,9 @@ filter_loader = PluginLoader( 'filter_plugins' ) - +fragment_loader = PluginLoader( + 'ModuleDocFragment', + 'ansible.utils.module_docs_fragments', + os.path.join(os.path.dirname(__file__), 'module_docs_fragments'), + '', +) diff --git a/library/cloud/rax b/library/cloud/rax index 387640753cd..248790037eb 100644 --- a/library/cloud/rax +++ b/library/cloud/rax @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax @@ -129,7 +131,7 @@ options: - how long before wait gives up, in seconds default: 300 author: Jesse Keating, Matt Martz -extends_documentation_fragment: RACKSPACE_AND_OPENSTACK +extends_documentation_fragment: rackspace.openstack ''' EXAMPLES = ''' diff --git a/library/cloud/rax_clb b/library/cloud/rax_clb index b462908d540..dbc7f85b196 100644 --- a/library/cloud/rax_clb +++ b/library/cloud/rax_clb @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_clb @@ -102,7 +104,7 @@ options: - how long before wait gives up, in seconds default: 300 author: Christopher H. Laco, Matt Martz -extends_documentation_fragment: RACKSPACE +extends_documentation_fragment: rackspace ''' EXAMPLES = ''' diff --git a/library/cloud/rax_clb_nodes b/library/cloud/rax_clb_nodes index 38b4d752676..fb12967ec1d 100644 --- a/library/cloud/rax_clb_nodes +++ b/library/cloud/rax_clb_nodes @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_clb_nodes @@ -84,7 +86,7 @@ options: description: - Weight of node author: Lukasz Kawczynski -extends_documentation_fragment: RACKSPACE +extends_documentation_fragment: rackspace ''' EXAMPLES = ''' diff --git a/library/cloud/rax_dns b/library/cloud/rax_dns index d63a9aeaa09..7ed2c926d6f 100644 --- a/library/cloud/rax_dns +++ b/library/cloud/rax_dns @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_dns @@ -43,7 +45,7 @@ options: - Time to live of domain in seconds default: 3600 author: Matt Martz -extends_documentation_fragment: RACKSPACE +extends_documentation_fragment: rackspace ''' EXAMPLES = ''' diff --git a/library/cloud/rax_dns_record b/library/cloud/rax_dns_record index ca5b24de1e8..51dc26b779f 100644 --- a/library/cloud/rax_dns_record +++ b/library/cloud/rax_dns_record @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_dns_record @@ -66,7 +68,7 @@ options: - TXT default: A author: Matt Martz -extends_documentation_fragment: RACKSPACE +extends_documentation_fragment: rackspace ''' EXAMPLES = ''' diff --git a/library/cloud/rax_facts b/library/cloud/rax_facts index 655b1bbf199..f71982f4243 100644 --- a/library/cloud/rax_facts +++ b/library/cloud/rax_facts @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_facts @@ -34,7 +36,7 @@ options: - Server name to retrieve facts for default: null author: Matt Martz -extends_documentation_fragment: RACKSPACE_AND_OPENSTACK +extends_documentation_fragment: rackspace.openstack ''' EXAMPLES = ''' diff --git a/library/cloud/rax_files b/library/cloud/rax_files index 66dc1b91be5..bdb11a661f5 100644 --- a/library/cloud/rax_files +++ b/library/cloud/rax_files @@ -17,6 +17,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_files @@ -75,7 +77,7 @@ options: description: - Sets an object to be presented as the HTTP index page when accessed by the CDN URL author: Paul Durivage -extends_documentation_fragment: RACKSPACE +extends_documentation_fragment: rackspace ''' EXAMPLES = ''' diff --git a/library/cloud/rax_files_objects b/library/cloud/rax_files_objects index ef229d7a95b..9002291ceff 100644 --- a/library/cloud/rax_files_objects +++ b/library/cloud/rax_files_objects @@ -17,6 +17,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_files_objects @@ -91,7 +93,7 @@ options: - meta default: file author: Paul Durivage -extends_documentation_fragment: RACKSPACE +extends_documentation_fragment: rackspace ''' EXAMPLES = ''' diff --git a/library/cloud/rax_keypair b/library/cloud/rax_keypair index fa195fc0d59..6a42e3c99f9 100644 --- a/library/cloud/rax_keypair +++ b/library/cloud/rax_keypair @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_keypair @@ -41,7 +43,7 @@ author: Matt Martz notes: - Keypairs cannot be manipulated, only created and deleted. To "update" a keypair you must first delete and then recreate. -extends_documentation_fragment: RACKSPACE_AND_OPENSTACK +extends_documentation_fragment: rackspace.openstack ''' EXAMPLES = ''' diff --git a/library/cloud/rax_network b/library/cloud/rax_network index 566016dde64..ac3aca6991e 100644 --- a/library/cloud/rax_network +++ b/library/cloud/rax_network @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_network @@ -38,7 +40,7 @@ options: - cidr of the network being created default: null author: Christopher H. Laco, Jesse Keating -extends_documentation_fragment: RACKSPACE_AND_OPENSTACK +extends_documentation_fragment: rackspace.openstack ''' EXAMPLES = ''' diff --git a/library/cloud/rax_queue b/library/cloud/rax_queue index 0faceb128d4..7388c4ed81d 100644 --- a/library/cloud/rax_queue +++ b/library/cloud/rax_queue @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_queue @@ -34,7 +36,7 @@ options: - absent default: present author: Christopher H. Laco, Matt Martz -extends_documentation_fragment: RACKSPACE +extends_documentation_fragment: rackspace ''' EXAMPLES = ''' From 1fa19e29e86789a73420d7224e1bd4b94062b50b Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 3 Apr 2014 16:51:13 -0400 Subject: [PATCH 691/772] Use common file argument system previously implemented by @sivel for rax modules on the file modules as well (copy/file/template). Application to other cloud providers would make very good sense. 
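For reference, the fragment mechanism this builds on works roughly as
follows. A minimal sketch (the "example" fragment name and its option
names are hypothetical, chosen only for illustration; real fragments
live in lib/ansible/utils/module_docs_fragments/ and are located via
the new fragment_loader PluginLoader):

    # lib/ansible/utils/module_docs_fragments/example.py
    class ModuleDocFragment(object):

        # pulled in by 'extends_documentation_fragment: example'
        DOCUMENTATION = """
    options:
      api_key:
        description:
          - API key used for authentication
    """

        # pulled in by 'extends_documentation_fragment: example.alternate';
        # module_docs.py splits the slug on the dot and upper-cases the
        # second part to pick this variable instead of DOCUMENTATION
        ALTERNATE = """
    options:
      auth_endpoint:
        description:
          - The URI of the authentication service
    """

get_docstring() then merges the fragment's options and notes into the
module's own DOCUMENTATION, so shared arguments (credentials, region,
and so on) are documented once and reused across modules.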
--- lib/ansible/utils/module_docs.py | 13 ++++++-- library/files/copy | 5 +-- library/files/file | 54 +------------------------------- library/files/template | 5 +-- 4 files changed, 14 insertions(+), 63 deletions(-) diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index 140b3217caf..f5a696ca568 100644 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -63,17 +63,26 @@ def get_docstring(filename, verbose=False): else: fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION' - fragment_class = utils.plugins.fragment_loader.get(fragment_name) - if fragment_class: + + if fragment_slug != 'doesnotexist': + fragment_class = utils.plugins.fragment_loader.get(fragment_name) + assert fragment_class is not None + fragment_yaml = getattr(fragment_class, fragment_var, '{}') fragment = yaml.safe_load(fragment_yaml) + if fragment.has_key('notes'): notes = fragment.pop('notes') if notes: if not doc.has_key('notes'): doc['notes'] = [] doc['notes'].extend(notes) + + if 'options' not in fragment.keys(): + raise Exception("missing options in fragment, possibly misformatted?") + for key, value in fragment.items(): + if not doc.has_key(key): doc[key] = value else: diff --git a/library/files/copy b/library/files/copy index bbf277837a8..a8b4000f73e 100644 --- a/library/files/copy +++ b/library/files/copy @@ -83,11 +83,8 @@ options: defaults. required: false version_added: "1.5" - others: - description: - - all arguments accepted by the M(file) module also work here - required: false author: Michael DeHaan +extends_documentation_fragment: files.DOCUMENTATION notes: - The "copy" module recursively copy facility does not scale to lots (>hundreds) of files. For alternative, see synchronize module, which is a wrapper around rsync. diff --git a/library/files/file b/library/files/file index adbfa9733c2..0cb9d6fe080 100644 --- a/library/files/file +++ b/library/files/file @@ -33,6 +33,7 @@ DOCUMENTATION = ''' module: file version_added: "historical" short_description: Sets attributes of files +extends_documentation_fragment: files description: - Sets attributes of files, symlinks, and directories, or removes files/symlinks/directories. Many other modules support the same options as @@ -58,24 +59,6 @@ options: required: false default: file choices: [ file, link, directory, hard, touch, absent ] - mode: - required: false - default: null - choices: [] - description: - - mode the file or directory should be, such as 0644 as would be fed to I(chmod) - owner: - required: false - default: null - choices: [] - description: - - name of the user that should own the file/directory, as would be fed to I(chown) - group: - required: false - default: null - choices: [] - description: - - name of the group that should own the file/directory, as would be fed to I(chown) src: required: false default: null @@ -83,34 +66,6 @@ options: description: - path of the file to link to (applies only to C(state= link or hard)). Will accept absolute, relative and nonexisting (with C(force)) paths. Relative paths are not expanded. - seuser: - required: false - default: null - choices: [] - description: - - user part of SELinux file context. Will default to system policy, if - applicable. If set to C(_default), it will use the C(user) portion of the - policy if available - serole: - required: false - default: null - choices: [] - description: - - role part of SELinux file context, C(_default) feature works as for I(seuser). 
- setype: - required: false - default: null - choices: [] - description: - - type part of SELinux file context, C(_default) feature works as for I(seuser). - selevel: - required: false - default: "s0" - choices: [] - description: - - level part of the SELinux file context. This is the MLS/MCS attribute, - sometimes known as the C(range). C(_default) feature works as for - I(seuser). recurse: required: false default: "no" @@ -118,13 +73,6 @@ options: version_added: "1.1" description: - recursively set the specified file attributes (applies only to state=directory) - force: - required: false - default: "no" - choices: [ "yes", "no" ] - description: - - 'force the creation of the symlinks when the destination exists and is a file (so, we need to unlink the - "path" file and create symlink to the "src" file in place of it).' notes: - See also M(copy), M(template), M(assemble) requirements: [ ] diff --git a/library/files/template b/library/files/template index 8ffa6252516..e7b9a502938 100644 --- a/library/files/template +++ b/library/files/template @@ -47,10 +47,6 @@ options: required: false default: "" version_added: "1.2" - others: - description: - - all arguments accepted by the M(file) module also work here, as well as the M(copy) module (except the the 'content' parameter). - required: false notes: - "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)." @@ -59,6 +55,7 @@ notes: which changes the variable interpolation markers to [% var %] instead of {{ var }}. This is the best way to prevent evaluation of things that look like, but should not be Jinja2. raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively evaluated." requirements: [] +extends_documentation_fragment: files.DOCUMENTATION author: Michael DeHaan ''' From 37f096a6bb3f75514e061efad19a64d7af7922b1 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 12 Feb 2014 21:51:36 -0600 Subject: [PATCH 692/772] rax modules improvements * Catch issues with invalid regions * Ensure we send string only data as meta values in the rax module * Add public_key/lookup example for rax_keypair * Clean up import statements --- library/cloud/rax | 31 +++++++++++++------- library/cloud/rax_clb | 15 ++++++---- library/cloud/rax_clb_nodes | 13 +++++---- library/cloud/rax_dns | 16 +++++++---- library/cloud/rax_dns_record | 16 +++++++---- library/cloud/rax_facts | 18 ++++++++---- library/cloud/rax_files | 48 ++++++++++++++++++++----------- library/cloud/rax_files_objects | 51 +++++++++++++++++++-------------- library/cloud/rax_identity | 16 +++++++---- library/cloud/rax_keypair | 32 +++++++++++++++++---- library/cloud/rax_network | 23 ++++++++------- library/cloud/rax_queue | 17 ++++++----- 12 files changed, 191 insertions(+), 105 deletions(-) diff --git a/library/cloud/rax b/library/cloud/rax index 248790037eb..af533bca126 100644 --- a/library/cloud/rax +++ b/library/cloud/rax @@ -1,4 +1,4 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -175,18 +175,18 @@ EXAMPLES = ''' register: rax ''' -import sys -import time import os import re +import time + from uuid import UUID from types import NoneType try: import pyrax + HAS_PYRAX = True except ImportError: - print("failed=True msg='pyrax is required for this module'") - sys.exit(1) + HAS_PYRAX = False ACTIVE_STATUSES = ('ACTIVE', 'BUILD', 'HARD_REBOOT', 'MIGRATING', 'PASSWORD', 'REBOOT', 'REBUILD', 'RESCUE', 'RESIZE', 'REVERT_RESIZE') @@ -379,8 +379,16 
@@ def cloudservers(module, state, name, flavor, image, meta, key_name, files, auto_increment, extra_create_args): cs = pyrax.cloudservers cnw = pyrax.cloud_networks + if not cnw: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + servers = [] + for key, value in meta.items(): + meta[key] = repr(value) + # Add the group meta key if group and 'group' not in meta: meta['group'] = group @@ -641,6 +649,9 @@ def main(): required_together=rax_required_together(), ) + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + service = module.params.get('service') if service is not None: @@ -672,11 +683,6 @@ def main(): setup_rax_module(module, pyrax) - if pyrax.cloudservers is None: - module.fail_json(msg='Failed to instantiate client. This ' - 'typically indicates an invalid region or an ' - 'incorrectly capitalized region name.') - if extra_client_args: pyrax.cloudservers = pyrax.connect_to_cloudservers( region=pyrax.cloudservers.client.region_name, @@ -685,6 +691,11 @@ def main(): if 'bypass_url' in extra_client_args: client.management_url = extra_client_args['bypass_url'] + if pyrax.cloudservers is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + cloudservers(module, state, name, flavor, image, meta, key_name, files, wait, wait_timeout, disk_config, count, group, instance_ids, exact_count, networks, count_offset, diff --git a/library/cloud/rax_clb b/library/cloud/rax_clb index dbc7f85b196..85700895c7c 100644 --- a/library/cloud/rax_clb +++ b/library/cloud/rax_clb @@ -1,4 +1,4 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -130,15 +130,13 @@ EXAMPLES = ''' register: my_lb ''' -import sys - from types import NoneType try: import pyrax + HAS_PYRAX = True except ImportError: - print("failed=True msg='pyrax required for this module'") - sys.exit(1) + HAS_PYRAX = False NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN', @@ -190,6 +188,10 @@ def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, balancers = [] clb = pyrax.cloud_loadbalancers + if not clb: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') for balancer in clb.list(): if name != balancer.name and name != balancer.id: @@ -308,6 +310,9 @@ def main(): required_together=rax_required_together(), ) + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + algorithm = module.params.get('algorithm') meta = module.params.get('meta') name = module.params.get('name') diff --git a/library/cloud/rax_clb_nodes b/library/cloud/rax_clb_nodes index fb12967ec1d..04ec11fc94f 100644 --- a/library/cloud/rax_clb_nodes +++ b/library/cloud/rax_clb_nodes @@ -121,13 +121,12 @@ EXAMPLES = ''' ''' import os -import sys try: import pyrax + HAS_PYRAX = True except ImportError: - print("failed=True msg='pyrax is required for this module'") - sys.exit(1) + HAS_PYRAX = False def _activate_virtualenv(path): @@ -196,6 +195,9 @@ def main(): required_together=rax_required_together(), ) + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + address = module.params['address'] condition = (module.params['condition'] and module.params['condition'].upper()) @@ -219,8 +221,9 @@ def main(): setup_rax_module(module, pyrax) if not pyrax.cloud_loadbalancers: - module.fail_json(msg='Failed to instantiate load balancer client ' - '(possibly incorrect region)') + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') try: lb = pyrax.cloud_loadbalancers.get(load_balancer_id) diff --git a/library/cloud/rax_dns b/library/cloud/rax_dns index 7ed2c926d6f..c12d09fb1ad 100644 --- a/library/cloud/rax_dns +++ b/library/cloud/rax_dns @@ -1,4 +1,4 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -62,16 +62,13 @@ EXAMPLES = ''' register: rax_dns ''' -import sys -import os - from types import NoneType try: import pyrax + HAS_PYRAX = True except ImportError: - print("failed=True msg='pyrax required for this module'") - sys.exit(1) + HAS_PYRAX = False NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) @@ -89,6 +86,10 @@ def rax_dns(module, comment, email, name, state, ttl): changed = False dns = pyrax.cloud_dns + if not dns: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') if state == 'present': if not email: @@ -159,6 +160,9 @@ def main(): required_together=rax_required_together(), ) + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + comment = module.params.get('comment') email = module.params.get('email') name = module.params.get('name') diff --git a/library/cloud/rax_dns_record b/library/cloud/rax_dns_record index 51dc26b779f..d1e79983604 100644 --- a/library/cloud/rax_dns_record +++ b/library/cloud/rax_dns_record @@ -1,4 +1,4 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -87,16 +87,13 @@ EXAMPLES = ''' register: rax_dns_record ''' -import sys -import os - from types import NoneType try: import pyrax + HAS_PYRAX = True except ImportError: - print("failed=True msg='pyrax required for this module'") - sys.exit(1) + HAS_PYRAX = False NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) @@ -115,6 +112,10 @@ def rax_dns_record(module, comment, data, domain, name, priority, record_type, changed = False dns = pyrax.cloud_dns + if not dns: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') if state == 'present': if not priority and record_type in ['MX', 'SRV']: @@ -211,6 +212,9 @@ def main(): required_together=rax_required_together(), ) + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + comment = module.params.get('comment') data = module.params.get('data') domain = module.params.get('domain') diff --git a/library/cloud/rax_facts b/library/cloud/rax_facts index f71982f4243..64711f41519 100644 --- a/library/cloud/rax_facts +++ b/library/cloud/rax_facts @@ -1,4 +1,4 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -55,16 +55,13 @@ EXAMPLES = ''' ansible_ssh_host: "{{ rax_accessipv4 }}" ''' -import sys -import os - from types import NoneType try: import pyrax + HAS_PYRAX = True except ImportError: - print("failed=True msg='pyrax required for this module'") - sys.exit(1) + HAS_PYRAX = False NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) @@ -87,6 +84,12 @@ def rax_facts(module, address, name, server_id): changed = False cs = pyrax.cloudservers + + if cs is None: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + ansible_facts = {} search_opts = {} @@ -139,6 +142,9 @@ def main(): required_one_of=[['address', 'id', 'name']], ) + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + address = module.params.get('address') server_id = module.params.get('id') name = module.params.get('name') diff --git a/library/cloud/rax_files b/library/cloud/rax_files index bdb11a661f5..68e28a07f74 100644 --- a/library/cloud/rax_files +++ b/library/cloud/rax_files @@ -1,4 +1,4 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # (c) 2013, Paul Durivage # @@ -143,9 +143,9 @@ from ansible import __version__ try: import pyrax + HAS_PYRAX = True except ImportError, e: - print("failed=True msg='pyrax is required for this module'") - sys.exit(1) + HAS_PYRAX = False EXIT_DICT = dict(success=True) META_PREFIX = 'x-container-meta-' @@ -200,7 +200,8 @@ def meta(cf, module, container_, state, meta_, clear_meta): module.exit_json(**EXIT_DICT) -def container(cf, module, container_, state, meta_, clear_meta, ttl, public, private, web_index, web_error): +def container(cf, module, container_, state, meta_, clear_meta, ttl, public, + private, web_index, web_error): if public and private: module.fail_json(msg='container cannot be simultaneously ' 'set to public and private') @@ -297,9 +298,8 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, pri EXIT_DICT['container'] = c.name EXIT_DICT['objs_in_container'] = c.object_count EXIT_DICT['total_bytes'] = c.total_bytes - + _locals = locals().keys() - if ('cont_deleted' in _locals or 'meta_set' in _locals or 'cont_public' in _locals @@ -311,15 +311,23 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, pri module.exit_json(**EXIT_DICT) -def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, private, web_index, web_error): - """ Dispatch from here to work with metadata or file objects """ - cf = pyrax.cloudfiles - cf.user_agent = USER_AGENT +def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, + private, web_index, web_error): + """ Dispatch from here to work with metadata or file objects """ + cf = pyrax.cloudfiles - if typ == "container": - container(cf, module, container_, state, meta_, clear_meta, ttl, public, private, web_index, web_error) - else: - meta(cf, module, container_, state, meta_, clear_meta) + if cf is None: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + + cf.user_agent = USER_AGENT + + if typ == "container": + container(cf, module, container_, state, meta_, clear_meta, ttl, + public, private, web_index, web_error) + else: + meta(cf, module, container_, state, meta_, clear_meta) def main(): @@ -327,7 +335,8 @@ def main(): argument_spec.update( dict( container=dict(), - state=dict(choices=['present', 'absent', 'list'], default='present'), + state=dict(choices=['present', 'absent', 'list'], + default='present'), meta=dict(type='dict', default=dict()), clear_meta=dict(default=False, type='bool'), type=dict(choices=['container', 'meta'], default='container'), @@ -344,6 +353,9 @@ def main(): required_together=rax_required_together() ) + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + container_ = module.params.get('container') state = module.params.get('state') meta_ = module.params.get('meta') @@ -358,10 +370,12 @@ def main(): if state in ['present', 'absent'] and not container_: module.fail_json(msg='please specify a container name') if clear_meta and not typ == 'meta': - module.fail_json(msg='clear_meta can only be used when setting metadata') + module.fail_json(msg='clear_meta can only be used when setting ' + 'metadata') setup_rax_module(module, pyrax) - cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, private, web_index, web_error) + cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, + private, web_index, web_error) from ansible.module_utils.basic import * diff --git a/library/cloud/rax_files_objects b/library/cloud/rax_files_objects index 9002291ceff..d7f11900ab9 100644 --- a/library/cloud/rax_files_objects +++ b/library/cloud/rax_files_objects @@ -1,4 +1,4 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # (c) 2013, Paul Durivage # @@ -187,9 +187,9 @@ import os try: import pyrax -except ImportError, e: - print("failed=True msg='pyrax is required for this module'") - sys.exit(1) + HAS_PYRAX = True +except ImportError: + HAS_PYRAX = False EXIT_DICT = dict(success=False) META_PREFIX = 'x-object-meta-' @@ -433,7 +433,6 @@ def get_meta(module, cf, container, src, dest): meta_key = k.split(META_PREFIX)[-1] results[obj][meta_key] = v - EXIT_DICT['container'] = c.name if results: EXIT_DICT['meta_results'] = results @@ -530,28 +529,33 @@ def delete_meta(module, cf, container, src, dest, meta): def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires): - """ Dispatch from here to work with metadata or file objects """ - cf = pyrax.cloudfiles + """ Dispatch from here to work with metadata or file objects """ + cf = pyrax.cloudfiles - if typ == "file": - if method == 'put': - upload(module, cf, container, src, dest, meta, expires) + if cf is None: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') - elif method == 'get': - download(module, cf, container, src, dest, structure) + if typ == "file": + if method == 'put': + upload(module, cf, container, src, dest, meta, expires) - elif method == 'delete': - delete(module, cf, container, src, dest) + elif method == 'get': + download(module, cf, container, src, dest, structure) - else: - if method == 'get': - get_meta(module, cf, container, src, dest) + elif method == 'delete': + delete(module, cf, container, src, dest) - if method == 'put': - put_meta(module, cf, container, src, dest, meta, clear_meta) + else: + if method == 'get': + get_meta(module, cf, container, src, dest) - if method == 'delete': - delete_meta(module, cf, container, src, dest, meta) + if method == 'put': + put_meta(module, cf, container, src, dest, meta, clear_meta) + + if method == 'delete': + delete_meta(module, cf, container, src, dest, meta) def main(): @@ -575,6 +579,9 @@ def main(): required_together=rax_required_together() ) + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + container = module.params.get('container') src = module.params.get('src') dest = module.params.get('dest') @@ -595,4 +602,4 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.rax import * -main() \ No newline at end of file +main() diff --git a/library/cloud/rax_identity b/library/cloud/rax_identity index 2890e40d160..b9b82f13f16 100644 --- a/library/cloud/rax_identity +++ b/library/cloud/rax_identity @@ -1,4 +1,4 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -68,15 +68,13 @@ EXAMPLES = ''' register: rackspace_identity ''' -import sys - from types import NoneType try: import pyrax + HAS_PYRAX = True except ImportError: - print("failed=True msg='pyrax required for this module'") - sys.exit(1) + HAS_PYRAX = False NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) @@ -118,10 +116,18 @@ def main(): required_together=rax_required_together() ) + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + state = module.params.get('state') setup_rax_module(module, pyrax) + if pyrax.identity is None: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + cloud_identity(module, state, pyrax.identity) # import module snippets diff --git a/library/cloud/rax_keypair b/library/cloud/rax_keypair index 6a42e3c99f9..458ec5713c4 100644 --- a/library/cloud/rax_keypair +++ b/library/cloud/rax_keypair @@ -1,4 +1,4 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -48,7 +48,7 @@ extends_documentation_fragment: rackspace.openstack EXAMPLES = ''' - name: Create a keypair - hosts: local + hosts: localhost gather_facts: False tasks: - name: keypair request @@ -68,17 +68,28 @@ EXAMPLES = ''' module: copy content: "{{ keypair.keypair.private_key }}" dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}" -''' -import sys +- name: Create a keypair + hosts: localhost + gather_facts: False + tasks: + - name: keypair request + local_action: + module: rax_keypair + credentials: ~/.raxpub + name: my_keypair + public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}" + region: DFW + register: keypair +''' from types import NoneType try: import pyrax + HAS_PYRAX = True except ImportError: - print("failed=True msg='pyrax required for this module'") - sys.exit(1) + HAS_PYRAX = False NON_CALLABLES = (basestring, bool, dict, int, list, NoneType) @@ -96,6 +107,12 @@ def rax_keypair(module, name, public_key, state): changed = False cs = pyrax.cloudservers + + if cs is None: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + keypair = {} if state == 'present': @@ -141,6 +158,9 @@ def main(): required_together=rax_required_together(), ) + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + name = module.params.get('name') public_key = module.params.get('public_key') state = module.params.get('state') diff --git a/library/cloud/rax_network b/library/cloud/rax_network index ac3aca6991e..bc4745a7a84 100644 --- a/library/cloud/rax_network +++ b/library/cloud/rax_network @@ -1,4 +1,4 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -57,16 +57,11 @@ EXAMPLES = ''' state: present ''' -import sys -import os - try: import pyrax - import pyrax.utils - from pyrax import exc + HAS_PYRAX = True except ImportError: - print("failed=True msg='pyrax required for this module'") - sys.exit(1) + HAS_PYRAX = False def cloud_network(module, state, label, cidr): @@ -78,10 +73,15 @@ def cloud_network(module, state, label, cidr): network = None networks = [] + if not pyrax.cloud_networks: + module.fail_json(msg='Failed to instantiate client. 
This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') + if state == 'present': try: network = pyrax.cloud_networks.find_network_by_label(label) - except exc.NetworkNotFound: + except pyrax.exceptions.NetworkNotFound: try: network = pyrax.cloud_networks.create(label, cidr=cidr) changed = True @@ -95,7 +95,7 @@ def cloud_network(module, state, label, cidr): network = pyrax.cloud_networks.find_network_by_label(label) network.delete() changed = True - except exc.NetworkNotFound: + except pyrax.exceptions.NetworkNotFound: pass except Exception, e: module.fail_json(msg='%s' % e.message) @@ -125,6 +125,9 @@ def main(): required_together=rax_required_together(), ) + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + state = module.params.get('state') label = module.params.get('label') cidr = module.params.get('cidr') diff --git a/library/cloud/rax_queue b/library/cloud/rax_queue index 7388c4ed81d..d3e5ac3f81e 100644 --- a/library/cloud/rax_queue +++ b/library/cloud/rax_queue @@ -1,4 +1,4 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -55,15 +55,11 @@ EXAMPLES = ''' register: my_queue ''' -import sys -import os - - try: import pyrax + HAS_PYRAX = True except ImportError: - print("failed=True msg='pyrax is required for this module'") - sys.exit(1) + HAS_PYRAX = False def cloud_queue(module, state, name): @@ -76,6 +72,10 @@ def cloud_queue(module, state, name): instance = {} cq = pyrax.queues + if not cq: + module.fail_json(msg='Failed to instantiate client. This ' + 'typically indicates an invalid region or an ' + 'incorrectly capitalized region name.') for queue in cq.list(): if name != queue.name: @@ -126,6 +126,9 @@ def main(): required_together=rax_required_together() ) + if not HAS_PYRAX: + module.fail_json(msg='pyrax is required for this module') + name = module.params.get('name') state = module.params.get('state') From 729e20ae7747e87d3abad9b9b4c58f13665c1e51 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 3 Apr 2014 17:17:12 -0400 Subject: [PATCH 693/772] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1d215e95793..9bf97b40162 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,8 @@ Other notable changes: * synchronize module sets relative dirs based on inventory or role path * misc bugfixes and other parameters * the ec2_key module now has wait/wait_timeout parameters +* added version_compare filter (see docs) +* added ability for module documentation YAML to utilize shared module snippets for common args ## 1.5.3 "Love Walks In" - March 13, 2014 From 3ac731087cb5a0c2c1d9cf3a5bcdf518f94d0426 Mon Sep 17 00:00:00 2001 From: Peter Gehres Date: Sun, 9 Mar 2014 12:35:45 -0700 Subject: [PATCH 694/772] Fix for #6353 adding a newline between assembled files --- library/files/assemble | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/library/files/assemble b/library/files/assemble index 90c1a90aeb3..b7d6c38a04d 100644 --- a/library/files/assemble +++ b/library/files/assemble @@ -102,21 +102,30 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None): tmpfd, temp_path = tempfile.mkstemp() tmp = os.fdopen(tmpfd,'w') delimit_me = False + for f in sorted(os.listdir(src_path)): if compiled_regexp and not compiled_regexp.search(f): continue fragment = "%s/%s" % (src_path, f) - if 
delimit_me and delimiter: - # un-escape anything like newlines - delimiter = delimiter.decode('unicode-escape') - tmp.write(delimiter) - # always make sure there's a newline after the - # delimiter, so lines don't run together - if delimiter[-1] != '\n': - tmp.write('\n') + + # delimiters should only appear between fragments + if delimit_me: + # always put a newline between fragments + tmp.write('\n') + + if delimiter: + # un-escape anything like newlines + delimiter = delimiter.decode('unicode-escape') + tmp.write(delimiter) + # always make sure there's a newline after the + # delimiter, so lines don't run together + if delimiter[-1] != '\n': + tmp.write('\n') + if os.path.isfile(fragment): tmp.write(file(fragment).read()) delimit_me = True + tmp.close() return temp_path From dc93b31d2212fca3b26fe16a25495b3da5c4ce0a Mon Sep 17 00:00:00 2001 From: Richard C Isaacson Date: Mon, 17 Mar 2014 17:35:03 -0500 Subject: [PATCH 695/772] assemble module: fix insertion of newlines when not needed. This builds on GH-6359 and changes the logic so that a newline is only inserted between fragments if the previous fragment does not end with a newline. --- library/files/assemble | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/library/files/assemble b/library/files/assemble index b7d6c38a04d..7f0a9d1e0a1 100644 --- a/library/files/assemble +++ b/library/files/assemble @@ -102,17 +102,22 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None): tmpfd, temp_path = tempfile.mkstemp() tmp = os.fdopen(tmpfd,'w') delimit_me = False + add_newline = False for f in sorted(os.listdir(src_path)): if compiled_regexp and not compiled_regexp.search(f): continue fragment = "%s/%s" % (src_path, f) + if not os.path.isfile(fragment): + continue + fragment_content = file(fragment).read() - # delimiters should only appear between fragments - if delimit_me: - # always put a newline between fragments + # always put a newline between fragments if the previous fragment didn't end with a newline. + if add_newline: tmp.write('\n') + # delimiters should only appear between fragments + if delimit_me: if delimiter: # un-escape anything like newlines delimiter = delimiter.decode('unicode-escape') @@ -122,9 +127,12 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None): if delimiter[-1] != '\n': tmp.write('\n') - if os.path.isfile(fragment): - tmp.write(file(fragment).read()) + tmp.write(fragment_content) delimit_me = True + if fragment_content.endswith('\n'): + add_newline = False + else: + add_newline = True tmp.close() return temp_path From 82b24c162e7d0395355e3d34f79d8eb539cd3de8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 3 Apr 2014 16:29:15 -0500 Subject: [PATCH 696/772] Adding delimiter fixes to action_plugin + fixing local assembling with a delimiter Also added a new integration test for assemble using local assembly with a delimiter. 
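
The fragment-assembly logic that patches 694-696 converge on is easier to follow outside the module plumbing. Below is a minimal standalone sketch of the same algorithm (function and variable names are mine, not from the patch; Python 2 to match the module code):

    import codecs

    def assemble(fragments, delimiter=None):
        # fragments: list of fragment contents, already sorted and filtered
        out = []
        prev_had_newline = True
        for i, content in enumerate(fragments):
            if i > 0:
                # fragments always start on their own line, even when the
                # previous fragment had no trailing newline
                if not prev_had_newline:
                    out.append('\n')
                if delimiter:
                    # un-escape sequences like "\n" passed in as literal text
                    delim = codecs.decode(delimiter, 'unicode_escape')
                    out.append(delim)
                    # keep the delimiter on its own line as well
                    if not delim.endswith('\n'):
                        out.append('\n')
            out.append(content)
            prev_had_newline = content.endswith('\n')
        return ''.join(out)

For example, assemble(['a\n', 'b'], '#--- sep ---#') yields 'a\n#--- sep ---#\nb': the delimiter sits on its own line and no spurious blank line is added after 'a'.
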
--- lib/ansible/module_utils/basic.py | 1 + lib/ansible/runner/action_plugins/assemble.py | 41 ++++++++++++++----- .../roles/test_assemble/tasks/main.yml | 10 +++++ 3 files changed, 41 insertions(+), 11 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 654042370b5..94326711ba2 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -115,6 +115,7 @@ FILE_COMMON_ARGUMENTS=dict( backup = dict(), force = dict(), remote_src = dict(), # used by assemble + delimiter = dict(), # used by assemble directory_mode = dict(), # used by copy ) diff --git a/lib/ansible/runner/action_plugins/assemble.py b/lib/ansible/runner/action_plugins/assemble.py index 2fd76391769..741053f4cf0 100644 --- a/lib/ansible/runner/action_plugins/assemble.py +++ b/lib/ansible/runner/action_plugins/assemble.py @@ -31,24 +31,43 @@ class ActionModule(object): def __init__(self, runner): self.runner = runner - def _assemble_from_fragments(self, src_path, delimiter=None): + def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None): ''' assemble a file from a directory of fragments ''' tmpfd, temp_path = tempfile.mkstemp() tmp = os.fdopen(tmpfd,'w') delimit_me = False + add_newline = False + for f in sorted(os.listdir(src_path)): + if compiled_regexp and not compiled_regexp.search(f): + continue fragment = "%s/%s" % (src_path, f) - if delimit_me and delimiter: - # en-escape things like new-lines - delimiter = delimiter.decode('unicode-escape') - tmp.write(delimiter) - # always make sure there's a newline after the - # delimiter, so lines don't run together - if delimiter[-1] != '\n': - tmp.write('\n') - if os.path.isfile(fragment): - tmp.write(file(fragment).read()) + if not os.path.isfile(fragment): + continue + fragment_content = file(fragment).read() + + # always put a newline between fragments if the previous fragment didn't end with a newline. 
+ if add_newline: + tmp.write('\n') + + # delimiters should only appear between fragments + if delimit_me: + if delimiter: + # un-escape anything like newlines + delimiter = delimiter.decode('unicode-escape') + tmp.write(delimiter) + # always make sure there's a newline after the + # delimiter, so lines don't run together + if delimiter[-1] != '\n': + tmp.write('\n') + + tmp.write(fragment_content) delimit_me = True + if fragment_content.endswith('\n'): + add_newline = False + else: + add_newline = True + tmp.close() return temp_path diff --git a/test/integration/roles/test_assemble/tasks/main.yml b/test/integration/roles/test_assemble/tasks/main.yml index b20551f8866..f06cee6ace8 100644 --- a/test/integration/roles/test_assemble/tasks/main.yml +++ b/test/integration/roles/test_assemble/tasks/main.yml @@ -69,3 +69,13 @@ - "result.state == 'file'" - "result.md5sum == '96905702a2ece40de6bf3a94b5062513'" +- name: test assemble with remote_src=False and a delimiter + assemble: src="./" dest="{{output_dir}}/assembled5" remote_src=no delimiter="#--- delimiter ---#" + register: result + +- name: assert the fragments were assembled without remote + assert: + that: + - "result.state == 'file'" + - "result.md5sum == '4773eac67aba3f0be745876331c8a450'" + From 051d57caeb3e4a398924e9c10a7ae1f6c904d6bd Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 3 Apr 2014 17:34:11 -0400 Subject: [PATCH 697/772] Clarify free_form isn't a real option :) --- library/commands/command | 3 ++- library/commands/shell | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/library/commands/command b/library/commands/command index b35501f1bf8..f1a48922122 100644 --- a/library/commands/command +++ b/library/commands/command @@ -39,7 +39,8 @@ description: options: free_form: description: - - the command module takes a free form command to run + - the command module takes a free form command to run. There is no parameter actually named 'free form'. + See the examples! required: true default: null aliases: [] diff --git a/library/commands/shell b/library/commands/shell index 03299b967cc..639d4a14b09 100644 --- a/library/commands/shell +++ b/library/commands/shell @@ -14,7 +14,8 @@ version_added: "0.2" options: free_form: description: - - The shell module takes a free form command to run + - The shell module takes a free form command to run, as a string. There's not an actual + option named "free form". See the examples! required: true default: null creates: From 621fcbb9de43303ac99534462b2511b9efd9498c Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 3 Apr 2014 18:02:54 -0400 Subject: [PATCH 698/772] Name is not required, update docs. --- library/system/cron | 1 - 1 file changed, 1 deletion(-) diff --git a/library/system/cron b/library/system/cron index be17ede845d..9fc9a1a11f1 100644 --- a/library/system/cron +++ b/library/system/cron @@ -44,7 +44,6 @@ options: name: description: - Description of a crontab entry. - required: true default: null user: description: From 2e26549426cc7f718742cc87cf85314df68a2238 Mon Sep 17 00:00:00 2001 From: Drew Blas Date: Fri, 4 Apr 2014 05:59:57 -0500 Subject: [PATCH 699/772] Update apt:deb with correct version The docs site says this option is available in 1.5, but it is not. #5910 https://github.com/ansible/ansible/pull/5910 added the deb option. The PR was generated two months ago (before 1.5) but was not included until after 1.5 was released. This fixes the docs. 
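
For background on what the deb option does at runtime: the module leans on python-apt's debfile support rather than shelling out to dpkg directly. Roughly, and only as a sketch (assuming python-apt is installed, /tmp/foo.deb is a real package, and with all error handling omitted; _failure_string is a private attribute):

    import apt
    import apt.debfile

    cache = apt.Cache()
    pkg = apt.debfile.DebPackage('/tmp/foo.deb', cache)
    if pkg.check():    # verifies the archive and resolves its dependencies
        pkg.install()  # returns a non-zero status on failure
    else:
        print pkg._failure_string  # why check() rejected the package

The real module layers idempotence on top of this, comparing the .deb's version against what is already installed before acting.
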
--- library/packaging/apt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/apt b/library/packaging/apt index c79f73ab517..6bd19177f2d 100755 --- a/library/packaging/apt +++ b/library/packaging/apt @@ -92,7 +92,7 @@ options: description: - Path to a local .deb package file to install. required: false - version_added: "1.5" + version_added: "1.6" requirements: [ python-apt, aptitude ] author: Matthew Williams notes: From d544eb3e3b31faee18a2062171c7d8de53e24b4d Mon Sep 17 00:00:00 2001 From: Ted Timmons Date: Thu, 3 Apr 2014 18:09:53 -0700 Subject: [PATCH 700/772] cloudformation's disable_rollback must be a bool passing "yes" to boto causes severe problems. --- library/cloud/cloudformation | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/cloudformation b/library/cloud/cloudformation index 822bd6e2503..02132f56325 100644 --- a/library/cloud/cloudformation +++ b/library/cloud/cloudformation @@ -196,7 +196,7 @@ def main(): template_parameters=dict(required=False, type='dict', default={}), state=dict(default='present', choices=['present', 'absent']), template=dict(default=None, required=True), - disable_rollback=dict(default=False), + disable_rollback=dict(default=False, type='bool'), tags=dict(default=None) ) ) From 7f9736198a4c054b3b4691a150f26c2d61a24aed Mon Sep 17 00:00:00 2001 From: Eric Lake Date: Fri, 4 Apr 2014 11:41:20 -0500 Subject: [PATCH 701/772] add distribution_major_version to fact gathering --- lib/ansible/module_utils/facts.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index c6e64fc4e60..607b911b0fc 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -231,6 +231,7 @@ class Facts(object): dist = platform.dist() self.facts['distribution'] = dist[0].capitalize() or 'NA' self.facts['distribution_version'] = dist[1] or 'NA' + self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA' self.facts['distribution_release'] = dist[2] or 'NA' # Try to handle the exceptions now ... for (path, name) in Facts.OSDIST_DICT.items(): From 5a65dc3b6a41c7fe5c4685555922db35b5297bc9 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Fri, 4 Apr 2014 13:05:33 -0400 Subject: [PATCH 702/772] Fixes #6820 fix erroneous missing vault password error when using the template module --- lib/ansible/runner/__init__.py | 7 ++++--- lib/ansible/runner/action_plugins/template.py | 2 +- lib/ansible/utils/template.py | 2 +- lib/ansible/utils/vault.py | 1 - 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 9324bfd5f40..eb625c49369 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -83,15 +83,16 @@ def _executor_hook(job_queue, result_queue, new_stdin): class HostVars(dict): ''' A special view of vars_cache that adds values from the inventory when needed. 
''' - def __init__(self, vars_cache, inventory): + def __init__(self, vars_cache, inventory, vault_password=None): self.vars_cache = vars_cache self.inventory = inventory self.lookup = dict() self.update(vars_cache) + self.vault_password = vault_password def __getitem__(self, host): if host not in self.lookup: - result = self.inventory.get_variables(host) + result = self.inventory.get_variables(host, vault_password=self.vault_password) result.update(self.vars_cache.get(host, {})) self.lookup[host] = result return self.lookup[host] @@ -563,7 +564,7 @@ class Runner(object): inject = utils.combine_vars(inject, module_vars) inject = utils.combine_vars(inject, combined_cache.get(host, {})) inject.setdefault('ansible_ssh_user', self.remote_user) - inject['hostvars'] = HostVars(combined_cache, self.inventory) + inject['hostvars'] = HostVars(combined_cache, self.inventory, vault_password=self.vault_pass) inject['group_names'] = host_variables.get('group_names', []) inject['groups'] = self.inventory.groups_list() inject['vars'] = self.module_vars diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index 44b8e62dda3..96d8f97a3aa 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -85,7 +85,7 @@ class ActionModule(object): # template the source data locally & get ready to transfer try: - resultant = template.template_from_file(self.runner.basedir, source, inject) + resultant = template.template_from_file(self.runner.basedir, source, inject, vault_password=self.runner.vault_pass) except Exception, e: result = dict(failed=True, msg=str(e)) return ReturnData(conn=conn, comm_ok=False, result=result) diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index 3f26f3f9c0f..8ec27ac0976 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -199,7 +199,7 @@ class J2Template(jinja2.environment.Template): def new_context(self, vars=None, shared=False, locals=None): return jinja2.runtime.Context(self.environment, vars.add_locals(locals), self.name, self.blocks) -def template_from_file(basedir, path, vars): +def template_from_file(basedir, path, vars, vault_password=None): ''' run a file through the templating engine ''' fail_on_undefined = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index b4d79a50388..88fa710938b 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -451,7 +451,6 @@ class VaultAES256(object): derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength, count=10000, prf=pbkdf2_prf) - #import epdb; epdb.st() key1 = derivedkey[:keylength] key2 = derivedkey[keylength:(keylength * 2)] iv = derivedkey[(keylength * 2):(keylength * 2) + ivlength] From e821770505b5107100a80d0f046ec9d0e3da3446 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 23 Dec 2013 15:30:45 -0600 Subject: [PATCH 703/772] Allow respecification of a node without requiring node_id --- library/cloud/rax_clb_nodes | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/library/cloud/rax_clb_nodes b/library/cloud/rax_clb_nodes index 04ec11fc94f..30514abde92 100644 --- a/library/cloud/rax_clb_nodes +++ b/library/cloud/rax_clb_nodes @@ -135,11 +135,21 @@ def _activate_virtualenv(path): execfile(activate_this, dict(__file__=activate_this)) -def _get_node(lb, node_id): - """Return a node with the given `node_id`""" - for node in lb.nodes: - 
if node.id == node_id: - return node +def _get_node(lb, node_id=None, address=None, port=None): + """Return a matching node""" + searches = { + 'id': node_id, + 'address': address, + 'port': port + } + + for node in getattr(lb, 'nodes', []): + try: + if all(getattr(node, attr) == value + for (attr, value) in searches.items() if value is not None): + return node + except AttributeError: + continue return None @@ -230,10 +240,7 @@ def main(): except pyrax.exc.PyraxException, e: module.fail_json(msg='%s' % e.message) - if node_id: - node = _get_node(lb, node_id) - else: - node = None + node = _get_node(lb, node_id, address, port) result = _node_to_dict(node) @@ -272,22 +279,12 @@ def main(): except pyrax.exc.PyraxException, e: module.fail_json(msg='%s' % e.message) else: # Updating an existing node - immutable = { - 'address': address, - 'port': port, - } - mutable = { 'condition': condition, 'type': typ, 'weight': weight, } - for name, value in immutable.items(): - if value: - module.fail_json( - msg='Attribute %s cannot be modified' % name) - for name, value in mutable.items(): if value is None or value == getattr(node, name): mutable.pop(name) From 55a26fa9edd35a5468b59db880934b29fe7077f2 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 4 Apr 2014 13:02:49 -0500 Subject: [PATCH 704/772] Shared module docs for remaining rax modules --- library/cloud/rax_cbs | 52 ++----------------------------- library/cloud/rax_cbs_attachments | 52 ++----------------------------- library/cloud/rax_identity | 27 ++-------------- 3 files changed, 9 insertions(+), 122 deletions(-) diff --git a/library/cloud/rax_cbs b/library/cloud/rax_cbs index 73106eb41ab..443c833e7d0 100644 --- a/library/cloud/rax_cbs +++ b/library/cloud/rax_cbs @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_cbs @@ -22,46 +24,6 @@ description: - Manipulate Rackspace Cloud Block Storage Volumes version_added: 1.6 options: - api_key: - description: - - Rackspace API key (overrides I(credentials)) - aliases: - - password - auth_endpoint: - description: - - The URI of the authentication service - default: https://identity.api.rackspacecloud.com/v2.0/ - credentials: - description: - - File to find the Rackspace credentials in (ignored if I(api_key) and - I(username) are provided) - default: null - aliases: - - creds_file - env: - description: - - Environment as configured in ~/.pyrax.cfg, - see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration) - identity_type: - description: - - Authentication machanism to use, such as rackspace or keystone - default: rackspace - region: - description: - - Region to create an instance in - default: DFW - tenant_id: - description: - - The tenant ID used for authentication - tenant_name: - description: - - The tenant name used for authentication - username: - description: - - Rackspace username (overrides I(credentials)) - verify_ssl: - description: - - Whether or not to require SSL validation of API endpoints description: description: - Description to give the volume being created @@ -111,16 +73,8 @@ options: description: - how long before wait gives up, in seconds default: 300 -requirements: - - pyrax author: Christopher H. 
Laco, Matt Martz -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +extends_documentation_fragment: rackspace.openstack ''' EXAMPLES = ''' diff --git a/library/cloud/rax_cbs_attachments b/library/cloud/rax_cbs_attachments index c20c03a69ea..bc7dba9eec2 100644 --- a/library/cloud/rax_cbs_attachments +++ b/library/cloud/rax_cbs_attachments @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_cbs_attachments @@ -22,46 +24,6 @@ description: - Manipulate Rackspace Cloud Block Storage Volume Attachments version_added: 1.6 options: - api_key: - description: - - Rackspace API key (overrides I(credentials)) - aliases: - - password - auth_endpoint: - description: - - The URI of the authentication service - default: https://identity.api.rackspacecloud.com/v2.0/ - credentials: - description: - - File to find the Rackspace credentials in (ignored if I(api_key) and - I(username) are provided) - default: null - aliases: - - creds_file - env: - description: - - Environment as configured in ~/.pyrax.cfg, - see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration) - identity_type: - description: - - Authentication machanism to use, such as rackspace or keystone - default: rackspace - region: - description: - - Region to create an instance in - default: DFW - tenant_id: - description: - - The tenant ID used for authentication - tenant_name: - description: - - The tenant name used for authentication - username: - description: - - Rackspace username (overrides I(credentials)) - verify_ssl: - description: - - Whether or not to require SSL validation of API endpoints device: description: - The device path to attach the volume to, e.g. /dev/xvde @@ -96,16 +58,8 @@ options: description: - how long before wait gives up, in seconds default: 300 -requirements: - - pyrax author: Christopher H. Laco, Matt Martz -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +extends_documentation_fragment: rackspace.openstack ''' EXAMPLES = ''' diff --git a/library/cloud/rax_identity b/library/cloud/rax_identity index b9b82f13f16..591cd018e70 100644 --- a/library/cloud/rax_identity +++ b/library/cloud/rax_identity @@ -14,6 +14,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
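
The repeated option deletions above, and the extends_documentation_fragment lines replacing them, move the shared rax boilerplate into a single class under lib/ansible/utils/module_docs_fragments/. A trimmed sketch of what such a fragment looks like (abridged and reconstructed from the options deleted above, not the exact upstream file):

    # lib/ansible/utils/module_docs_fragments/rackspace.py (sketch)
    class ModuleDocFragment(object):

        # Standard Rackspace documentation fragment
        DOCUMENTATION = """
    options:
      api_key:
        description:
          - Rackspace API key (overrides I(credentials))
        aliases:
          - password
      region:
        description:
          - Region to create an instance in
        default: DFW
      username:
        description:
          - Rackspace username (overrides I(credentials))
    """

At doc-build time, get_docstring() in lib/ansible/utils/module_docs.py loads the fragment named by extends_documentation_fragment and merges in any keys the module's own DOCUMENTATION does not already define, as the merge loop shown in patch 732 below illustrates.
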
+# This is a DOCUMENTATION stub specific to this module, it extends +# a documentation fragment located in ansible.utils.module_docs_fragments DOCUMENTATION = ''' --- module: rax_identity @@ -22,36 +24,13 @@ description: - Verifies Rackspace Cloud credentials and returns identity information version_added: "1.5" options: - api_key: - description: - - Rackspace API key (overrides C(credentials)) - credentials: - description: - - File to find the Rackspace credentials in (ignored if C(api_key) and - C(username) are provided) - default: null - aliases: ['creds_file'] - region: - description: - - Region to authenticate against - default: DFW state: description: - Indicate desired state of the resource choices: ['present', 'absent'] default: present - username: - description: - - Rackspace username (overrides C(credentials)) -requirements: [ "pyrax" ] author: Christopher H. Laco, Matt Martz -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). - - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) +extends_documentation_fragment: rackspace.openstack ''' EXAMPLES = ''' From 036c8193b708f75f87ee6ae715b17770051a416f Mon Sep 17 00:00:00 2001 From: James Tanner Date: Fri, 4 Apr 2014 16:36:38 -0400 Subject: [PATCH 705/772] Fixes #6857 Make the url parameter required for get_url --- library/network/get_url | 1 + 1 file changed, 1 insertion(+) diff --git a/library/network/get_url b/library/network/get_url index 8f0ccb1686d..74cc5479f4a 100644 --- a/library/network/get_url +++ b/library/network/get_url @@ -177,6 +177,7 @@ def main(): argument_spec = url_argument_spec() argument_spec.update( + url = dict(required=True), dest = dict(required=True), sha256sum = dict(default=''), ) From 07ba9070ba902c4b3134773a3a920938ec2ef3ff Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sat, 5 Apr 2014 15:49:52 -0400 Subject: [PATCH 706/772] Update CHANGELOG.md update changelog, these weren't major features so move to other section --- CHANGELOG.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9bf97b40162..06781835b11 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,8 +12,7 @@ Major features/changes: - multiple users can connect with different keys, when `accelerate_multi_key = yes` is specified in the ansible.cfg. - daemon lifetime is now based on the time from the last activity, not the time from the daemon's launch. * ansible-playbook now accepts --force-handlers to run handlers even if tasks result in failures -* apt module now accepts "deb" parameter to install local dpkg files -* regex_replace filter plugin added + New Modules: @@ -65,6 +64,9 @@ Other notable changes: * the ec2_key module now has wait/wait_timeout parameters * added version_compare filter (see docs) * added ability for module documentation YAML to utilize shared module snippets for common args +* apt module now accepts "deb" parameter to install local dpkg files +* regex_replace filter plugin added +* ... to be filled in from changelogs ... 
## 1.5.3 "Love Walks In" - March 13, 2014 From 264b58f3efecbd8b5968276b2062a5d68b52cb17 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Sat, 5 Apr 2014 15:50:59 -0400 Subject: [PATCH 707/772] Update CHANGELOG.md Include changelog from previous release. --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 06781835b11..f16b3d3ca69 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,12 @@ Other notable changes: * apt module now accepts "deb" parameter to install local dpkg files * regex_replace filter plugin added * ... to be filled in from changelogs ... +* + +## 1.5.4 "Love Walks In" - April 1, 2014 + +- Security fix for safe_eval, which further hardens the checking of the evaluation function. +- Changing order of variable precendence for system facts, to ensure that inventory variables take precedence over any facts that may be set on a host. ## 1.5.3 "Love Walks In" - March 13, 2014 From 6e81f41cd407905c665a41aba3b0bb0ac995bd03 Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sat, 5 Apr 2014 15:33:22 -0700 Subject: [PATCH 708/772] add replicaset support to mongodb_user module --- library/database/mongodb_user | 101 ++++++++++++++++++---------------- 1 file changed, 53 insertions(+), 48 deletions(-) diff --git a/library/database/mongodb_user b/library/database/mongodb_user index de99e4971dd..2858919c485 100644 --- a/library/database/mongodb_user +++ b/library/database/mongodb_user @@ -2,6 +2,7 @@ # (c) 2012, Elliott Foster # Sponsored by Four Kitchens http://fourkitchens.com. +# (c) 2014, Epic Games, Inc. # # This file is part of Ansible # @@ -46,6 +47,11 @@ options: - The port to connect to required: false default: 27017 + replica_set: + description: + - Replica set to connect to (automatically connects to primary for writes) + required: false + default: null database: description: - The name of the database to add/remove the user from @@ -92,12 +98,17 @@ EXAMPLES = ''' - mongodb_user: database=burgers name=ben password=12345 roles='read' state=present - mongodb_user: database=burgers name=jim password=12345 roles='readWrite,dbAdmin,userAdmin' state=present - mongodb_user: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present + +# add a user to database in a replica set, the primary server is automatically discovered and written to +- mongodb_user: database=burgers name=bob replica_set=blecher password=12345 roles='readWriteAnyDatabase' state=present ''' import ConfigParser +from distutils.version import LooseVersion try: from pymongo.errors import ConnectionFailure from pymongo.errors import OperationFailure + from pymongo import version as PyMongoVersion from pymongo import MongoClient except ImportError: try: # for older PyMongo 2.2 @@ -114,34 +125,25 @@ else: # def user_add(module, client, db_name, user, password, roles): - try: - db = client[db_name] - if roles is None: - db.add_user(user, password, False) - else: - try: - db.add_user(user, password, None, roles=roles) - except: - module.fail_json(msg='"problem adding user; you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param"') - except OperationFailure: - return False - - return True + db = client[db_name] + if roles is None: + db.add_user(user, password, False) + else: + try: + db.add_user(user, password, None, roles=roles) + except OperationFailure, e: + err_msg = str(e) + if LooseVersion(PyMongoVersion) <= LooseVersion('2.5'): + err_msg = err_msg + ' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles 
param)' + module.fail_json(msg=err_msg) def user_remove(client, db_name, user): - try: - db = client[db_name] - db.remove_user(user) - except OperationFailure: - return False - - return True + db = client[db_name] + db.remove_user(user) def load_mongocnf(): config = ConfigParser.RawConfigParser() mongocnf = os.path.expanduser('~/.mongodb.cnf') - if not os.path.exists(mongocnf): - return False try: config.readfp(open(mongocnf)) @@ -165,6 +167,7 @@ def main(): login_password=dict(default=None), login_host=dict(default='localhost'), login_port=dict(default='27017'), + replica_set=dict(default=None), database=dict(required=True, aliases=['db']), user=dict(required=True, aliases=['name']), password=dict(aliases=['pass']), @@ -180,6 +183,7 @@ def main(): login_password = module.params['login_password'] login_host = module.params['login_host'] login_port = module.params['login_port'] + replica_set = module.params['replica_set'] db_name = module.params['database'] user = module.params['user'] password = module.params['password'] @@ -187,38 +191,39 @@ def main(): state = module.params['state'] try: - client = MongoClient(login_host, int(login_port)) - except ConnectionFailure, e: - module.fail_json(msg='unable to connect to database, check login_host and login_port are correct') + if replica_set: + client = MongoClient(login_host, int(login_port), replicaset=replica_set) + else: + client = MongoClient(login_host, int(login_port)) + + if login_user is None and login_password is None: + mongocnf_creds = load_mongocnf() + if mongocnf_creds is not False: + login_user = mongocnf_creds['user'] + login_password = mongocnf_creds['password'] + elif login_password is None and login_user is not None: + module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') + + if login_user is not None and login_password is not None: + client.admin.authenticate(login_user, login_password) - # try to authenticate as a target user to check if it already exists - try: - client[db_name].authenticate(user, password) - if state == 'present': - module.exit_json(changed=False, user=user) - except OperationFailure: - if state == 'absent': - module.exit_json(changed=False, user=user) - - if login_user is None and login_password is None: - mongocnf_creds = load_mongocnf() - if mongocnf_creds is not False: - login_user = mongocnf_creds['user'] - login_password = mongocnf_creds['password'] - elif login_password is None and login_user is not None: - module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') - - if login_user is not None and login_password is not None: - client.admin.authenticate(login_user, login_password) + except ConnectionFailure, e: + module.fail_json(msg='unable to connect to database: %s' % str(e)) if state == 'present': if password is None: module.fail_json(msg='password parameter required when adding a user') - if user_add(module, client, db_name, user, password, roles) is not True: - module.fail_json(msg='Unable to add or update user, check login_user and login_password are correct and that this user has access to the admin collection') + + try: + user_add(module, client, db_name, user, password, roles) + except OperationFailure, e: + module.fail_json(msg='Unable to add or update user: %s' % str(e)) + elif state == 'absent': - if user_remove(client, db_name, user) is not True: - module.fail_json(msg='Unable to remove user, check login_user and login_password are correct and that this user has access to the admin 
collection') + try: + user_remove(client, db_name, user) + except OperationFailure, e: + module.fail_json(msg='Unable to remove user: %s' % str(e)) module.exit_json(changed=True, user=user) From 6a321fe34671a0524f548ebad74d91c38fad06c5 Mon Sep 17 00:00:00 2001 From: Cove Schneider Date: Sat, 5 Apr 2014 16:31:33 -0700 Subject: [PATCH 709/772] re-add idempotency check --- library/database/mongodb_user | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/library/database/mongodb_user b/library/database/mongodb_user index 2858919c485..5d7e0897b68 100644 --- a/library/database/mongodb_user +++ b/library/database/mongodb_user @@ -48,6 +48,7 @@ options: required: false default: 27017 replica_set: + version_added: "1.6" description: - Replica set to connect to (automatically connects to primary for writes) required: false @@ -196,6 +197,15 @@ def main(): else: client = MongoClient(login_host, int(login_port)) + # try to authenticate as a target user to check if it already exists + try: + client[db_name].authenticate(user, password) + if state == 'present': + module.exit_json(changed=False, user=user) + except OperationFailure: + if state == 'absent': + module.exit_json(changed=False, user=user) + if login_user is None and login_password is None: mongocnf_creds = load_mongocnf() if mongocnf_creds is not False: From 4dc46e90618aa84ca851c210583dc5796d387cc6 Mon Sep 17 00:00:00 2001 From: MorrisA Date: Sat, 5 Apr 2014 18:10:45 -0700 Subject: [PATCH 710/772] Modified Cron to add AIX Crontab support, changed according to suggestions from mpdehaan and redone entirely. --- library/system/cron | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/library/system/cron b/library/system/cron index 9fc9a1a11f1..a85bdd57976 100644 --- a/library/system/cron +++ b/library/system/cron @@ -352,6 +352,8 @@ class CronTab(object): if self.user: if platform.system() == 'SunOS': return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD)) + elif platform.system() == 'AIX': + return "%s -l %s" % (pipes.quote(CRONCMD), (pipes.quote(self.user)) else: user = '-u %s' % pipes.quote(self.user) return "%s %s %s" % (CRONCMD , user, '-l') @@ -362,7 +364,7 @@ class CronTab(object): """ user = '' if self.user: - if platform.system() == 'SunOS': + if platform.system() in [ 'SunOS', 'AIX' ]: return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path)) else: user = '-u %s' % pipes.quote(self.user) From 97a7df76442f681c27c7d00b5af7bd42e7e77475 Mon Sep 17 00:00:00 2001 From: Atlas Health Date: Sun, 6 Apr 2014 15:34:44 -0700 Subject: [PATCH 711/772] moved ebs_optimized param out of main request hash --- library/cloud/ec2 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/library/cloud/ec2 b/library/cloud/ec2 index 50496d60ba3..88a62b9829a 100644 --- a/library/cloud/ec2 +++ b/library/cloud/ec2 @@ -833,9 +833,11 @@ def create_instances(module, ec2, override_count=None): 'instance_type': instance_type, 'kernel_id': kernel, 'ramdisk_id': ramdisk, - 'ebs_optimized': ebs_optimized, 'user_data': user_data} + if ebs_optimized: + params['ebs_optimized'] = ebs_optimized + if boto_supports_profile_name_arg(ec2): params['instance_profile_name'] = instance_profile_name else: From 8a98773089a5329d31d40e89997b75affbc37173 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Sun, 6 Apr 2014 19:17:13 -0500 Subject: [PATCH 712/772] Simplify node matching --- library/cloud/rax_clb_nodes | 23 +++++++++++------------ 1 file 
changed, 11 insertions(+), 12 deletions(-) diff --git a/library/cloud/rax_clb_nodes b/library/cloud/rax_clb_nodes index 30514abde92..dc0950dca58 100644 --- a/library/cloud/rax_clb_nodes +++ b/library/cloud/rax_clb_nodes @@ -137,19 +137,18 @@ def _activate_virtualenv(path): def _get_node(lb, node_id=None, address=None, port=None): """Return a matching node""" - searches = { - 'id': node_id, - 'address': address, - 'port': port - } - for node in getattr(lb, 'nodes', []): - try: - if all(getattr(node, attr) == value - for (attr, value) in searches.items() if value is not None): - return node - except AttributeError: - continue + match_list = [] + if node_id is not None: + match_list.append(getattr(node, 'id', None) == node_id) + if address is not None: + match_list.append(getattr(node, 'address', None) == address) + if port is not None: + match_list.append(getattr(node, 'port', None) == port) + + if match_list and all(match_list): + return node + return None From e84291d717030827e96a27bf5ace7f46ffb46c0f Mon Sep 17 00:00:00 2001 From: Eric Lake Date: Mon, 7 Apr 2014 09:37:15 -0500 Subject: [PATCH 713/772] Adding bold to service doc to call up state and enabled. --- library/system/service | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/library/system/service b/library/system/service index 9820c28a06d..a694d8d92b8 100644 --- a/library/system/service +++ b/library/system/service @@ -37,8 +37,8 @@ options: description: - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary. C(restarted) will always bounce the - service. C(reloaded) will always reload. At least one of state - and enabled are required. + service. C(reloaded) will always reload. B(At least one of state + and enabled are required.) sleep: required: false version_added: "1.3" @@ -59,8 +59,8 @@ options: required: false choices: [ "yes", "no" ] description: - - Whether the service should start on boot. At least one of state and - enabled are required. + - Whether the service should start on boot. B(At least one of state and + enabled are required.) runlevel: required: false From 318e752b6f6cb1aedf84ee453ba60d424782461f Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 7 Apr 2014 10:51:04 -0400 Subject: [PATCH 714/772] Fixes #6845 evaluate changed_when only if task is not skipped --- lib/ansible/runner/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index eb625c49369..859e04667de 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -873,7 +873,7 @@ class Runner(object): # only run the final checks if the async_status has finished, # or if we're not running an async_status check at all if (module_name == 'async_status' and "finished" in data) or module_name != 'async_status': - if changed_when is not None: + if changed_when is not None and 'skipped' not in data: data['changed'] = utils.check_conditional(changed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) if failed_when is not None: data['failed_when_result'] = data['failed'] = utils.check_conditional(failed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) From b73cddc48583b5fc4a42028385ac422c0f3d7305 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 7 Apr 2014 09:42:27 -0500 Subject: [PATCH 715/772] Revert "Fix #5679 again after recent changes in core" This reverts commit 2bfaacd17063ed52ceca53f55861acb7ff655c66. 
Fixes #6821 --- library/files/lineinfile | 5 +++-- .../roles/test_lineinfile/tasks/main.yml | 18 ------------------ 2 files changed, 3 insertions(+), 20 deletions(-) diff --git a/library/files/lineinfile b/library/files/lineinfile index 06e946f130b..f781911ccd1 100644 --- a/library/files/lineinfile +++ b/library/files/lineinfile @@ -351,8 +351,9 @@ def main(): if ins_bef is None and ins_aft is None: ins_aft = 'EOF' - # Replace the newline character with an actual newline. - line = params['line'].decode('string_escape') + # Replace the newline character with an actual newline. Don't replace + # escaped \\n, hence sub and not str.replace. + line = re.sub(r'\n', os.linesep, params['line']) present(module, dest, params['regexp'], line, ins_aft, ins_bef, create, backup, backrefs) diff --git a/test/integration/roles/test_lineinfile/tasks/main.yml b/test/integration/roles/test_lineinfile/tasks/main.yml index b8974b7edca..f59979473b9 100644 --- a/test/integration/roles/test_lineinfile/tasks/main.yml +++ b/test/integration/roles/test_lineinfile/tasks/main.yml @@ -209,21 +209,3 @@ that: - "result.stat.md5 == 'fef1d487711facfd7aa2c87d788c19d9'" - -- name: insert a multiple lines at the end of the file - lineinfile: dest={{output_dir}}/test.txt state=present line="This is a line\nwith \\\n character" insertafter="EOF" - register: result - -- name: assert that the multiple lines was inserted - assert: - that: - - "result.changed == true" - - "result.msg == 'line added'" - -- stat: path={{output_dir}}/test.txt - register: result - -- name: assert test md5 matches after insert the multiple lines - assert: - that: - - "result.stat.md5 == 'c2510d5bc8fdef8e752b8f8e74c784c2'" From b2701d0d76f01739ee39591658a164da6e216fb7 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 7 Apr 2014 12:44:34 -0400 Subject: [PATCH 716/772] Fixes #6780 Update version_added for source_dest_check in ec2 module --- library/cloud/ec2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/ec2 b/library/cloud/ec2 index a6bd32d58a4..544a5360570 100644 --- a/library/cloud/ec2 +++ b/library/cloud/ec2 @@ -198,7 +198,7 @@ options: default: null aliases: [] source_dest_check: - version_added: "1.5" + version_added: "1.6" description: - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers) required: false From 325bbf7dee192ab379ce891fbd711eb1c302293c Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 7 Apr 2014 22:30:53 -0400 Subject: [PATCH 717/772] Fix version_added for url in airbrake_deployment module --- library/monitoring/airbrake_deployment | 1 + 1 file changed, 1 insertion(+) diff --git a/library/monitoring/airbrake_deployment b/library/monitoring/airbrake_deployment index 89d62deda5e..e1c490b881b 100644 --- a/library/monitoring/airbrake_deployment +++ b/library/monitoring/airbrake_deployment @@ -52,6 +52,7 @@ options: - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit. required: false default: "https://airbrake.io/deploys" + version_added: "1.5" validate_certs: description: - If C(no), SSL certificates for the target url will not be validated. 
This should only be used From 66747529a6a1bb7775230c2a599f41ee8c101fcf Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 7 Apr 2014 22:57:49 -0400 Subject: [PATCH 718/772] Fix version_added for a few parameters in django_manage --- library/web_infrastructure/django_manage | 3 +++ 1 file changed, 3 insertions(+) diff --git a/library/web_infrastructure/django_manage b/library/web_infrastructure/django_manage index 509bd404493..42ce3781fda 100644 --- a/library/web_infrastructure/django_manage +++ b/library/web_infrastructure/django_manage @@ -74,14 +74,17 @@ options: description: - Will skip over out-of-order missing migrations, you can only use this parameter with I(migrate) required: false + version_added: "1.3" merge: description: - Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with 'migrate' command required: false + version_added: "1.3" link: description: - Will create links to the files instead of copying them, you can only use this parameter with 'collectstatic' command required: false + version_added: "1.3" notes: - I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified. - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location. From 34648426ba7f29afb863d8dde48491f072742f4a Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 7 Apr 2014 23:10:24 -0400 Subject: [PATCH 719/772] Set version_added for snapshot in ec2_vol --- library/cloud/ec2_vol | 1 + 1 file changed, 1 insertion(+) diff --git a/library/cloud/ec2_vol b/library/cloud/ec2_vol index fb38852f429..7eb8edec4d5 100644 --- a/library/cloud/ec2_vol +++ b/library/cloud/ec2_vol @@ -96,6 +96,7 @@ options: - snapshot ID on which to base the volume required: false default: null + version_added: "1.5" validate_certs: description: - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. 
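
A note on why this run of version_added corrections matters: ansible-doc and the docsite render option tables straight from the DOCUMENTATION YAML, so a wrong version string here becomes a wrong claim in the published docs. A tiny self-check sketch (assuming PyYAML; the snippet borrows the ec2_vol snapshot option from the patch above):

    import yaml

    DOCUMENTATION = '''
    options:
      snapshot:
        description:
          - snapshot ID on which to base the volume
        required: false
        version_added: "1.5"
    '''

    doc = yaml.safe_load(DOCUMENTATION)
    for name, opt in doc['options'].items():
        # flag options whose metadata never got filled in
        print '%s: added in %s' % (name, opt.get('version_added', 'unknown'))
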
From 387414fe29371aaa8d26c48e5418502431409ca9 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 8 Apr 2014 10:00:30 -0400 Subject: [PATCH 720/772] Fix version_added for new params in gce_lb --- library/cloud/gce_lb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/library/cloud/gce_lb b/library/cloud/gce_lb index 3e22c216998..4d7190d8752 100644 --- a/library/cloud/gce_lb +++ b/library/cloud/gce_lb @@ -111,21 +111,21 @@ options: choices: ["active", "present", "absent", "deleted"] aliases: [] service_account_email: - version_added: 1.5.1 + version_added: "1.6" description: - service account email required: false default: null aliases: [] pem_file: - version_added: 1.5.1 + version_added: "1.6" description: - path to the pem file associated with the service account email required: false default: null aliases: [] project_id: - version_added: 1.5.1 + version_added: "1.6" description: - your GCE project ID required: false From d37f609e00b99b5416fa0bb286bb1877f1014b12 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 8 Apr 2014 10:03:56 -0400 Subject: [PATCH 721/772] Fix version_added for new params in gce_net --- library/cloud/gce_net | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/library/cloud/gce_net b/library/cloud/gce_net index 4e731f196d3..c2c0b30452d 100644 --- a/library/cloud/gce_net +++ b/library/cloud/gce_net @@ -74,21 +74,21 @@ options: choices: ["active", "present", "absent", "deleted"] aliases: [] service_account_email: - version_added: 1.5.1 + version_added: "1.6" description: - service account email required: false default: null aliases: [] pem_file: - version_added: 1.5.1 + version_added: "1.6" description: - path to the pem file associated with the service account email required: false default: null aliases: [] project_id: - version_added: 1.5.1 + version_added: "1.6" description: - your GCE project ID required: false From 03f99a57b737342ca5df61816ddaad532039d879 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 8 Apr 2014 10:05:57 -0400 Subject: [PATCH 722/772] Fix version_added for new params in gce_pd --- library/cloud/gce_pd | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/library/cloud/gce_pd b/library/cloud/gce_pd index 04083aa89eb..e5ea6cc4ad8 100644 --- a/library/cloud/gce_pd +++ b/library/cloud/gce_pd @@ -76,21 +76,21 @@ options: default: "us-central1-b" aliases: [] service_account_email: - version_added: 1.5.1 + version_added: "1.6" description: - service account email required: false default: null aliases: [] pem_file: - version_added: 1.5.1 + version_added: "1.6" description: - path to the pem file associated with the service account email required: false default: null aliases: [] project_id: - version_added: 1.5.1 + version_added: "1.6" description: - your GCE project ID required: false From 5308c5dbdffd52f34c9a0e2e2f0c610359ba438f Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 8 Apr 2014 10:13:33 -0400 Subject: [PATCH 723/772] Fix version_added for install_options in homebrew --- library/packaging/homebrew | 1 + 1 file changed, 1 insertion(+) diff --git a/library/packaging/homebrew b/library/packaging/homebrew index 38413fa3c4e..0dfc86096ff 100644 --- a/library/packaging/homebrew +++ b/library/packaging/homebrew @@ -49,6 +49,7 @@ options: - options flags to install a package required: false default: null + version_added: "1.4" notes: [] ''' EXAMPLES = ''' From db90f308ca9c5fdc1e4050f82d166f8e64e0ebc9 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 8 Apr 2014 10:25:44 
-0400 Subject: [PATCH 724/772] Fix version_added for login_tenant_name in keystone_user --- library/cloud/keystone_user | 1 + 1 file changed, 1 insertion(+) diff --git a/library/cloud/keystone_user b/library/cloud/keystone_user index 206fd68b070..d6529b537ed 100644 --- a/library/cloud/keystone_user +++ b/library/cloud/keystone_user @@ -26,6 +26,7 @@ options: - The tenant login_user belongs to required: false default: None + version_added: "1.3" token: description: - The token to be uses in case the password is not specified From c29cca6f86cd482a0d96ec129c12cbdfc9ceda69 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 8 Apr 2014 10:28:01 -0400 Subject: [PATCH 725/772] Fix version added for annotation in pkgng --- library/packaging/pkgng | 1 + 1 file changed, 1 insertion(+) diff --git a/library/packaging/pkgng b/library/packaging/pkgng index 67d10c2635b..b8893b27ce6 100644 --- a/library/packaging/pkgng +++ b/library/packaging/pkgng @@ -54,6 +54,7 @@ options: annotation. If setting or modifying annotations, a value must be provided. required: false + version_added: "1.6" pkgsite: description: - for pkgng versions before 1.1.4, specify packagesite to use From 7c9243eacffe7850a52ff8780f113bd6523df962 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 8 Apr 2014 10:32:53 -0400 Subject: [PATCH 726/772] Fix version added for new parameters in quantum modules --- library/cloud/quantum_floating_ip | 1 + library/cloud/quantum_subnet | 1 + 2 files changed, 2 insertions(+) diff --git a/library/cloud/quantum_floating_ip b/library/cloud/quantum_floating_ip index c69f2b16587..2ad761ec3b7 100644 --- a/library/cloud/quantum_floating_ip +++ b/library/cloud/quantum_floating_ip @@ -80,6 +80,7 @@ options: - The name of the network of the port to associate with the floating ip. Necessary when VM multiple networks. 
required: false default: None + version_added: "1.5" requirements: ["novaclient", "quantumclient", "neutronclient", "keystoneclient"] ''' diff --git a/library/cloud/quantum_subnet b/library/cloud/quantum_subnet index 53cf5d32d15..17f7a6a0056 100644 --- a/library/cloud/quantum_subnet +++ b/library/cloud/quantum_subnet @@ -98,6 +98,7 @@ options: - DNS nameservers for this subnet, comma-separated required: false default: None + version_added: "1.4" allocation_pool_start: description: - From the subnet pool the starting address from which the IP should be allocated From 6d54eae96aeab1d7a4300759ee19a22a9a5a7ea5 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 8 Apr 2014 10:40:41 -0400 Subject: [PATCH 727/772] Fix version_added for node in rabbitmq modules --- library/messaging/rabbitmq_parameter | 1 + library/messaging/rabbitmq_user | 1 + library/messaging/rabbitmq_vhost | 1 + 3 files changed, 3 insertions(+) diff --git a/library/messaging/rabbitmq_parameter b/library/messaging/rabbitmq_parameter index 2b540cbfdee..2f78bd4ee15 100644 --- a/library/messaging/rabbitmq_parameter +++ b/library/messaging/rabbitmq_parameter @@ -52,6 +52,7 @@ options: - erlang node name of the rabbit we wish to configure required: false default: rabbit + version_added: "1.2" state: description: - Specify if user is to be added or removed diff --git a/library/messaging/rabbitmq_user b/library/messaging/rabbitmq_user index 175bc0c1624..1cbee360dff 100644 --- a/library/messaging/rabbitmq_user +++ b/library/messaging/rabbitmq_user @@ -55,6 +55,7 @@ options: - erlang node name of the rabbit we wish to configure required: false default: rabbit + version_added: "1.2" configure_priv: description: - Regular expression to restrict configure actions on a resource diff --git a/library/messaging/rabbitmq_vhost b/library/messaging/rabbitmq_vhost index 122f84e5761..fd4b04a683f 100644 --- a/library/messaging/rabbitmq_vhost +++ b/library/messaging/rabbitmq_vhost @@ -39,6 +39,7 @@ options: - erlang node name of the rabbit we wish to configure required: false default: rabbit + version_added: "1.2" tracing: description: - Enable/disable tracing for a vhost From ba65c23ff9033445ec15dacfb9804bc0002629e3 Mon Sep 17 00:00:00 2001 From: jaseg Date: Tue, 8 Apr 2014 16:45:55 +0200 Subject: [PATCH 728/772] modules.rst: be consistent with service module documentation --- docsite/rst/modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/modules.rst b/docsite/rst/modules.rst index 1e2a851d4a4..aa9ca0f40a1 100644 --- a/docsite/rst/modules.rst +++ b/docsite/rst/modules.rst @@ -17,7 +17,7 @@ handle executing system commands. 
Let's review how we execute three different modules from the command line:: - ansible webservers -m service -a "name=httpd state=running" + ansible webservers -m service -a "name=httpd state=started" ansible webservers -m ping ansible webservers -m command -a "/sbin/reboot -t now" From 26ce8dac27980382a7a2b88c5a88028b77b8c584 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 8 Apr 2014 10:25:44 -0500 Subject: [PATCH 729/772] Fix incorrect version_added value in docstring for subversion export param --- library/source_control/subversion | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/source_control/subversion b/library/source_control/subversion index bda8a8620a1..29d62240af3 100644 --- a/library/source_control/subversion +++ b/library/source_control/subversion @@ -73,7 +73,7 @@ options: export: required: false default: False - version_added: "1.5" + version_added: "1.6" description: - If True, do export instead of checkout/update. ''' From 899ba4c9bb59c5f8a00b6a7dff2715da896b7cc7 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 8 Apr 2014 12:53:56 -0400 Subject: [PATCH 730/772] Fixes syntax error in cron module caused by #6870 --- library/system/cron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/system/cron b/library/system/cron index a85bdd57976..32e7e872f06 100644 --- a/library/system/cron +++ b/library/system/cron @@ -353,7 +353,7 @@ class CronTab(object): if platform.system() == 'SunOS': return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD)) elif platform.system() == 'AIX': - return "%s -l %s" % (pipes.quote(CRONCMD), (pipes.quote(self.user)) + return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user)) else: user = '-u %s' % pipes.quote(self.user) return "%s %s %s" % (CRONCMD , user, '-l') From 68cd7258b6a1ab9f27c2cb2c4f25465c2d79286d Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 8 Apr 2014 13:20:15 -0400 Subject: [PATCH 731/772] Fixes #6894 create docs fragment for files --- lib/ansible/utils/module_docs.py | 2 +- library/files/file | 35 -------------------------------- 2 files changed, 1 insertion(+), 36 deletions(-) diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index f5a696ca568..5139b0a414f 100644 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -82,7 +82,7 @@ def get_docstring(filename, verbose=False): raise Exception("missing options in fragment, possibly misformatted?") for key, value in fragment.items(): - + open("/tmp/awx.log", "a").write("key: %s value: %s\n" % (key, value)) if not doc.has_key(key): doc[key] = value else: diff --git a/library/files/file b/library/files/file index 0cb9d6fe080..3b4aaa9e235 100644 --- a/library/files/file +++ b/library/files/file @@ -38,41 +38,6 @@ description: - Sets attributes of files, symlinks, and directories, or removes files/symlinks/directories. Many other modules support the same options as the M(file) module - including M(copy), M(template), and M(assemble). -options: - path: - description: - - 'path to the file being managed. Aliases: I(dest), I(name)' - required: true - default: [] - aliases: ['dest', 'name'] - state: - description: - - If C(directory), all immediate subdirectories will be created if they - do not exist. If C(file), the file will NOT be created if it does not - exist, see the M(copy) or M(template) module if you want that behavior. - If C(link), the symbolic link will be created or changed. Use C(hard) - for hardlinks. 
If C(absent), directories will be recursively deleted, - and files or symlinks will be unlinked. If C(touch) (new in 1.4), an empty file will - be created if the c(path) does not exist, while an existing file or - directory will receive updated file access and modification times (similar - to the way `touch` works from the command line). - required: false - default: file - choices: [ file, link, directory, hard, touch, absent ] - src: - required: false - default: null - choices: [] - description: - - path of the file to link to (applies only to C(state= link or hard)). Will accept absolute, - relative and nonexisting (with C(force)) paths. Relative paths are not expanded. - recurse: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "1.1" - description: - - recursively set the specified file attributes (applies only to state=directory) notes: - See also M(copy), M(template), M(assemble) requirements: [ ] From 2d33cd1b5dbbc12ffb5bf801227a363bc215ff94 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 8 Apr 2014 13:22:23 -0400 Subject: [PATCH 732/772] Fixes #6894 add missing file and remove debug line --- lib/ansible/utils/module_docs.py | 1 - .../utils/module_docs_fragments/files.py | 58 +++++++++++++++++++ 2 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 lib/ansible/utils/module_docs_fragments/files.py diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index 5139b0a414f..3983efd508b 100644 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -82,7 +82,6 @@ def get_docstring(filename, verbose=False): raise Exception("missing options in fragment, possibly misformatted?") for key, value in fragment.items(): - open("/tmp/awx.log", "a").write("key: %s value: %s\n" % (key, value)) if not doc.has_key(key): doc[key] = value else: diff --git a/lib/ansible/utils/module_docs_fragments/files.py b/lib/ansible/utils/module_docs_fragments/files.py new file mode 100644 index 00000000000..15c6b69bab8 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/files.py @@ -0,0 +1,58 @@ +# (c) 2014, Matt Martz +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = """ +options: + path: + description: + - 'path to the file being managed. Aliases: I(dest), I(name)' + required: true + default: [] + aliases: ['dest', 'name'] + state: + description: + - If C(directory), all immediate subdirectories will be created if they + do not exist. If C(file), the file will NOT be created if it does not + exist, see the M(copy) or M(template) module if you want that behavior. + If C(link), the symbolic link will be created or changed. Use C(hard) + for hardlinks. If C(absent), directories will be recursively deleted, + and files or symlinks will be unlinked. 
If C(touch) (new in 1.4), an empty file will + be created if the c(path) does not exist, while an existing file or + directory will receive updated file access and modification times (similar + to the way `touch` works from the command line). + required: false + default: file + choices: [ file, link, directory, hard, touch, absent ] + src: + required: false + default: null + choices: [] + description: + - path of the file to link to (applies only to C(state= link or hard)). Will accept absolute, + relative and nonexisting (with C(force)) paths. Relative paths are not expanded. + recurse: + required: false + default: "no" + choices: [ "yes", "no" ] + version_added: "1.1" + description: + - recursively set the specified file attributes (applies only to state=directory) +""" From eebc72ab9b1cf61ab3bdbe61b92636c0bae22873 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 8 Apr 2014 13:21:42 -0500 Subject: [PATCH 733/772] Add a timeout decorator for timing out functions that may hang Fixes #6891 --- lib/ansible/module_utils/facts.py | 47 +++++++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 607b911b0fc..c056404210f 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -17,11 +17,13 @@ import os import array +import errno import fcntl import fnmatch import glob import platform import re +import signal import socket import struct import datetime @@ -40,6 +42,33 @@ try: except ImportError: import simplejson as json +# -------------------------------------------------------------- +# timeout function to make sure some fact gathering +# steps do not exceed a time limit + +class TimeoutError(Exception): + pass + +def timeout(seconds=10, error_message=os.strerror(errno.ETIME)): + def decorator(func): + def _handle_timeout(signum, frame): + raise TimeoutError(error_message) + + def wrapper(*args, **kwargs): + signal.signal(signal.SIGALRM, _handle_timeout) + signal.alarm(seconds) + try: + result = func(*args, **kwargs) + finally: + signal.alarm(0) + return result + + return wrapper + + return decorator + +# -------------------------------------------------------------- + class Facts(object): """ This class should only attempt to populate those facts that @@ -498,7 +527,10 @@ class LinuxHardware(Hardware): self.get_memory_facts() self.get_dmi_facts() self.get_device_facts() - self.get_mount_facts() + try: + self.get_mount_facts() + except TimeoutError: + pass return self.facts def get_memory_facts(self): @@ -622,6 +654,7 @@ class LinuxHardware(Hardware): else: self.facts[k] = 'NA' + @timeout(10) def get_mount_facts(self): self.facts['mounts'] = [] mtab = get_file_content('/etc/mtab', '') @@ -919,7 +952,10 @@ class FreeBSDHardware(Hardware): self.get_memory_facts() self.get_dmi_facts() self.get_device_facts() - self.get_mount_facts() + try: + self.get_mount_facts() + except TimeoutError: + pass return self.facts def get_cpu_facts(self): @@ -962,6 +998,7 @@ class FreeBSDHardware(Hardware): self.facts['swaptotal_mb'] = data[1] self.facts['swapfree_mb'] = data[3] + @timeout(10) def get_mount_facts(self): self.facts['mounts'] = [] fstab = get_file_content('/etc/fstab') @@ -1041,7 +1078,10 @@ class NetBSDHardware(Hardware): def populate(self): self.get_cpu_facts() self.get_memory_facts() - self.get_mount_facts() + try: + self.get_mount_facts() + except TimeoutError: + pass return self.facts def get_cpu_facts(self): @@ -1085,6 +1125,7 @@ class 
NetBSDHardware(Hardware): val = data[1].strip().split(' ')[0] self.facts["%s_mb" % key.lower()] = long(val) / 1024 + @timeout(10) def get_mount_facts(self): self.facts['mounts'] = [] fstab = get_file_content('/etc/fstab') From 509561f658c20a4fdb20b2f2bc8fd984459530d2 Mon Sep 17 00:00:00 2001 From: willthames Date: Wed, 9 Apr 2014 16:43:55 +1000 Subject: [PATCH 734/772] Moved AWS modules over to common module fragments Created common module doc fragment, and applied to all modules that use ec2_connect or connect_to_aws as they definitely share the common doc fragments --- .../utils/module_docs_fragments/aws.py | 76 +++++++++++++++++++ library/cloud/ec2 | 42 +--------- library/cloud/ec2_ami | 42 +--------- library/cloud/ec2_asg | 14 +--- library/cloud/ec2_eip | 42 +--------- library/cloud/ec2_group | 42 +--------- library/cloud/ec2_key | 42 +--------- library/cloud/ec2_lc | 14 +--- library/cloud/ec2_metric_alarm | 2 +- library/cloud/ec2_scaling_policy | 2 +- library/cloud/ec2_snapshot | 34 +-------- library/cloud/ec2_tag | 42 +--------- library/cloud/ec2_vol | 34 +-------- 13 files changed, 88 insertions(+), 340 deletions(-) create mode 100644 lib/ansible/utils/module_docs_fragments/aws.py diff --git a/lib/ansible/utils/module_docs_fragments/aws.py b/lib/ansible/utils/module_docs_fragments/aws.py new file mode 100644 index 00000000000..9bbe84a1355 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/aws.py @@ -0,0 +1,76 @@ +# (c) 2014, Will Thames +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +class ModuleDocFragment(object): + + # AWS only documentation fragment + DOCUMENTATION = """ +options: + ec2_url: + description: + - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used + required: false + default: null + aliases: [] + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_secret_key', 'secret_key' ] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_access_key', 'access_key' ] + validate_certs: + description: + - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. + required: false + default: "yes" + choices: ["yes", "no"] + aliases: [] + version_added: "1.5" + profile: + description: + - uses a boto profile. 
Only works with boto >= 2.24.0 + required: false + default: null + aliases: [] + version_added: "1.6" + security_token: + description: + - security token to authenticate against AWS + required: false + default: null + aliases: [] + version_added: "1.6" +requirements: + - boto +notes: + - The following environment variables can be used C(AWS_ACCESS_KEY) or + C(EC2_ACCESS_KEY) or C(AWS_ACCESS_KEY_ID), + C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY) or C(AWS_SECRET_ACCESS_KEY), + C(AWS_REGION) or C(EC2_REGION), C(AWS_SECURITY_TOKEN) + - Ansible uses the boto configuration file (typically ~/.boto) if no + credentials are provided. See http://boto.readthedocs.org/en/latest/boto_config_tut.html + - C(AWS_REGION) or C(EC2_REGION) can be typically be used to specify the + AWS region, when required, but + this can also be configured in the boto config file +""" diff --git a/library/cloud/ec2 b/library/cloud/ec2 index 999d766c6c1..0752f40fa4b 100644 --- a/library/cloud/ec2 +++ b/library/cloud/ec2 @@ -110,24 +110,6 @@ options: - how long to wait for the spot instance request to be fulfilled default: 600 aliases: [] - ec2_url: - description: - - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used - required: false - default: null - aliases: [] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] count: description: - number of instances to launch @@ -237,31 +219,9 @@ options: required: false default: null aliases: [] - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - profile: - description: - - uses a boto profile. Only works with boto >= 2.24.0 - required: false - default: null - aliases: [] - version_added: "1.6" - security_token: - description: - - security token to authenticate against AWS - required: false - default: null - aliases: [] - version_added: "1.6" -requirements: [ "boto" ] author: Seth Vidal, Tim Gerla, Lester Wade +extends_documentation_fragment: aws ''' EXAMPLES = ''' diff --git a/library/cloud/ec2_ami b/library/cloud/ec2_ami index 446c7417e01..3baf70a438f 100644 --- a/library/cloud/ec2_ami +++ b/library/cloud/ec2_ami @@ -22,24 +22,6 @@ short_description: create or destroy an image in ec2, return imageid description: - Creates or deletes ec2 images. This module has a dependency on python-boto >= 2.5 options: - ec2_url: - description: - - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used - required: false - default: null - aliases: [] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. 
If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: ['ec2_access_key', 'access_key' ] instance_id: description: - instance id of the image to create @@ -101,31 +83,9 @@ options: required: false default: null aliases: [] - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - profile: - description: - - uses a boto profile. Only works with boto >= 2.24.0 - required: false - default: null - aliases: [] - version_added: "1.6" - security_token: - description: - - security token to authenticate against AWS - required: false - default: null - aliases: [] - version_added: "1.6" -requirements: [ "boto" ] author: Evan Duffield +extends_documentation_fragment: aws ''' # Thank you to iAcquire for sponsoring development of this module. diff --git a/library/cloud/ec2_asg b/library/cloud/ec2_asg index 0e43e6b2524..6528d951180 100644 --- a/library/cloud/ec2_asg +++ b/library/cloud/ec2_asg @@ -22,7 +22,6 @@ description: - Can create or delete AWS Autoscaling Groups - Works with the ec2_lc module to manage Launch Configurations version_added: "1.6" -requirements: [ "boto" ] author: Gareth Rushgrove options: state: @@ -58,18 +57,6 @@ options: description: - Desired number of instances in group required: false - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key' ] region: description: - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. @@ -80,6 +67,7 @@ options: - List of VPC subnets to use required: false default: None +extends_documentation_fragment: aws """ EXAMPLES = ''' diff --git a/library/cloud/ec2_eip b/library/cloud/ec2_eip index 5be83387e07..e1182108097 100644 --- a/library/cloud/ec2_eip +++ b/library/cloud/ec2_eip @@ -23,24 +23,6 @@ options: required: false choices: ['present', 'absent'] default: present - ec2_url: - description: - - URL to use to connect to EC2-compatible cloud (by default the module will use EC2 endpoints) - required: false - default: null - aliases: [ EC2_URL ] - ec2_access_key: - description: - - EC2 access key. If not specified then the EC2_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ EC2_ACCESS_KEY ] - ec2_secret_key: - description: - - EC2 secret key. If not specified then the EC2_SECRET_KEY environment variable is used. - required: false - default: null - aliases: [ EC2_SECRET_KEY ] region: description: - the EC2 region to use @@ -53,28 +35,6 @@ options: required: false default: false version_added: "1.4" - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - profile: - description: - - uses a boto profile. 
Only works with boto >= 2.24.0 - required: false - default: null - aliases: [] - version_added: "1.6" - security_token: - description: - - security token to authenticate against AWS - required: false - default: null - aliases: [] - version_added: "1.6" reuse_existing_ip_allowed: description: - Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one. @@ -82,7 +42,7 @@ options: default: false version_added: "1.6" -requirements: [ "boto" ] +extends_documentation_fragment: aws author: Lorin Hochstein notes: - This module will return C(public_ip) on success, which will contain the diff --git a/library/cloud/ec2_group b/library/cloud/ec2_group index 12a49fe0d82..56581ecd778 100644 --- a/library/cloud/ec2_group +++ b/library/cloud/ec2_group @@ -37,24 +37,6 @@ options: required: false default: null aliases: [] - ec2_url: - description: - - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints) - required: false - default: null - aliases: [] - ec2_secret_key: - description: - - EC2 secret key - required: false - default: null - aliases: ['aws_secret_key'] - ec2_access_key: - description: - - EC2 access key - required: false - default: null - aliases: ['aws_access_key'] state: version_added: "1.4" description: @@ -62,30 +44,8 @@ options: required: false default: 'present' aliases: [] - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - profile: - description: - - uses a boto profile. Only works with boto >= 2.24.0 - required: false - default: null - aliases: [] - version_added: "1.6" - security_token: - description: - - security token to authenticate against AWS - required: false - default: null - aliases: [] - version_added: "1.6" -requirements: [ "boto" ] +extends_documentation_fragment: aws notes: - If a rule declares a group_name and that group doesn't exist, it will be diff --git a/library/cloud/ec2_key b/library/cloud/ec2_key index 99ea5bcc3e0..9c8274f764a 100644 --- a/library/cloud/ec2_key +++ b/library/cloud/ec2_key @@ -24,52 +24,12 @@ options: required: false default: null aliases: [] - ec2_url: - description: - - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints) - required: false - default: null - aliases: [] - ec2_secret_key: - description: - - EC2 secret key - required: false - default: null - aliases: ['aws_secret_key', 'secret_key'] - ec2_access_key: - description: - - EC2 access key - required: false - default: null - aliases: ['aws_access_key', 'access_key'] state: description: - create or delete keypair required: false default: 'present' aliases: [] - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - profile: - description: - - uses a boto profile. Only works with boto >= 2.24.0 - required: false - default: null - aliases: [] - version_added: "1.6" - security_token: - description: - - security token to authenticate against AWS - required: false - default: null - aliases: [] - version_added: "1.6" wait: description: - Wait for the specified action to complete before returning. 
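(Editor's note: the conversions in this patch all lean on the new aws.py fragment above. A module opts in by naming the fragment, and the doc tooling merges the fragment's option entries into the module's own DOCUMENTATION at build time, via the same get_docstring() merge loop patched earlier in this series. A minimal sketch, with a hypothetical module name:)

    # hedged sketch: 'my_aws_module' is illustrative, not a real module.
    # The shared aws fragment options (ec2_url, aws_access_key, profile,
    # security_token, validate_certs, ...) are documented once and inherited.
    DOCUMENTATION = '''
    ---
    module: my_aws_module
    short_description: example consumer of the shared AWS options
    options:
      name:
        description:
          - only module-specific options are documented inline
        required: true
    extends_documentation_fragment: aws
    '''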
@@ -85,7 +45,7 @@ options: aliases: [] version_added: "1.6" -requirements: [ "boto" ] +extends_documentation_fragment: aws author: Vincent Viallet ''' diff --git a/library/cloud/ec2_lc b/library/cloud/ec2_lc index ff24924aaa6..91905a38894 100644 --- a/library/cloud/ec2_lc +++ b/library/cloud/ec2_lc @@ -22,7 +22,6 @@ description: - Can create or delete AwS Autoscaling Configurations - Works with the ec2_asg module to manage Autoscaling Groups version_added: "1.6" -requirements: [ "boto" ] author: Gareth Rushgrove options: state: @@ -46,18 +45,6 @@ options: description: - A list of security groups into which instances should be found required: false - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key' ] region: description: - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. @@ -75,6 +62,7 @@ options: required: false default: null aliases: [] +extends_documentation_fragment: aws """ EXAMPLES = ''' diff --git a/library/cloud/ec2_metric_alarm b/library/cloud/ec2_metric_alarm index b9d7173b2f3..4791330dbe2 100644 --- a/library/cloud/ec2_metric_alarm +++ b/library/cloud/ec2_metric_alarm @@ -21,7 +21,6 @@ description: - Can create or delete AWS metric alarms - Metrics you wish to alarm on must already exist version_added: "1.6" -requirements: [ "boto" ] author: Zacharie Eakin options: state: @@ -91,6 +90,7 @@ options: description: - A list of the names of action(s) to take when the alarm is in the 'ok' status required: false +extends_documentation_fragment: aws """ EXAMPLES = ''' diff --git a/library/cloud/ec2_scaling_policy b/library/cloud/ec2_scaling_policy index f5ad4d3b010..4e66f463063 100755 --- a/library/cloud/ec2_scaling_policy +++ b/library/cloud/ec2_scaling_policy @@ -7,7 +7,6 @@ description: - Can create or delete scaling policies for autoscaling groups - Referenced autoscaling groups must already exist version_added: "1.6" -requirements: [ "boto" ] author: Zacharie Eakin options: state: @@ -40,6 +39,7 @@ options: description: - The minimum period of time between which autoscaling actions can take place required: false +extends_documentation_fragment: aws """ EXAMPLES = ''' diff --git a/library/cloud/ec2_snapshot b/library/cloud/ec2_snapshot index 075fe143f84..10aba7963c6 100644 --- a/library/cloud/ec2_snapshot +++ b/library/cloud/ec2_snapshot @@ -22,24 +22,6 @@ description: - creates an EC2 snapshot from an existing EBS volume version_added: "1.5" options: - ec2_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: None - aliases: ['aws_secret_key', 'secret_key' ] - ec2_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['aws_access_key', 'access_key' ] - ec2_url: - description: - - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. 
If not set then the value of the EC2_URL environment variable, if any, is used - required: false - default: null - aliases: [] region: description: - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. @@ -70,23 +52,9 @@ options: required: false default: null aliases: [] - profile: - description: - - uses a boto profile. Only works with boto >= 2.24.0 - required: false - default: null - aliases: [] - version_added: "1.6" - security_token: - description: - - security token to authenticate against AWS - required: false - default: null - aliases: [] - version_added: "1.6" -requirements: [ "boto" ] author: Will Thames +extends_documentation_fragment: aws ''' EXAMPLES = ''' diff --git a/library/cloud/ec2_tag b/library/cloud/ec2_tag index 92af644933e..6c6eb94d218 100644 --- a/library/cloud/ec2_tag +++ b/library/cloud/ec2_tag @@ -41,49 +41,9 @@ options: required: false default: null aliases: ['aws_region', 'ec2_region'] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key' ] - ec2_url: - description: - - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used. - required: false - default: null - aliases: [] - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - profile: - description: - - uses a boto profile. Only works with boto >= 2.24.0 - required: false - default: null - aliases: [] - version_added: "1.6" - security_token: - description: - - security token to authenticate against AWS - required: false - default: null - aliases: [] - version_added: "1.6" -requirements: [ "boto" ] author: Lester Wade +extends_documentation_fragment: aws ''' EXAMPLES = ''' diff --git a/library/cloud/ec2_vol b/library/cloud/ec2_vol index 7eb8edec4d5..152094d9b9b 100644 --- a/library/cloud/ec2_vol +++ b/library/cloud/ec2_vol @@ -22,24 +22,6 @@ description: - creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto. version_added: "1.1" options: - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key' ] - ec2_url: - description: - - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. 
If not set then the value of the EC2_URL environment variable, if any, is used - required: false - default: null - aliases: [] instance: description: - instance ID if you wish to attach the volume. @@ -105,20 +87,6 @@ options: choices: ["yes", "no"] aliases: [] version_added: "1.5" - profile: - description: - - uses a boto profile. Only works with boto >= 2.24.0 - required: false - default: null - aliases: [] - version_added: "1.6" - security_token: - description: - - security token to authenticate against AWS - required: false - default: null - aliases: [] - version_added: "1.6" state: description: - whether to ensure the volume is present or absent @@ -126,8 +94,8 @@ options: default: present choices: ['absent', 'present'] version_added: "1.6" -requirements: [ "boto" ] author: Lester Wade +extends_documentation_fragment: aws ''' EXAMPLES = ''' From e7b275483c03f4b69e570691a9a8122a56c5893a Mon Sep 17 00:00:00 2001 From: Will Thames Date: Wed, 9 Apr 2014 22:44:29 +1000 Subject: [PATCH 735/772] Use common code for ec2_elb and ec2_elb_lb Uses the new get_aws_connection_info and connect_to_aws common methods to reuse code Now complains if region is not set in one of the three possible methods Also moved over to common documentation code so this is actually based on #6913 --- library/cloud/ec2_elb | 46 ++++++++++++---------------------------- library/cloud/ec2_elb_lb | 45 +++++++++------------------------------ 2 files changed, 24 insertions(+), 67 deletions(-) diff --git a/library/cloud/ec2_elb b/library/cloud/ec2_elb index 21d771221b5..e76816fbca3 100644 --- a/library/cloud/ec2_elb +++ b/library/cloud/ec2_elb @@ -25,7 +25,6 @@ description: if state=absent is passed as an argument. - Will be marked changed when called only if there are ELBs found to operate on. version_added: "1.2" -requirements: [ "boto" ] author: John Jarvis options: state: @@ -33,29 +32,15 @@ options: - register or deregister the instance required: true choices: ['present', 'absent'] - instance_id: description: - EC2 Instance ID required: true - ec2_elbs: description: - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. required: false default: None - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key' ] region: description: - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. 
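(Editor's note: the hunks below replace hand-built RegionInfo/ELBConnection plumbing with the shared helpers from the ec2 module utilities. Reduced to its core, the pattern both modules move to looks like this; a condensed sketch of calls taken directly from this diff:)

    # resolve url/region/credentials once, fail early when no region is set,
    # then hand the remaining keyword arguments to connect_to_aws().
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in "
                             "EC2_REGION or AWS_REGION environment variables "
                             "or in boto configuration file")
    elb = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)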
@@ -88,7 +73,7 @@ options: required: false default: 0 version_added: "1.6" - +extends_documentation_fragment: aws """ EXAMPLES = """ @@ -130,12 +115,11 @@ class ElbManager: """Handles EC2 instance ELB registration and de-registration""" def __init__(self, module, instance_id=None, ec2_elbs=None, - aws_access_key=None, aws_secret_key=None, region=None): - self.aws_access_key = aws_access_key - self.aws_secret_key = aws_secret_key + region=None, **aws_connect_params): self.module = module self.instance_id = instance_id self.region = region + self.aws_connect_params = aws_connect_params self.lbs = self._get_instance_lbs(ec2_elbs) self.changed = False @@ -270,9 +254,8 @@ class ElbManager: are attached to self.instance_id""" try: - endpoint="elasticloadbalancing.%s.amazonaws.com" % self.region - connect_region = RegionInfo(name=self.region, endpoint=endpoint) - elb = boto.ec2.elb.ELBConnection(self.aws_access_key, self.aws_secret_key, region=connect_region) + elb = connect_to_aws(boto.ec2.elb, self.region, + **self.aws_connect_params) except boto.exception.NoAuthHandlerFound, e: self.module.fail_json(msg=str(e)) @@ -291,12 +274,11 @@ class ElbManager: def _get_instance(self): """Returns a boto.ec2.InstanceObject for self.instance_id""" try: - endpoint = "ec2.%s.amazonaws.com" % self.region - connect_region = RegionInfo(name=self.region, endpoint=endpoint) - ec2_conn = boto.ec2.EC2Connection(self.aws_access_key, self.aws_secret_key, region=connect_region) + ec2 = connect_to_aws(boto.ec2, self.region, + **self.aws_connect_params) except boto.exception.NoAuthHandlerFound, e: self.module.fail_json(msg=str(e)) - return ec2_conn.get_only_instances(instance_ids=[self.instance_id])[0] + return ec2.get_only_instances(instance_ids=[self.instance_id])[0] def main(): @@ -315,12 +297,12 @@ def main(): argument_spec=argument_spec, ) - # def get_ec2_creds(module): - # return ec2_url, ec2_access_key, ec2_secret_key, region - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if not region: + module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") ec2_elbs = module.params['ec2_elbs'] - region = module.params['region'] wait = module.params['wait'] enable_availability_zone = module.params['enable_availability_zone'] timeout = module.params['wait_timeout'] @@ -329,8 +311,8 @@ def main(): module.fail_json(msg="ELBs are required for registration") instance_id = module.params['instance_id'] - elb_man = ElbManager(module, instance_id, ec2_elbs, aws_access_key, - aws_secret_key, region=region) + elb_man = ElbManager(module, instance_id, ec2_elbs, + region=region, **aws_connect_params) if ec2_elbs is not None: for elb in ec2_elbs: diff --git a/library/cloud/ec2_elb_lb b/library/cloud/ec2_elb_lb index 0737b1a087b..5de76cb5df0 100644 --- a/library/cloud/ec2_elb_lb +++ b/library/cloud/ec2_elb_lb @@ -22,7 +22,6 @@ short_description: Creates or destroys Amazon ELB. - Returns information about the load balancer. - Will be marked changed when called only if state is changed. version_added: "1.5" -requirements: [ "boto" ] author: Jim Dalton options: state: @@ -62,32 +61,12 @@ options: - An associative array of health check configuration settigs (see example) require: false default: None - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. 
- required: false - default: None - aliases: ['ec2_secret_key', 'secret_key'] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key'] region: description: - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: false aliases: ['aws_region', 'ec2_region'] - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - +extends_documentation_fragment: aws """ EXAMPLES = """ @@ -190,7 +169,7 @@ class ElbManager(object): def __init__(self, module, name, listeners=None, purge_listeners=None, zones=None, purge_zones=None, security_group_ids=None, health_check=None, - aws_access_key=None, aws_secret_key=None, region=None): + region=None, **aws_connect_params): self.module = module self.name = name self.listeners = listeners @@ -200,8 +179,7 @@ class ElbManager(object): self.security_group_ids = security_group_ids self.health_check = health_check - self.aws_access_key = aws_access_key - self.aws_secret_key = aws_secret_key + self.aws_connect_params = aws_connect_params self.region = region self.changed = False @@ -271,11 +249,8 @@ class ElbManager(object): def _get_elb_connection(self): try: - endpoint = "elasticloadbalancing.%s.amazonaws.com" % self.region - connect_region = RegionInfo(name=self.region, endpoint=endpoint) - return boto.ec2.elb.ELBConnection(self.aws_access_key, - self.aws_secret_key, - region=connect_region) + return connect_to_aws(boto.ec2.elb, self.region, + **self.aws_connect_params) except boto.exception.NoAuthHandlerFound, e: self.module.fail_json(msg=str(e)) @@ -479,9 +454,9 @@ def main(): argument_spec=argument_spec, ) - # def get_ec2_creds(module): - # return ec2_url, ec2_access_key, ec2_secret_key, region - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") name = module.params['name'] state = module.params['state'] @@ -499,8 +474,8 @@ def main(): module.fail_json(msg="At least one availability zone is required for ELB creation") elb_man = ElbManager(module, name, listeners, purge_listeners, zones, - purge_zones, security_group_ids, health_check, aws_access_key, - aws_secret_key, region=region) + purge_zones, security_group_ids, health_check, + region=region, **aws_connect_params) if state == 'present': elb_man.ensure_ok() From 80cd217eb7b176eb3019bab30ca870a2fd3504c5 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Apr 2014 08:52:32 -0400 Subject: [PATCH 736/772] simple random filter --- docsite/rst/playbooks_variables.rst | 21 +++++++++++++++++++++ lib/ansible/runner/filter_plugins/core.py | 8 ++++++++ 2 files changed, 29 insertions(+) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 18aa31dcf30..92f77fa1703 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -234,6 +234,27 @@ be used. The default is ``False``, and if set as ``True`` will use more strict {{ sample_version_var | version_compare('1.0', operator='lt', strict=True) }} +.. 
_random_filter + +Random Number Filter +-------------------------- + +.. versionadded:: 1.6 + +To get a random number from 0 to supplied end:: + + {{ 59 |random}} * * * * root /script/from/cron + +Get a random number from 0 to 100 but in steps of 10:: + + {{ 100 |random(step=10) }} => 70 + +Get a random number from 1 to 100 but in steps of 10:: + + {{ 100 |random(1, 10) }} => 31 + {{ 100 |random(start=1, step=10) }} => 51 + + .. _other_useful_filters: Other Useful Filters diff --git a/lib/ansible/runner/filter_plugins/core.py b/lib/ansible/runner/filter_plugins/core.py index 8bad776cbe8..8557a42c072 100644 --- a/lib/ansible/runner/filter_plugins/core.py +++ b/lib/ansible/runner/filter_plugins/core.py @@ -27,6 +27,7 @@ import operator as py_operator from ansible import errors from ansible.utils import md5s from distutils.version import LooseVersion, StrictVersion +from random import SystemRandom def to_nice_yaml(*a, **kw): '''Make verbose, human readable yaml''' @@ -180,6 +181,10 @@ def version_compare(value, version, operator='eq', strict=False): except Exception, e: raise errors.AnsibleFilterError('Version comparison: %s' % e) +def rand(end, start=0, step=1): + r = SystemRandom() + return r.randrange(start, end, step) + class FilterModule(object): ''' Ansible core jinja2 filters ''' @@ -245,5 +250,8 @@ class FilterModule(object): # version comparison 'version_compare': version_compare, + + # random numbers + 'random': rand, } From 4eb626825be313f68d5cb94c5f5a9440534da829 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 10 Apr 2014 13:43:59 -0400 Subject: [PATCH 737/772] Refactor play._update_vars_files_for_host to make common functions and easier debugging Fixes #4883 --- lib/ansible/playbook/play.py | 103 ++++++++++++++++++++--------------- 1 file changed, 60 insertions(+), 43 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 13fd0e471bf..386170be78c 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -130,7 +130,6 @@ class Play(object): self.max_fail_pct = int(ds.get('max_fail_percentage', 100)) self.su = ds.get('su', self.playbook.su) self.su_user = ds.get('su_user', self.playbook.su_user) - #self.vault_password = vault_password # gather_facts is not a simple boolean, as None means that a 'smart' # fact gathering mode will be used, so we need to be careful here as @@ -763,45 +762,81 @@ class Play(object): def _update_vars_files_for_host(self, host, vault_password=None): + def generate_filenames(host, inject, filename): + + """ Render the raw filename into 3 forms """ + + filename2 = template(self.basedir, filename, self.vars) + filename3 = filename2 + if host is not None: + filename3 = template(self.basedir, filename2, inject) + if self._has_vars_in(filename3) and host is not None: + # allow play scoped vars and host scoped vars to template the filepath + inject.update(self.vars) + filename4 = template(self.basedir, filename3, inject) + filename4 = utils.path_dwim(self.basedir, filename4) + else: + filename4 = utils.path_dwim(self.basedir, filename3) + return filename2, filename3, filename4 + + + def update_vars_cache(host, inject, data, filename): + + """ update a host's varscache with new var data """ + + data = utils.combine_vars(inject, data) + self.playbook.VARS_CACHE[host].update(data) + self.playbook.callbacks.on_import_for_host(host, filename4) + + def process_files(filename, filename2, filename3, filename4, host=None): + + """ pseudo-algorithm for deciding where new vars should go """ + + data = 
utils.parse_yaml_from_file(filename4, vault_password=self.vault_password) + if data: + if type(data) != dict: + raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4) + if host is not None: + if self._has_vars_in(filename2) and not self._has_vars_in(filename3): + # running a host specific pass and has host specific variables + # load into setup cache + update_vars_cache(host, inject, data, filename4) + elif self._has_vars_in(filename3) and not self._has_vars_in(filename4): + # handle mixed scope variables in filepath + update_vars_cache(host, inject, data, filename4) + + elif not self._has_vars_in(filename4): + # found a non-host specific variable, load into vars and NOT + # the setup cache + if host is not None: + self.vars.update(data) + else: + self.vars = utils.combine_vars(self.vars, data) + + # Enforce that vars_files is always a list if type(self.vars_files) != list: self.vars_files = [ self.vars_files ] + # Build an inject if this is a host run started by self.update_vars_files if host is not None: inject = {} inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password)) inject.update(self.playbook.SETUP_CACHE.get(host, {})) inject.update(self.playbook.VARS_CACHE.get(host, {})) + else: + inject = None for filename in self.vars_files: - if type(filename) == list: - - # loop over all filenames, loading the first one, and failing if # none found + # loop over all filenames, loading the first one, and failing if none found found = False sequence = [] for real_filename in filename: - filename2 = template(self.basedir, real_filename, self.vars) - filename3 = filename2 - if host is not None: - filename3 = template(self.basedir, filename2, inject) - filename4 = utils.path_dwim(self.basedir, filename3) + filename2, filename3, filename4 = generate_filenames(host, inject, real_filename) sequence.append(filename4) if os.path.exists(filename4): found = True - data = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password) - if type(data) != dict: - raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4) - if host is not None: - if self._has_vars_in(filename2) and not self._has_vars_in(filename3): - # this filename has variables in it that were fact specific - # so it needs to be loaded into the per host VARS_CACHE - data = utils.combine_vars(inject, data) - self.playbook.VARS_CACHE[host].update(data) - self.playbook.callbacks.on_import_for_host(host, filename4) - elif not self._has_vars_in(filename4): - # found a non-host specific variable, load into vars and NOT - # the setup cache - self.vars.update(data) + process_files(filename, filename2, filename3, filename4, host=host) elif host is not None: self.playbook.callbacks.on_not_import_for_host(host, filename4) if found: @@ -813,28 +848,10 @@ class Play(object): else: # just one filename supplied, load it! 
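            # (Editor's note, a hedged summary of this refactor: the
            # first-found list branch above and the single-filename branch
            # below now share one pipeline, roughly
            #   filename2, filename3, filename4 = generate_filenames(host, inject, filename)
            #   process_files(filename, filename2, filename3, filename4, host=host)
            # where filename2 is templated with play vars, filename3
            # additionally with the host inject, and filename4 is the
            # path_dwim()'d result.)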
- - filename2 = template(self.basedir, filename, self.vars) - filename3 = filename2 - if host is not None: - filename3 = template(self.basedir, filename2, inject) - filename4 = utils.path_dwim(self.basedir, filename3) + filename2, filename3, filename4 = generate_filenames(host, inject, filename) if self._has_vars_in(filename4): continue - new_vars = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password) - if new_vars: - if type(new_vars) != dict: - raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename4, type(new_vars))) - if host is not None and self._has_vars_in(filename2) and not self._has_vars_in(filename3): - # running a host specific pass and has host specific variables - # load into setup cache - new_vars = utils.combine_vars(inject, new_vars) - self.playbook.VARS_CACHE[host] = utils.combine_vars( - self.playbook.VARS_CACHE[host], new_vars) - self.playbook.callbacks.on_import_for_host(host, filename4) - elif host is None: - # running a non-host specific pass and we can update the global vars instead - self.vars = utils.combine_vars(self.vars, new_vars) + process_files(filename, filename2, filename3, filename4, host=host) # finally, update the VARS_CACHE for the host, if it is set if host is not None: From d9d36ce207e8b0357ad2ee0010de2b72637372d8 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 10 Apr 2014 13:55:03 -0400 Subject: [PATCH 738/772] Add unit tests for vars_files --- test/units/TestPlayVarsFiles.py | 415 ++++++++++++++++++++++++++++++++ 1 file changed, 415 insertions(+) create mode 100644 test/units/TestPlayVarsFiles.py diff --git a/test/units/TestPlayVarsFiles.py b/test/units/TestPlayVarsFiles.py new file mode 100644 index 00000000000..cdfa48fe557 --- /dev/null +++ b/test/units/TestPlayVarsFiles.py @@ -0,0 +1,415 @@ +#!/usr/bin/env python + +import os +import shutil +from tempfile import mkstemp +from tempfile import mkdtemp +from ansible.playbook.play import Play +import ansible + +import unittest +from nose.plugins.skip import SkipTest + + +class FakeCallBacks(object): + def __init__(self): + pass + def on_vars_prompt(self): + pass + def on_import_for_host(self, host, filename): + pass + +class FakeInventory(object): + def __init__(self): + self.hosts = {} + def basedir(self): + return "." + def get_variables(self, host, vault_password=None): + if host in self.hosts: + return self.hosts[host] + else: + return {} + +class FakePlayBook(object): + def __init__(self): + self.extra_vars = {} + self.remote_user = None + self.remote_port = None + self.sudo = None + self.sudo_user = None + self.su = None + self.su_user = None + self.transport = None + self.only_tags = None + self.skip_tags = None + self.VARS_CACHE = {} + self.SETUP_CACHE = {} + self.inventory = FakeInventory() + self.callbacks = FakeCallBacks() + + self.VARS_CACHE['localhost'] = {} + + +class TestMe(unittest.TestCase): + + ######################################## + # BASIC FILE LOADING BEHAVIOR TESTS + ######################################## + + def test_play_constructor(self): + # __init__(self, playbook, ds, basedir, vault_password=None) + playbook = FakePlayBook() + ds = { "hosts": "localhost"} + basedir = "." + play = Play(playbook, ds, basedir) + + def test_vars_file(self): + + # make a vars file + fd, temp_path = mkstemp() + f = open(temp_path, "wb") + f.write("foo: bar\n") + f.close() + + # create a play with a vars_file + playbook = FakePlayBook() + ds = { "hosts": "localhost", + "vars_files": [temp_path]} + basedir = "." 
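+        # (Editor's note: constructing Play() runs the host=None pass of
+        # _update_vars_files_for_host, so a vars_file whose name needs no
+        # host-specific templating is loaded straight into play.vars.)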
+ play = Play(playbook, ds, basedir) + os.remove(temp_path) + + # make sure the variable was loaded + assert 'foo' in play.vars, "vars_file was not loaded into play.vars" + assert play.vars['foo'] == 'bar', "foo was not set to bar in play.vars" + + def test_vars_file_nonlist_error(self): + + # make a vars file + fd, temp_path = mkstemp() + f = open(temp_path, "wb") + f.write("foo: bar\n") + f.close() + + # create a play with a string for vars_files + playbook = FakePlayBook() + ds = { "hosts": "localhost", + "vars_files": temp_path} + basedir = "." + error_hit = False + try: + play = Play(playbook, ds, basedir) + except: + error_hit = True + os.remove(temp_path) + + assert error_hit == True, "no error was thrown when vars_files was not a list" + + + def test_multiple_vars_files(self): + + # make a vars file + fd, temp_path = mkstemp() + f = open(temp_path, "wb") + f.write("foo: bar\n") + f.close() + + # make a second vars file + fd, temp_path2 = mkstemp() + f = open(temp_path2, "wb") + f.write("baz: bang\n") + f.close() + + + # create a play with two vars_files + playbook = FakePlayBook() + ds = { "hosts": "localhost", + "vars_files": [temp_path, temp_path2]} + basedir = "." + play = Play(playbook, ds, basedir) + os.remove(temp_path) + os.remove(temp_path2) + + # make sure the variables were loaded + assert 'foo' in play.vars, "vars_file was not loaded into play.vars" + assert play.vars['foo'] == 'bar', "foo was not set to bar in play.vars" + assert 'baz' in play.vars, "vars_file2 was not loaded into play.vars" + assert play.vars['baz'] == 'bang', "baz was not set to bang in play.vars" + + def test_vars_files_first_found(self): + + # make a vars file + fd, temp_path = mkstemp() + f = open(temp_path, "wb") + f.write("foo: bar\n") + f.close() + + # get a random file path + fd, temp_path2 = mkstemp() + # make sure this file doesn't exist + os.remove(temp_path2) + + # create a play + playbook = FakePlayBook() + ds = { "hosts": "localhost", + "vars_files": [[temp_path2, temp_path]]} + basedir = "." + play = Play(playbook, ds, basedir) + os.remove(temp_path) + + # make sure the variable was loaded + assert 'foo' in play.vars, "vars_file was not loaded into play.vars" + assert play.vars['foo'] == 'bar', "foo was not set to bar in play.vars" + + def test_vars_files_multiple_found(self): + + # make a vars file + fd, temp_path = mkstemp() + f = open(temp_path, "wb") + f.write("foo: bar\n") + f.close() + + # make a second vars file + fd, temp_path2 = mkstemp() + f = open(temp_path2, "wb") + f.write("baz: bang\n") + f.close() + + # create a play + playbook = FakePlayBook() + ds = { "hosts": "localhost", + "vars_files": [[temp_path, temp_path2]]} + basedir = "." + play = Play(playbook, ds, basedir) + os.remove(temp_path) + os.remove(temp_path2) + + # make sure the variables were loaded + assert 'foo' in play.vars, "vars_file was not loaded into play.vars" + assert play.vars['foo'] == 'bar', "foo was not set to bar in play.vars" + assert 'baz' not in play.vars, "vars_file2 was loaded after vars_file1 was loaded" + + def test_vars_files_assert_all_found(self): + + # make a vars file + fd, temp_path = mkstemp() + f = open(temp_path, "wb") + f.write("foo: bar\n") + f.close() + + # make a second vars file + fd, temp_path2 = mkstemp() + # make sure it doesn't exist + os.remove(temp_path2) + + # create a play + playbook = FakePlayBook() + ds = { "hosts": "localhost", + "vars_files": [temp_path, temp_path2]} + basedir = "." 
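+        # (Editor's note: unlike the nested first-found form tested above, a
+        # plain vars_files entry pointing at a missing file must raise
+        # AnsibleError, which is what this block asserts.)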
+ + error_hit = False + error_msg = None + + try: + play = Play(playbook, ds, basedir) + except ansible.errors.AnsibleError, e: + error_hit = True + error_msg = e + + os.remove(temp_path) + assert error_hit == True, "no error was thrown for missing vars_file" + + + ######################################## + # VARIABLE PRECEDENCE TESTS + ######################################## + + # On the first run vars_files are loaded into play.vars by host == None + # * only files with vars from host==None will work here + # On the secondary run(s), a host is given and the vars_files are loaded into VARS_CACHE + # * this only occurs if host is not None, filename2 has vars in the name, and filename3 does not + + # filename -- the original string + # filename2 -- filename templated with play vars + # filename3 -- filename2 template with inject (hostvars + setup_cache + vars_cache) + # filename4 -- path_dwim(filename3) + + def test_vars_files_for_host(self): + + # host != None + # vars in filename2 + # no vars in filename3 + + # make a vars file + fd, temp_path = mkstemp() + f = open(temp_path, "wb") + f.write("foo: bar\n") + f.close() + + # build play attributes + playbook = FakePlayBook() + ds = { "hosts": "localhost", + "vars_files": ["{{ temp_path }}"]} + basedir = "." + playbook.VARS_CACHE['localhost']['temp_path'] = temp_path + + # create play and do first run + play = Play(playbook, ds, basedir) + + # the second run is started by calling update_vars_files + play.update_vars_files(['localhost']) + os.remove(temp_path) + + assert 'foo' in play.playbook.VARS_CACHE['localhost'], "vars_file vars were not loaded into vars_cache" + assert play.playbook.VARS_CACHE['localhost']['foo'] == 'bar', "foo does not equal bar" + + def test_vars_files_for_host_with_extra_vars(self): + + # host != None + # vars in filename2 + # no vars in filename3 + + # make a vars file + fd, temp_path = mkstemp() + f = open(temp_path, "wb") + f.write("foo: bar\n") + f.close() + + # build play attributes + playbook = FakePlayBook() + ds = { "hosts": "localhost", + "vars_files": ["{{ temp_path }}"]} + basedir = "." + playbook.VARS_CACHE['localhost']['temp_path'] = temp_path + playbook.extra_vars = {"foo": "extra"} + + # create play and do first run + play = Play(playbook, ds, basedir) + + # the second run is started by calling update_vars_files + play.update_vars_files(['localhost']) + os.remove(temp_path) + + assert 'foo' in play.vars, "extra vars were not set in play.vars" + assert 'foo' in play.playbook.VARS_CACHE['localhost'], "vars_file vars were not loaded into vars_cache" + assert play.playbook.VARS_CACHE['localhost']['foo'] == 'extra', "extra vars did not overwrite vars_files vars" + + + ######################################## + # COMPLEX FILENAME TEMPLATING TESTS + ######################################## + + def test_vars_files_two_vars_in_name(self): + + # self.vars = ds['vars'] + # self.vars += _get_vars() ... aka extra_vars + + # make a temp dir + temp_dir = mkdtemp() + + # make a temp file + fd, temp_file = mkstemp(dir=temp_dir) + f = open(temp_file, "wb") + f.write("foo: bar\n") + f.close() + + # build play attributes + playbook = FakePlayBook() + ds = { "hosts": "localhost", + "vars": { "temp_dir": os.path.dirname(temp_file), + "temp_file": os.path.basename(temp_file) }, + "vars_files": ["{{ temp_dir + '/' + temp_file }}"]} + basedir = "." 
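+        # (Editor's note: both template variables here are play-scoped, so
+        # the full path resolves on the first host=None pass and 'foo' is
+        # expected in play.vars rather than in the per-host VARS_CACHE.)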
+ + # create play and do first run + play = Play(playbook, ds, basedir) + + # cleanup + shutil.rmtree(temp_dir) + + assert 'foo' in play.vars, "double var templated vars_files filename not loaded" + + def test_vars_files_two_vars_different_scope(self): + + # + # Use a play var and an inventory var to create the filename + # + + # self.playbook.inventory.get_variables(host) + # {'group_names': ['ungrouped'], 'inventory_hostname': 'localhost', + # 'ansible_ssh_user': 'root', 'inventory_hostname_short': 'localhost'} + + # make a temp dir + temp_dir = mkdtemp() + + # make a temp file + fd, temp_file = mkstemp(dir=temp_dir) + f = open(temp_file, "wb") + f.write("foo: bar\n") + f.close() + + # build play attributes + playbook = FakePlayBook() + playbook.inventory.hosts['localhost'] = {'inventory_hostname': os.path.basename(temp_file)} + ds = { "hosts": "localhost", + "vars": { "temp_dir": os.path.dirname(temp_file)}, + "vars_files": ["{{ temp_dir + '/' + inventory_hostname }}"]} + basedir = "." + + # create play and do first run + play = Play(playbook, ds, basedir) + + # do the host run + play.update_vars_files(['localhost']) + + # cleanup + shutil.rmtree(temp_dir) + + assert 'foo' not in play.vars, \ + "mixed scope vars_file loaded into play vars" + assert 'foo' in play.playbook.VARS_CACHE['localhost'], \ + "differently scoped templated vars_files filename not loaded" + assert play.playbook.VARS_CACHE['localhost']['foo'] == 'bar', \ + "foo is not bar" + + def test_vars_files_two_vars_different_scope_first_found(self): + + # + # Use a play var and an inventory var to create the filename + # + + # make a temp dir + temp_dir = mkdtemp() + + # make a temp file + fd, temp_file = mkstemp(dir=temp_dir) + f = open(temp_file, "wb") + f.write("foo: bar\n") + f.close() + + # build play attributes + playbook = FakePlayBook() + playbook.inventory.hosts['localhost'] = {'inventory_hostname': os.path.basename(temp_file)} + ds = { "hosts": "localhost", + "vars": { "temp_dir": os.path.dirname(temp_file)}, + "vars_files": [["{{ temp_dir + '/' + inventory_hostname }}"]]} + basedir = "." + + # create play and do first run + play = Play(playbook, ds, basedir) + + # do the host run + play.update_vars_files(['localhost']) + + # cleanup + shutil.rmtree(temp_dir) + + assert 'foo' not in play.vars, \ + "mixed scope vars_file loaded into play vars" + assert 'foo' in play.playbook.VARS_CACHE['localhost'], \ + "differently scoped templated vars_files filename not loaded" + assert play.playbook.VARS_CACHE['localhost']['foo'] == 'bar', \ + "foo is not bar" + + From 3b06ab84e37bdcbcbd74780f0e1b070d6754c939 Mon Sep 17 00:00:00 2001 From: Felix Kaiser Date: Thu, 10 Apr 2014 21:14:42 +0200 Subject: [PATCH 739/772] Make test-module interpret --args='{...' 
 as yaml
---
 hacking/test-module | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/hacking/test-module b/hacking/test-module
index 3f7a8a2d648..f293458ad4b 100755
--- a/hacking/test-module
+++ b/hacking/test-module
@@ -93,6 +93,10 @@ def boilerplate_module(modfile, args, interpreter):
         # Argument is a YAML file (JSON is a subset of YAML)
         complex_args = utils.combine_vars(complex_args, utils.parse_yaml_from_file(args[1:]))
         args=''
+    elif args.startswith("{"):
+        # Argument is a YAML document (not a file)
+        complex_args = utils.combine_vars(complex_args, utils.parse_yaml(args))
+        args=''

     inject = {}
     if interpreter:

From dfdc3b82a02500c8431f24968c94fe99f2cecf86 Mon Sep 17 00:00:00 2001
From: James Tanner
Date: Thu, 10 Apr 2014 16:06:35 -0400
Subject: [PATCH 740/772] Fixes #6929 remove file module doc fragments from
 copy and template

---
 library/files/copy     | 1 -
 library/files/template | 1 -
 2 files changed, 2 deletions(-)

diff --git a/library/files/copy b/library/files/copy
index a8b4000f73e..08aa1d71a40 100644
--- a/library/files/copy
+++ b/library/files/copy
@@ -84,7 +84,6 @@ options:
     required: false
     version_added: "1.5"
 author: Michael DeHaan
-extends_documentation_fragment: files.DOCUMENTATION
 notes:
    - The "copy" module recursively copy facility does not scale to lots (>hundreds) of files.
      For alternative, see synchronize module, which is a wrapper around rsync.
diff --git a/library/files/template b/library/files/template
index e7b9a502938..3c21f3f1170 100644
--- a/library/files/template
+++ b/library/files/template
@@ -55,7 +55,6 @@ notes:
     which changes the variable interpolation markers to [% var %] instead of {{ var }}.  This is the best way
     to prevent evaluation of things that look like, but should not be Jinja2.  raw/endraw in Jinja2 will not
     work as you expect because templates in Ansible are recursively evaluated."
 requirements: []
-extends_documentation_fragment: files.DOCUMENTATION
 author: Michael DeHaan
'''

From ca1f6db28d8d95eebbaab72c916c6f2b1dc1f21f Mon Sep 17 00:00:00 2001
From: Chris Petersen
Date: Thu, 10 Apr 2014 22:28:27 -0700
Subject: [PATCH 741/772] Explain how to do multiple paths in roles_path

Document that roles_path can support multiple colon-separated paths
---
 docsite/rst/intro_configuration.rst | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst
index 6304a0d350b..f37ba6012cd 100644
--- a/docsite/rst/intro_configuration.rst
+++ b/docsite/rst/intro_configuration.rst
@@ -439,6 +439,10 @@ choose to establish a convention to checkout roles in /opt/mysite/roles like so:

     roles_path = /opt/mysite/roles

+Additional paths can be provided separated by colon characters, in the same way as other pathstrings::
+
+    roles_path = /opt/mysite/roles:/opt/othersite/roles
+
 Roles will be first searched for in the playbook directory.  Should a role
 not be found, it will indicate all the possible paths that were searched.

From ebb33ef9d07a451bf07d80fca74ab376eb52863d Mon Sep 17 00:00:00 2001
From: sam-at-github
Date: Fri, 11 Apr 2014 23:50:36 +1000
Subject: [PATCH 742/772] What particular script? Moved a sentence.

The text referred to "this particular script" before any script had been
introduced. Moved the sentence to after the intro, as it confused me; I
thought I'd missed something.
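(Editor's note on the hacking/test-module change in patch 739 above: with it, --args accepts three forms. A condensed sketch of the dispatch, reusing the utils calls shown in that diff; the comment examples are only illustrative:)

    # @somefile.yml  -> load YAML/JSON from a file
    # '{...}'        -> parse the string itself as a YAML/JSON document
    # anything else  -> keep treating it as a key=value string
    if args.startswith("@"):
        complex_args = utils.combine_vars(complex_args, utils.parse_yaml_from_file(args[1:]))
        args = ''
    elif args.startswith("{"):
        complex_args = utils.combine_vars(complex_args, utils.parse_yaml(args))
        args = ''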
--- docsite/rst/intro_dynamic_inventory.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index e42da4bad8f..7eeb517b2f4 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -28,11 +28,11 @@ It is expected that many Ansible users with a reasonable amount of physical hard While primarily used to kickoff OS installations and manage DHCP and DNS, Cobbler has a generic layer that allows it to represent data for multiple configuration management systems (even at the same time), and has -been referred to as a 'lightweight CMDB' by some admins. This particular script will communicate with Cobbler -using Cobbler's XMLRPC API. +been referred to as a 'lightweight CMDB' by some admins. To tie Ansible's inventory to Cobbler (optional), copy `this script `_ to /etc/ansible and `chmod +x` the file. cobblerd will now need to be running when you are using Ansible and you'll need to use Ansible's ``-i`` command line option (e.g. ``-i /etc/ansible/cobbler.py``). +This particular script will communicate with Cobbler using Cobbler's XMLRPC API. First test the script by running ``/etc/ansible/cobbler.py`` directly. You should see some JSON data output, but it may not have anything in it just yet. From 42fbbd05026be1472fe1d5a0be9e5fbff0ff0621 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Fri, 11 Apr 2014 11:04:05 -0400 Subject: [PATCH 743/772] Addresses #6702 set version_added for rsync_opts --- library/files/synchronize | 1 + 1 file changed, 1 insertion(+) diff --git a/library/files/synchronize b/library/files/synchronize index 4080399ebf6..c3a09c42aaf 100644 --- a/library/files/synchronize +++ b/library/files/synchronize @@ -136,6 +136,7 @@ options: - Specify additional rsync options by passing in an array. (added in Ansible 1.6) default: required: false + version_added: "1.6" notes: - Inspect the verbose output to validate the destination user/host/path are what was expected. From 6a6ade830ef51f16478a198108fe237dc1eb59a2 Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Fri, 11 Apr 2014 11:14:40 -0400 Subject: [PATCH 744/772] Added example demonstrating other functionality --- library/system/user | 3 +++ 1 file changed, 3 insertions(+) diff --git a/library/system/user b/library/system/user index 12b1a62d270..d33244dba54 100644 --- a/library/system/user +++ b/library/system/user @@ -181,6 +181,9 @@ EXAMPLES = ''' # Add the user 'johnd' with a specific uid and a primary group of 'admin' - user: name=johnd comment="John Doe" uid=1040 +# Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups +- user: name=james shell=/bin/bash groups=admins,developers append=yes + # Remove the user 'johnd' - user: name=johnd state=absent remove=yes From b0556ab1fbc0652147b86a809cf8b69d4c3cd188 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Fri, 11 Apr 2014 11:18:05 -0400 Subject: [PATCH 745/772] Addresses #6702 Remove version_added from description string --- library/files/synchronize | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/files/synchronize b/library/files/synchronize index c3a09c42aaf..8d67ce9bac1 100644 --- a/library/files/synchronize +++ b/library/files/synchronize @@ -133,7 +133,7 @@ options: default: yes rsync_opts: description: - - Specify additional rsync options by passing in an array. (added in Ansible 1.6) + - Specify additional rsync options by passing in an array. 
default: required: false version_added: "1.6" From ae0cb241a20863f65c44ca72eb249d698028a9fb Mon Sep 17 00:00:00 2001 From: Dick Davies Date: Fri, 11 Apr 2014 16:48:51 +0100 Subject: [PATCH 746/772] fix typo --- docsite/rst/playbooks_environment.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_environment.rst b/docsite/rst/playbooks_environment.rst index 971765ab303..11334fdb2f0 100644 --- a/docsite/rst/playbooks_environment.rst +++ b/docsite/rst/playbooks_environment.rst @@ -23,7 +23,7 @@ The environment can also be stored in a variable, and accessed like so:: - hosts: all remote_user: root - # here we make a variable named "env" that is a dictionary + # here we make a variable named "proxy_env" that is a dictionary vars: proxy_env: http_proxy: http://proxy.example.com:8080 From 71b5a11735390a918d9db9fb8035120d00945994 Mon Sep 17 00:00:00 2001 From: Matt Bray Date: Fri, 11 Apr 2014 17:08:55 +0100 Subject: [PATCH 747/772] docker: from API 1.10 dns and volumes_from should be passed to start() --- library/cloud/docker | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/library/cloud/docker b/library/cloud/docker index a6af36c4f48..3fb82fd7dc5 100644 --- a/library/cloud/docker +++ b/library/cloud/docker @@ -301,6 +301,7 @@ import sys from urlparse import urlparse try: import docker.client + import docker.utils from requests.exceptions import * except ImportError, e: HAS_DOCKER_PY = False @@ -524,10 +525,8 @@ class DockerManager: 'command': self.module.params.get('command'), 'ports': self.exposed_ports, 'volumes': self.volumes, - 'volumes_from': self.module.params.get('volumes_from'), 'mem_limit': _human_to_bytes(self.module.params.get('memory_limit')), 'environment': self.env, - 'dns': self.module.params.get('dns'), 'hostname': self.module.params.get('hostname'), 'detach': self.module.params.get('detach'), 'name': self.module.params.get('name'), @@ -535,6 +534,10 @@ class DockerManager: 'tty': self.module.params.get('tty'), } + if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) < 0: + params['dns'] = self.module.params.get('dns') + params['volumes_from'] = self.module.params.get('volumes_from') + def do_create(count, params): results = [] for _ in range(count): @@ -562,6 +565,11 @@ class DockerManager: 'privileged': self.module.params.get('privileged'), 'links': self.links, } + + if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) >= 0: + params['dns'] = self.module.params.get('dns') + params['volumes_from'] = self.module.params.get('volumes_from') + for i in containers: self.client.start(i['Id'], **params) self.increment_counter('started') From db92301b0005e95312ab1e45db049f846d8a9e7a Mon Sep 17 00:00:00 2001 From: "Michael J. 
Schultz" Date: Fri, 11 Apr 2014 18:23:53 -0500 Subject: [PATCH 748/772] cloud/elasticache: Ensure self.data is iterable - self.data['SecurityGroups'] can return None causing traceback, if that is the case fallback to an empty list --- library/cloud/elasticache | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/library/cloud/elasticache b/library/cloud/elasticache index 54bf734d204..8c82f2fcc20 100644 --- a/library/cloud/elasticache +++ b/library/cloud/elasticache @@ -395,7 +395,8 @@ class ElastiCacheManager(object): # check vpc security groups vpc_security_groups = [] - for sg in self.data['SecurityGroups']: + security_groups = self.data['SecurityGroups'] or [] + for sg in security_groups: vpc_security_groups.append(sg['SecurityGroupId']) if set(vpc_security_groups) - set(self.security_group_ids): return True From 21a2d32ffc2a535720908c545fb2b75e181f32a1 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Sat, 12 Apr 2014 15:57:59 -0400 Subject: [PATCH 749/772] Fix empty group in rax.py output --- plugins/inventory/rax.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/inventory/rax.py b/plugins/inventory/rax.py index 039233005d7..457c20962a6 100755 --- a/plugins/inventory/rax.py +++ b/plugins/inventory/rax.py @@ -168,7 +168,8 @@ def _list(regions): groups[group].append(server.name) for extra_group in server.metadata.get('groups', '').split(','): - groups[extra_group].append(server.name) + if extra_group: + groups[extra_group].append(server.name) # Add host metadata for key, value in to_dict(server).items(): From a6c00989d1a9e88e074f2deefe7a5a33461b7ace Mon Sep 17 00:00:00 2001 From: Nick Zaccardi Date: Sat, 12 Apr 2014 21:26:08 -0400 Subject: [PATCH 750/772] Add reference to the join filter. --- docsite/rst/playbooks_variables.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 92f77fa1703..ce70daf54ff 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -260,6 +260,10 @@ Get a random number from 1 to 100 but in steps of 10:: Other Useful Filters -------------------- +To concatenate a list into a string:: + + {{ list | join(" ") }} + To get the last name of a file path, like 'foo.txt' out of '/etc/asdf/foo.txt':: {{ path | basename }} From 7cb4b7ab896a11b987bdf6ed9bcc327a99c0ac3f Mon Sep 17 00:00:00 2001 From: Cristian Ciupitu Date: Mon, 14 Apr 2014 01:22:18 +0300 Subject: [PATCH 751/772] Fix typos in ansible-playbook man page source Note: the man page needs to be rebuilt. --- docs/man/man1/ansible-playbook.1.asciidoc.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/man/man1/ansible-playbook.1.asciidoc.in b/docs/man/man1/ansible-playbook.1.asciidoc.in index a1ef2391930..23fe37a2c0b 100644 --- a/docs/man/man1/ansible-playbook.1.asciidoc.in +++ b/docs/man/man1/ansible-playbook.1.asciidoc.in @@ -76,11 +76,11 @@ access, if any. Desired sudo user (default=root). -*-t*, 'TAGS', *'--tags=*'TAGS':: +*-t*, 'TAGS', *--tags=*'TAGS':: Only run plays and tasks tagged with these values. -*'--skip-tags=*'SKIP_TAGS':: +*--skip-tags=*'SKIP_TAGS':: Only run plays and tasks whose tags do not match these values. 
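The two options whose markup the patch above corrects are used with an ordinary
playbook invocation; a brief sketch (the playbook and tag names here are
illustrative):

    # run only plays and tasks tagged 'configuration' or 'packages'
    ansible-playbook example.yml --tags "configuration,packages"

    # run everything except plays and tasks tagged 'notification'
    ansible-playbook example.yml --skip-tags "notification"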
From db36c41c3bfaf4e147a73740a8507a2a2c152246 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 14 Apr 2014 13:18:02 -0400 Subject: [PATCH 752/772] Addresses #6908 Strip usernames and passwords from apt_repository filenames --- library/packaging/apt_repository | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/library/packaging/apt_repository b/library/packaging/apt_repository index af51618e682..928b6df0608 100644 --- a/library/packaging/apt_repository +++ b/library/packaging/apt_repository @@ -133,12 +133,22 @@ class SourcesList(object): def _suggest_filename(self, line): def _cleanup_filename(s): return '_'.join(re.sub('[^a-zA-Z0-9]', ' ', s).split()) + def _strip_username_password(s): + if '@' in s: + s = s.split('@', 1) + s = s[-1] + return s # Drop options and protocols. line = re.sub('\[[^\]]+\]', '', line) line = re.sub('\w+://', '', line) + # split line into valid keywords parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES] + + # Drop usernames and passwords + parts[0] = _strip_username_password(parts[0]) + return '%s.list' % _cleanup_filename(' '.join(parts[:1])) def _parse(self, line, raise_if_invalid_or_disabled=False): @@ -358,6 +368,7 @@ def main(): install_python_apt(module) repo = module.params['repo'] + open("/tmp/awx.log", "a").write("repo: %s\n" % repo) state = module.params['state'] update_cache = module.params['update_cache'] sourceslist = None From 2cc4ac2e7563e6b73666048c3624bf092f9959e1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 14 Apr 2014 12:36:54 -0500 Subject: [PATCH 753/772] Catch traceback caused by permissions errors during a local md5 sum Fixes #6948 --- lib/ansible/utils/__init__.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 4abcd2cd82b..ff73e0629a5 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -590,12 +590,15 @@ def md5(filename): return None digest = _md5() blocksize = 64 * 1024 - infile = open(filename, 'rb') - block = infile.read(blocksize) - while block: - digest.update(block) + try: + infile = open(filename, 'rb') block = infile.read(blocksize) - infile.close() + while block: + digest.update(block) + block = infile.read(blocksize) + infile.close() + except IOError, e: + raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e)) return digest.hexdigest() def default(value, function): From 28cb4756c4077dfd5c071c189815d87ea2713b13 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 14 Apr 2014 16:02:08 -0400 Subject: [PATCH 754/772] Remove debug line --- library/packaging/apt_repository | 1 - 1 file changed, 1 deletion(-) diff --git a/library/packaging/apt_repository b/library/packaging/apt_repository index 928b6df0608..5dfc74b5171 100644 --- a/library/packaging/apt_repository +++ b/library/packaging/apt_repository @@ -368,7 +368,6 @@ def main(): install_python_apt(module) repo = module.params['repo'] - open("/tmp/awx.log", "a").write("repo: %s\n" % repo) state = module.params['state'] update_cache = module.params['update_cache'] sourceslist = None From 6ec0e25d1adc1e5611817b898173ef138b61a5b4 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Mon, 14 Apr 2014 16:42:23 -0400 Subject: [PATCH 755/772] Addresses #6908 Add a mode parameter to apt_repository --- library/packaging/apt_repository | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/library/packaging/apt_repository 
b/library/packaging/apt_repository index 5dfc74b5171..a0d3b89e739 100644 --- a/library/packaging/apt_repository +++ b/library/packaging/apt_repository @@ -43,6 +43,12 @@ options: default: "present" description: - A source string state. + mode: + required: false + default: 0644 + description: + - The octal mode for newly created files in sources.list.d + version_added: "1.6" update_cache: description: - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes. @@ -217,7 +223,10 @@ class SourcesList(object): if sources: d, fn = os.path.split(filename) fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d) - os.chmod(os.path.join(fd, tmp_path), 0644) + + # allow the user to override the default mode + this_mode = module.params['mode'] + module.set_mode_if_different(tmp_path, this_mode, False) f = os.fdopen(fd, 'w') for n, valid, enabled, source, comment in sources: @@ -356,6 +365,7 @@ def main(): argument_spec=dict( repo=dict(required=True), state=dict(choices=['present', 'absent'], default='present'), + mode=dict(required=False, default=0644), update_cache = dict(aliases=['update-cache'], type='bool', default='yes'), # this should not be needed, but exists as a failsafe install_python_apt=dict(required=False, default="yes", type='bool'), From 40b958e348e0b37d585979bf537a25be231ebaa6 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 14 Apr 2014 16:35:16 -0500 Subject: [PATCH 756/772] Don't turn complex args back into a k=v string for the synchronize module Fixes #6951 --- lib/ansible/runner/action_plugins/synchronize.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/lib/ansible/runner/action_plugins/synchronize.py b/lib/ansible/runner/action_plugins/synchronize.py index 63331760e44..42432d4fcb1 100644 --- a/lib/ansible/runner/action_plugins/synchronize.py +++ b/lib/ansible/runner/action_plugins/synchronize.py @@ -198,14 +198,12 @@ class ActionModule(object): if rsync_path: options['rsync_path'] = '"' + rsync_path + '"' - module_items = ' '.join(['%s=%s' % (k, v) for (k, - v) in options.items()]) - + module_args = "" if self.runner.noop_on_check(inject): - module_items += " CHECKMODE=True" + module_args = "CHECKMODE=True" # run the module and store the result - result = self.runner._execute_module(conn, tmp, 'synchronize', module_items, inject=inject) + result = self.runner._execute_module(conn, tmp, 'synchronize', module_args, complex_args=options, inject=inject) # reset the sudo property self.runner.sudo = self.original_sudo From bdbb3bb4a1e969090fa6575c7e2530ce6448eafb Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 14 Apr 2014 21:13:14 -0500 Subject: [PATCH 757/772] Fixing unit tests for synchronize due to earlier changes --- test/units/TestSynchronize.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/test/units/TestSynchronize.py b/test/units/TestSynchronize.py index dfb1a129e5a..c6fa31bf9c6 100644 --- a/test/units/TestSynchronize.py +++ b/test/units/TestSynchronize.py @@ -19,12 +19,20 @@ class FakeRunner(object): self.private_key_file = None self.check = False - def _execute_module(self, conn, tmp, module_name, args, inject=None): + def _execute_module(self, conn, tmp, module_name, args, + async_jid=None, async_module=None, async_limit=None, inject=None, + persist_files=False, complex_args=None, delete_remote_tmp=True): self.executed_conn = conn self.executed_tmp = tmp self.executed_module_name = module_name self.executed_args = args + 
self.executed_async_jid = async_jid + self.executed_async_module = async_module + self.executed_async_limit = async_limit self.executed_inject = inject + self.executed_persist_files = persist_files + self.executed_complex_args = complex_args + self.executed_delete_remote_tmp = delete_remote_tmp def noop_on_check(self, inject): return self.check @@ -60,7 +68,7 @@ class TestSynchronize(unittest.TestCase): x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject) assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1" - assert runner.executed_args == "dest=root@el6.lab.net:/tmp/bar src=/tmp/foo", "wrong args used" + assert runner.executed_complex_args == {"dest":"root@el6.lab.net:/tmp/bar", "src":"/tmp/foo"}, "wrong args used" assert runner.sudo == None, "sudo was not reset to None" def test_synchronize_action_sudo(self): @@ -86,8 +94,9 @@ class TestSynchronize(unittest.TestCase): x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject) assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1" - assert runner.executed_args == 'dest=root@el6.lab.net:/tmp/bar src=/tmp/foo rsync_path="sudo rsync"', \ - "wrong args used: %s" % runner.executed_args + assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar', + 'src':'/tmp/foo', + 'rsync_path':'"sudo rsync"'}, "wrong args used" assert runner.sudo == True, "sudo was not reset to True" @@ -117,9 +126,9 @@ class TestSynchronize(unittest.TestCase): assert runner.transport == "paramiko", "runner transport was changed" assert runner.remote_user == "jtanner", "runner remote_user was changed" assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1" - assert "dest_port" not in runner.executed_args, "dest_port should not have been set" - assert "src=/tmp/foo" in runner.executed_args, "source was set incorrectly" - assert "dest=/tmp/bar" in runner.executed_args, "dest was set incorrectly" + assert "dest_port" not in runner.executed_complex_args, "dest_port should not have been set" + assert runner.executed_complex_args.get("src") == "/tmp/foo", "source was set incorrectly" + assert runner.executed_complex_args.get("dest") == "/tmp/bar", "dest was set incorrectly" def test_synchronize_action_vagrant(self): @@ -158,7 +167,7 @@ class TestSynchronize(unittest.TestCase): assert runner.remote_user == "jtanner", "runner remote_user was changed" assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1" assert runner.executed_inject['ansible_ssh_user'] == "vagrant", "runner user was changed" - assert "dest_port=2222" in runner.executed_args, "remote port was not set to 2222" - assert "src=/tmp/foo" in runner.executed_args, "source was set incorrectly" - assert "dest=vagrant@127.0.0.1:/tmp/bar" in runner.executed_args, "dest was set incorrectly" + assert runner.executed_complex_args.get("dest_port") == "2222", "remote port was not set to 2222" + assert runner.executed_complex_args.get("src") == "/tmp/foo", "source was set incorrectly" + assert runner.executed_complex_args.get("dest") == "vagrant@127.0.0.1:/tmp/bar", "dest was set incorrectly" From 3384021fe61a60804af6dc4b4fca6c24704ae115 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20RICARD?= Date: Tue, 15 Apr 2014 16:41:27 +0200 Subject: [PATCH 758/772] Fixed test on 'cached' parameter. 
'cached' parameter is boolean, not string --- library/packaging/pkgng | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/packaging/pkgng b/library/packaging/pkgng index b8893b27ce6..a1f443fd4e1 100644 --- a/library/packaging/pkgng +++ b/library/packaging/pkgng @@ -149,7 +149,7 @@ def install_packages(module, pkgng_path, packages, cached, pkgsite): else: pkgsite = "-r %s" % (pkgsite) - if not module.check_mode and cached == "no": + if not module.check_mode and not cached: if old_pkgng: rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path)) else: From aad128bac7918230f318308ec7f5a1623bfa1fae Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 15 Apr 2014 10:54:43 -0500 Subject: [PATCH 759/772] Use combined cache for templating module vars Fixes #6901 --- lib/ansible/runner/__init__.py | 6 +- lib/ansible/runner/__init__.py.orig | 1224 +++++++++++++++++++++++++++ 2 files changed, 1228 insertions(+), 2 deletions(-) create mode 100644 lib/ansible/runner/__init__.py.orig diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 859e04667de..f72d5399e7a 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -552,12 +552,14 @@ class Runner(object): # fireball, local, etc port = self.remote_port - module_vars = template.template(self.basedir, self.module_vars, host_variables) - # merge the VARS and SETUP caches for this host combined_cache = self.setup_cache.copy() combined_cache.get(host, {}).update(self.vars_cache.get(host, {})) + # use combined_cache and host_variables to template the module_vars + module_vars_inject = utils.combine_vars(combined_cache.get(host, {}), host_variables) + module_vars = template.template(self.basedir, self.module_vars, module_vars_inject) + inject = {} inject = utils.combine_vars(inject, self.default_vars) inject = utils.combine_vars(inject, host_variables) diff --git a/lib/ansible/runner/__init__.py.orig b/lib/ansible/runner/__init__.py.orig new file mode 100644 index 00000000000..b1285af2517 --- /dev/null +++ b/lib/ansible/runner/__init__.py.orig @@ -0,0 +1,1224 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +import multiprocessing +import signal +import os +import pwd +import Queue +import random +import traceback +import tempfile +import time +import collections +import socket +import base64 +import sys +import shlex +import pipes +import jinja2 +import subprocess + +import ansible.constants as C +import ansible.inventory +from ansible import utils +from ansible.utils import template +from ansible.utils import check_conditional +from ansible.utils import string_functions +from ansible import errors +from ansible import module_common +import poller +import connection +from return_data import ReturnData +from ansible.callbacks import DefaultRunnerCallbacks, vv +from ansible.module_common import ModuleReplacer + +module_replacer = ModuleReplacer(strip_comments=False) + +HAS_ATFORK=True +try: + from Crypto.Random import atfork +except ImportError: + HAS_ATFORK=False + +multiprocessing_runner = None + +OUTPUT_LOCKFILE = tempfile.TemporaryFile() +PROCESS_LOCKFILE = tempfile.TemporaryFile() + +################################################ + +def _executor_hook(job_queue, result_queue, new_stdin): + + # attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17 + # this function also not present in CentOS 6 + if HAS_ATFORK: + atfork() + + signal.signal(signal.SIGINT, signal.SIG_IGN) + while not job_queue.empty(): + try: + host = job_queue.get(block=False) + return_data = multiprocessing_runner._executor(host, new_stdin) + result_queue.put(return_data) + + if 'LEGACY_TEMPLATE_WARNING' in return_data.flags: + # pass data back up across the multiprocessing fork boundary + template.Flags.LEGACY_TEMPLATE_WARNING = True + + except Queue.Empty: + pass + except: + traceback.print_exc() + +class HostVars(dict): + ''' A special view of vars_cache that adds values from the inventory when needed. ''' + + def __init__(self, vars_cache, inventory): + self.vars_cache = vars_cache + self.inventory = inventory + self.lookup = dict() + self.update(vars_cache) + + def __getitem__(self, host): + if host not in self.lookup: + result = self.inventory.get_variables(host) + result.update(self.vars_cache.get(host, {})) + self.lookup[host] = result + return self.lookup[host] + + +class Runner(object): + ''' core API interface to ansible ''' + + # see bin/ansible for how this is used... + + def __init__(self, + host_list=C.DEFAULT_HOST_LIST, # ex: /etc/ansible/hosts, legacy usage + module_path=None, # ex: /usr/share/ansible + module_name=C.DEFAULT_MODULE_NAME, # ex: copy + module_args=C.DEFAULT_MODULE_ARGS, # ex: "src=/tmp/a dest=/tmp/b" + forks=C.DEFAULT_FORKS, # parallelism level + timeout=C.DEFAULT_TIMEOUT, # SSH timeout + pattern=C.DEFAULT_PATTERN, # which hosts? 
ex: 'all', 'acme.example.org' + remote_user=C.DEFAULT_REMOTE_USER, # ex: 'username' + remote_pass=C.DEFAULT_REMOTE_PASS, # ex: 'password123' or None if using key + remote_port=None, # if SSH on different ports + private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords + sudo_pass=C.DEFAULT_SUDO_PASS, # ex: 'password123' or None + background=0, # async poll every X seconds, else 0 for non-async + basedir=None, # directory of playbook, if applicable + setup_cache=None, # used to share fact data w/ other tasks + vars_cache=None, # used to store variables about hosts + transport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local' + conditional='True', # run only if this fact expression evals to true + callbacks=None, # used for output + sudo=False, # whether to run sudo or not + sudo_user=C.DEFAULT_SUDO_USER, # ex: 'root' + module_vars=None, # a playbooks internals thing + default_vars=None, # ditto + is_playbook=False, # running from playbook or not? + inventory=None, # reference to Inventory object + subset=None, # subset pattern + check=False, # don't make any changes, just try to probe for potential changes + diff=False, # whether to show diffs for template files that change + environment=None, # environment variables (as dict) to use inside the command + complex_args=None, # structured data in addition to module_args, must be a dict + error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, # ex. False + accelerate=False, # use accelerated connection + accelerate_ipv6=False, # accelerated connection w/ IPv6 + accelerate_port=None, # port to use with accelerated connection + su=False, # Are we running our command via su? + su_user=None, # User to su to when running command, ex: 'root' + su_pass=C.DEFAULT_SU_PASS, + vault_pass=None, + run_hosts=None, # an optional list of pre-calculated hosts to run on + no_log=False, # option to enable/disable logging for a given task + ): + + # used to lock multiprocess inputs and outputs at various levels + self.output_lockfile = OUTPUT_LOCKFILE + self.process_lockfile = PROCESS_LOCKFILE + + if not complex_args: + complex_args = {} + + # storage & defaults + self.check = check + self.diff = diff + self.setup_cache = utils.default(setup_cache, lambda: collections.defaultdict(dict)) + self.vars_cache = utils.default(vars_cache, lambda: collections.defaultdict(dict)) + self.basedir = utils.default(basedir, lambda: os.getcwd()) + self.callbacks = utils.default(callbacks, lambda: DefaultRunnerCallbacks()) + self.generated_jid = str(random.randint(0, 999999999999)) + self.transport = transport + self.inventory = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list)) + + self.module_vars = utils.default(module_vars, lambda: {}) + self.default_vars = utils.default(default_vars, lambda: {}) + self.always_run = None + self.connector = connection.Connection(self) + self.conditional = conditional + self.module_name = module_name + self.forks = int(forks) + self.pattern = pattern + self.module_args = module_args + self.timeout = timeout + self.remote_user = remote_user + self.remote_pass = remote_pass + self.remote_port = remote_port + self.private_key_file = private_key_file + self.background = background + self.sudo = sudo + self.sudo_user_var = sudo_user + self.sudo_user = None + self.sudo_pass = sudo_pass + self.is_playbook = is_playbook + self.environment = environment + self.complex_args = complex_args + self.error_on_undefined_vars = error_on_undefined_vars + self.accelerate = accelerate + self.accelerate_port = 
accelerate_port + self.accelerate_ipv6 = accelerate_ipv6 + self.callbacks.runner = self + self.su = su + self.su_user_var = su_user + self.su_user = None + self.su_pass = su_pass + self.vault_pass = vault_pass + self.no_log = no_log + + if self.transport == 'smart': + # if the transport is 'smart' see if SSH can support ControlPersist if not use paramiko + # 'smart' is the default since 1.2.1/1.3 + cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = cmd.communicate() + if "Bad configuration option" in err: + self.transport = "paramiko" + else: + self.transport = "ssh" + + # save the original transport, in case it gets + # changed later via options like accelerate + self.original_transport = self.transport + + # misc housekeeping + if subset and self.inventory._subset is None: + # don't override subset when passed from playbook + self.inventory.subset(subset) + + # If we get a pre-built list of hosts to run on, from say a playbook, use them. + # Also where we will store the hosts to run on once discovered + self.run_hosts = run_hosts + + if self.transport == 'local': + self.remote_user = pwd.getpwuid(os.geteuid())[0] + + if module_path is not None: + for i in module_path.split(os.pathsep): + utils.plugins.module_finder.add_directory(i) + + utils.plugins.push_basedir(self.basedir) + + # ensure we are using unique tmp paths + random.seed() + # ***************************************************** + + def _complex_args_hack(self, complex_args, module_args): + """ + ansible-playbook both allows specifying key=value string arguments and complex arguments + however not all modules use our python common module system and cannot + access these. An example might be a Bash module. This hack allows users to still pass "args" + as a hash of simple scalars to those arguments and is short term. We could technically + just feed JSON to the module, but that makes it hard on Bash consumers. The way this is implemented + it does mean values in 'args' have LOWER priority than those on the key=value line, allowing + args to provide yet another way to have pluggable defaults. + """ + if complex_args is None: + return module_args + if type(complex_args) != dict: + raise errors.AnsibleError("complex arguments are not a dictionary: %s" % complex_args) + for (k,v) in complex_args.iteritems(): + if isinstance(v, basestring): + module_args = "%s=%s %s" % (k, pipes.quote(v), module_args) + return module_args + + # ***************************************************** + + def _transfer_str(self, conn, tmp, name, data): + ''' transfer string to remote file ''' + + if type(data) == dict: + data = utils.jsonify(data) + + afd, afile = tempfile.mkstemp() + afo = os.fdopen(afd, 'w') + try: + if not isinstance(data, unicode): + #ensure the data is valid UTF-8 + data.decode('utf-8') + else: + data = data.encode('utf-8') + afo.write(data) + except: + raise errors.AnsibleError("failure encoding into utf-8") + afo.flush() + afo.close() + + remote = os.path.join(tmp, name) + try: + conn.put_file(afile, remote) + finally: + os.unlink(afile) + return remote + + # ***************************************************** + + def _compute_environment_string(self, inject=None): + ''' what environment variables to use when running the command? 
''' + + if not self.environment: + return "" + enviro = template.template(self.basedir, self.environment, inject, convert_bare=True) + enviro = utils.safe_eval(enviro) + if type(enviro) != dict: + raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro) + result = "" + for (k,v) in enviro.iteritems(): + result = "%s=%s %s" % (k, pipes.quote(str(v)), result) + return result + + # ***************************************************** + + def _compute_delegate(self, host, password, remote_inject): + + """ Build a dictionary of all attributes for the delegate host """ + + delegate = {} + + # allow delegated host to be templated + delegate['host'] = template.template(self.basedir, host, + remote_inject, fail_on_undefined=True) + + delegate['inject'] = remote_inject.copy() + + # set any interpreters + interpreters = [] + for i in delegate['inject']: + if i.startswith("ansible_") and i.endswith("_interpreter"): + interpreters.append(i) + for i in interpreters: + del delegate['inject'][i] + port = C.DEFAULT_REMOTE_PORT + + this_host = delegate['host'] + + # get the vars for the delegate by it's name + try: + this_info = delegate['inject']['hostvars'][this_host] + except: + # make sure the inject is empty for non-inventory hosts + this_info = {} + + # get the real ssh_address for the delegate + # and allow ansible_ssh_host to be templated + delegate['ssh_host'] = template.template(self.basedir, + this_info.get('ansible_ssh_host', this_host), + this_info, fail_on_undefined=True) + + delegate['port'] = this_info.get('ansible_ssh_port', port) + + delegate['user'] = self._compute_delegate_user(this_host, delegate['inject']) + + delegate['pass'] = this_info.get('ansible_ssh_pass', password) + delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', + self.private_key_file) + delegate['transport'] = this_info.get('ansible_connection', self.transport) + delegate['sudo_pass'] = this_info.get('ansible_sudo_pass', self.sudo_pass) + + if delegate['private_key_file'] is not None: + delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file']) + + for i in this_info: + if i.startswith("ansible_") and i.endswith("_interpreter"): + delegate['inject'][i] = this_info[i] + + return delegate + + def _compute_delegate_user(self, host, inject): + + """ Caculate the remote user based on an order of preference """ + + # inventory > playbook > original_host + + actual_user = inject.get('ansible_ssh_user', self.remote_user) + thisuser = None + + if host in inject['hostvars']: + if inject['hostvars'][host].get('ansible_ssh_user'): + # user for delegate host in inventory + thisuser = inject['hostvars'][host].get('ansible_ssh_user') + + if thisuser is None and self.remote_user: + # user defined by play/runner + thisuser = self.remote_user + + if thisuser is not None: + actual_user = thisuser + else: + # fallback to the inventory user of the play host + #actual_user = inject.get('ansible_ssh_user', actual_user) + actual_user = inject.get('ansible_ssh_user', self.remote_user) + + return actual_user + + + # ***************************************************** + + def _execute_module(self, conn, tmp, module_name, args, + async_jid=None, async_module=None, async_limit=None, inject=None, persist_files=False, complex_args=None, delete_remote_tmp=True): + + ''' transfer and run a module along with its arguments on the remote side''' + + # hack to support fireball mode + if module_name == 'fireball': + args = "%s password=%s" % (args, 
base64.b64encode(str(utils.key_for_hostname(conn.host)))) + if 'port' not in args: + args += " port=%s" % C.ZEROMQ_PORT + + ( + module_style, + shebang, + module_data + ) = self._configure_module(conn, module_name, args, inject, complex_args) + + # a remote tmp path may be necessary and not already created + if self._late_needs_tmp_path(conn, tmp, module_style): + tmp = self._make_tmp_path(conn) + + remote_module_path = os.path.join(tmp, module_name) + + if (module_style != 'new' + or async_jid is not None + or not conn.has_pipelining + or not C.ANSIBLE_SSH_PIPELINING + or C.DEFAULT_KEEP_REMOTE_FILES + or self.su): + self._transfer_str(conn, tmp, module_name, module_data) + + environment_string = self._compute_environment_string(inject) + + if tmp.find("tmp") != -1 and ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')): + # deal with possible umask issues once sudo'ed to other user + cmd_chmod = "chmod a+r %s" % remote_module_path + self._low_level_exec_command(conn, cmd_chmod, tmp, sudoable=False) + + cmd = "" + in_data = None + if module_style != 'new': + if 'CHECKMODE=True' in args: + # if module isn't using AnsibleModuleCommon infrastructure we can't be certain it knows how to + # do --check mode, so to be safe we will not run it. + return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot yet run check mode against old-style modules")) + elif 'NO_LOG' in args: + return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot use no_log: with old-style modules")) + + args = template.template(self.basedir, args, inject) + + # decide whether we need to transfer JSON or key=value + argsfile = None + if module_style == 'non_native_want_json': + if complex_args: + complex_args.update(utils.parse_kv(args)) + argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(complex_args)) + else: + argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(utils.parse_kv(args))) + + else: + argsfile = self._transfer_str(conn, tmp, 'arguments', args) + + if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): + # deal with possible umask issues once sudo'ed to other user + cmd_args_chmod = "chmod a+r %s" % argsfile + self._low_level_exec_command(conn, cmd_args_chmod, tmp, sudoable=False) + + if async_jid is None: + cmd = "%s %s" % (remote_module_path, argsfile) + else: + cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]]) + else: + if async_jid is None: + if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES and not self.su: + in_data = module_data + else: + cmd = "%s" % (remote_module_path) + else: + cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]]) + + if not shebang: + raise errors.AnsibleError("module is missing interpreter line") + + + cmd = " ".join([environment_string.strip(), shebang.replace("#!","").strip(), cmd]) + cmd = cmd.strip() + + if tmp.find("tmp") != -1 and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: + if not self.sudo or self.su or self.sudo_user == 'root' or self.su_user == 'root': + # not sudoing or sudoing to root, so can cleanup files in the same step + cmd = cmd + "; rm -rf %s >/dev/null 2>&1" % tmp + + sudoable = True + if module_name == "accelerate": + # always run the accelerate module as the user + # specified in the play, not the sudo_user + sudoable = False + + if self.su: + res = self._low_level_exec_command(conn, cmd, 
tmp, su=True, in_data=in_data) + else: + res = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, in_data=in_data) + + if tmp.find("tmp") != -1 and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: + if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): + # not sudoing to root, so maybe can't delete files as that other user + # have to clean up temp files as original user in a second step + cmd2 = "rm -rf %s >/dev/null 2>&1" % tmp + self._low_level_exec_command(conn, cmd2, tmp, sudoable=False) + + data = utils.parse_json(res['stdout']) + if 'parsed' in data and data['parsed'] == False: + data['msg'] += res['stderr'] + return ReturnData(conn=conn, result=data) + + # ***************************************************** + + def _executor(self, host, new_stdin): + ''' handler for multiprocessing library ''' + + def get_flags(): + # flags are a way of passing arbitrary event information + # back up the chain, since multiprocessing forks and doesn't + # allow state exchange + flags = [] + if template.Flags.LEGACY_TEMPLATE_WARNING: + flags.append('LEGACY_TEMPLATE_WARNING') + return flags + + try: + fileno = sys.stdin.fileno() + except ValueError: + fileno = None + + try: + if not new_stdin and fileno is not None: + self._new_stdin = os.fdopen(os.dup(fileno)) + else: + self._new_stdin = new_stdin + + exec_rc = self._executor_internal(host, new_stdin) + if type(exec_rc) != ReturnData: + raise Exception("unexpected return type: %s" % type(exec_rc)) + exec_rc.flags = get_flags() + # redundant, right? + if not exec_rc.comm_ok: + self.callbacks.on_unreachable(host, exec_rc.result) + return exec_rc + except errors.AnsibleError, ae: + msg = str(ae) + self.callbacks.on_unreachable(host, msg) + return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg), flags=get_flags()) + except Exception: + msg = traceback.format_exc() + self.callbacks.on_unreachable(host, msg) + return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg), flags=get_flags()) + + # ***************************************************** + + def _executor_internal(self, host, new_stdin): + ''' executes any module one or more times ''' + + host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass) + host_connection = host_variables.get('ansible_connection', self.transport) + if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]: + port = host_variables.get('ansible_ssh_port', self.remote_port) + if port is None: + port = C.DEFAULT_REMOTE_PORT + else: + # fireball, local, etc + port = self.remote_port + + module_vars = template.template(self.basedir, self.module_vars, host_variables) + + # merge the VARS and SETUP caches for this host + combined_cache = self.setup_cache.copy() + combined_cache.get(host, {}).update(self.vars_cache.get(host, {})) + + inject = {} + inject = utils.combine_vars(inject, self.default_vars) + inject = utils.combine_vars(inject, host_variables) + inject = utils.combine_vars(inject, module_vars) + inject = utils.combine_vars(inject, combined_cache.get(host, {})) + inject.setdefault('ansible_ssh_user', self.remote_user) + inject['hostvars'] = HostVars(combined_cache, self.inventory) + inject['group_names'] = host_variables.get('group_names', []) + inject['groups'] = self.inventory.groups_list() + inject['vars'] = self.module_vars + inject['defaults'] = self.default_vars + inject['environment'] = self.environment + inject['playbook_dir'] = self.basedir + + if self.inventory.basedir() 
is not None: + inject['inventory_dir'] = self.inventory.basedir() + + if self.inventory.src() is not None: + inject['inventory_file'] = self.inventory.src() + + # allow with_foo to work in playbooks... + items = None + items_plugin = self.module_vars.get('items_lookup_plugin', None) + + if items_plugin is not None and items_plugin in utils.plugins.lookup_loader: + + basedir = self.basedir + if '_original_file' in inject: + basedir = os.path.dirname(inject['_original_file']) + filesdir = os.path.join(basedir, '..', 'files') + if os.path.exists(filesdir): + basedir = filesdir + + items_terms = self.module_vars.get('items_lookup_terms', '') + items_terms = template.template(basedir, items_terms, inject) + items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject) + if type(items) != list: + raise errors.AnsibleError("lookup plugins have to return a list: %r" % items) + + if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng' ]: + # hack for apt, yum, and pkgng so that with_items maps back into a single module call + use_these_items = [] + for x in items: + inject['item'] = x + if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): + use_these_items.append(x) + inject['item'] = ",".join(use_these_items) + items = None + + # logic to replace complex args if possible + complex_args = self.complex_args + + # logic to decide how to run things depends on whether with_items is used + if items is None: + if isinstance(complex_args, basestring): + complex_args = template.template(self.basedir, complex_args, inject, convert_bare=True) + complex_args = utils.safe_eval(complex_args) + if type(complex_args) != dict: + raise errors.AnsibleError("args must be a dictionary, received %s" % complex_args) + return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port, complex_args=complex_args) + elif len(items) > 0: + + # executing using with_items, so make multiple calls + # TODO: refactor + + if self.background > 0: + raise errors.AnsibleError("lookup plugins (with_*) cannot be used with async tasks") + + aggregrate = {} + all_comm_ok = True + all_changed = False + all_failed = False + results = [] + for x in items: + # use a fresh inject for each item + this_inject = inject.copy() + this_inject['item'] = x + + # TODO: this idiom should be replaced with an up-conversion to a Jinja2 template evaluation + if isinstance(self.complex_args, basestring): + complex_args = template.template(self.basedir, self.complex_args, this_inject, convert_bare=True) + complex_args = utils.safe_eval(complex_args) + if type(complex_args) != dict: + raise errors.AnsibleError("args must be a dictionary, received %s" % complex_args) + result = self._executor_internal_inner( + host, + self.module_name, + self.module_args, + this_inject, + port, + complex_args=complex_args + ) + results.append(result.result) + if result.comm_ok == False: + all_comm_ok = False + all_failed = True + break + for x in results: + if x.get('changed') == True: + all_changed = True + if (x.get('failed') == True) or ('failed_when_result' in x and [x['failed_when_result']] or [('rc' in x) and (x['rc'] != 0)])[0]: + all_failed = True + break + msg = 'All items completed' + if all_failed: + msg = "One or more items failed." 
+ rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg) + if not all_failed: + del rd_result['failed'] + return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result) + else: + self.callbacks.on_skipped(host, None) + return ReturnData(host=host, comm_ok=True, result=dict(changed=False, skipped=True)) + + # ***************************************************** + + def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False, complex_args=None): + ''' decides how to invoke a module ''' + + # late processing of parameterized sudo_user (with_items,..) + if self.sudo_user_var is not None: + self.sudo_user = template.template(self.basedir, self.sudo_user_var, inject) + if self.su_user_var is not None: + self.su_user = template.template(self.basedir, self.su_user_var, inject) + + # allow module args to work as a dictionary + # though it is usually a string + new_args = "" + if type(module_args) == dict: + for (k,v) in module_args.iteritems(): + new_args = new_args + "%s='%s' " % (k,v) + module_args = new_args + + # module_name may be dynamic (but cannot contain {{ ansible_ssh_user }}) + module_name = template.template(self.basedir, module_name, inject) + + if module_name in utils.plugins.action_loader: + if self.background != 0: + raise errors.AnsibleError("async mode is not supported with the %s module" % module_name) + handler = utils.plugins.action_loader.get(module_name, self) + elif self.background == 0: + handler = utils.plugins.action_loader.get('normal', self) + else: + handler = utils.plugins.action_loader.get('async', self) + + if type(self.conditional) != list: + self.conditional = [ self.conditional ] + + for cond in self.conditional: + + if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): + result = utils.jsonify(dict(changed=False, skipped=True)) + self.callbacks.on_skipped(host, inject.get('item',None)) + return ReturnData(host=host, result=result) + + if getattr(handler, 'setup', None) is not None: + handler.setup(module_name, inject) + conn = None + actual_host = inject.get('ansible_ssh_host', host) + # allow ansible_ssh_host to be templated + actual_host = template.template(self.basedir, actual_host, inject, fail_on_undefined=True) + actual_port = port + actual_user = inject.get('ansible_ssh_user', self.remote_user) + actual_pass = inject.get('ansible_ssh_pass', self.remote_pass) + actual_transport = inject.get('ansible_connection', self.transport) + actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file) + actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True) + self.sudo_pass = inject.get('ansible_sudo_pass', self.sudo_pass) + self.su = inject.get('ansible_su', self.su) + self.su_pass = inject.get('ansible_su_pass', self.su_pass) + + if actual_private_key_file is not None: + actual_private_key_file = os.path.expanduser(actual_private_key_file) + + if self.accelerate and actual_transport != 'local': + #Fix to get the inventory name of the host to accelerate plugin + if inject.get('ansible_ssh_host', None): + self.accelerate_inventory_host = host + else: + self.accelerate_inventory_host = None + # if we're using accelerated mode, force the + # transport to accelerate + actual_transport = "accelerate" + if not self.accelerate_port: + self.accelerate_port = C.ACCELERATE_PORT + + if actual_transport in [ 'paramiko', 'ssh', 'accelerate' ]: + actual_port = 
inject.get('ansible_ssh_port', port) + + # the delegated host may have different SSH port configured, etc + # and we need to transfer those, and only those, variables + delegate_to = inject.get('delegate_to', None) + if delegate_to is not None: + delegate = self._compute_delegate(delegate_to, actual_pass, inject) + actual_transport = delegate['transport'] + actual_host = delegate['ssh_host'] + actual_port = delegate['port'] + actual_user = delegate['user'] + actual_pass = delegate['pass'] + actual_private_key_file = delegate['private_key_file'] + self.sudo_pass = delegate['sudo_pass'] + inject = delegate['inject'] + + # user/pass may still contain variables at this stage + actual_user = template.template(self.basedir, actual_user, inject) + actual_pass = template.template(self.basedir, actual_pass, inject) + + # make actual_user available as __magic__ ansible_ssh_user variable + inject['ansible_ssh_user'] = actual_user + + try: + if actual_transport == 'accelerate': + # for accelerate, we stuff both ports into a single + # variable so that we don't have to mangle other function + # calls just to accomodate this one case + actual_port = [actual_port, self.accelerate_port] + elif actual_port is not None: + actual_port = int(template.template(self.basedir, actual_port, inject)) + except ValueError, e: + result = dict(failed=True, msg="FAILED: Configured port \"%s\" is not a valid port, expected integer" % actual_port) + return ReturnData(host=host, comm_ok=False, result=result) + + try: + conn = self.connector.connect(actual_host, actual_port, actual_user, actual_pass, actual_transport, actual_private_key_file) + if delegate_to or host != actual_host: + conn.delegate = host + + + except errors.AnsibleConnectionFailed, e: + result = dict(failed=True, msg="FAILED: %s" % str(e)) + return ReturnData(host=host, comm_ok=False, result=result) + + tmp = '' + # action plugins may DECLARE via TRANSFERS_FILES = True that they need a remote tmp path working dir + if self._early_needs_tmp_path(module_name, handler): + tmp = self._make_tmp_path(conn) + + # render module_args and complex_args templates + try: + module_args = template.template(self.basedir, module_args, inject, fail_on_undefined=self.error_on_undefined_vars) + complex_args = template.template(self.basedir, complex_args, inject, fail_on_undefined=self.error_on_undefined_vars) + except jinja2.exceptions.UndefinedError, e: + raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e)) + + + result = handler.run(conn, tmp, module_name, module_args, inject, complex_args) + # Code for do until feature + until = self.module_vars.get('until', None) + if until is not None and result.comm_ok: + inject[self.module_vars.get('register')] = result.result + cond = template.template(self.basedir, until, inject, expand_lists=False) + if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): + retries = self.module_vars.get('retries') + delay = self.module_vars.get('delay') + for x in range(1, int(retries) + 1): + # template the delay, cast to float and sleep + delay = template.template(self.basedir, delay, inject, expand_lists=False) + delay = float(delay) + time.sleep(delay) + tmp = '' + if self._early_needs_tmp_path(module_name, handler): + tmp = self._make_tmp_path(conn) + result = handler.run(conn, tmp, module_name, module_args, inject, complex_args) + result.result['attempts'] = x + vv("Result from run %i is: %s" % (x, result.result)) + inject[self.module_vars.get('register')] = 
result.result + cond = template.template(self.basedir, until, inject, expand_lists=False) + if utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): + break + if result.result['attempts'] == retries and not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): + result.result['failed'] = True + result.result['msg'] = "Task failed as maximum retries was encountered" + else: + result.result['attempts'] = 0 + conn.close() + + if not result.comm_ok: + # connection or parsing errors... + self.callbacks.on_unreachable(host, result.result) + else: + data = result.result + + # https://github.com/ansible/ansible/issues/4958 + if hasattr(sys.stdout, "isatty"): + if "stdout" in data and sys.stdout.isatty(): + if not string_functions.isprintable(data['stdout']): + data['stdout'] = '' + + if 'item' in inject: + result.result['item'] = inject['item'] + + result.result['invocation'] = dict( + module_args=module_args, + module_name=module_name + ) + + changed_when = self.module_vars.get('changed_when') + failed_when = self.module_vars.get('failed_when') + if changed_when is not None or failed_when is not None: + register = self.module_vars.get('register') + if register is not None: + if 'stdout' in data: + data['stdout_lines'] = data['stdout'].splitlines() + inject[register] = data + if changed_when is not None: + data['changed'] = utils.check_conditional(changed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) + if failed_when is not None: + data['failed_when_result'] = data['failed'] = utils.check_conditional(failed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) + + if is_chained: + # no callbacks + return result + if 'skipped' in data: + self.callbacks.on_skipped(host) + elif not result.is_successful(): + ignore_errors = self.module_vars.get('ignore_errors', False) + self.callbacks.on_failed(host, data, ignore_errors) + else: + if self.diff: + self.callbacks.on_file_diff(conn.host, result.diff) + self.callbacks.on_ok(host, data) + return result + + def _early_needs_tmp_path(self, module_name, handler): + ''' detect if a tmp path should be created before the handler is called ''' + if module_name in utils.plugins.action_loader: + return getattr(handler, 'TRANSFERS_FILES', False) + # other modules never need tmp path at early stage + return False + + def _late_needs_tmp_path(self, conn, tmp, module_style): + if tmp.find("tmp") != -1: + # tmp has already been created + return False + if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.su: + # tmp is necessary to store module source code + return True + if not conn.has_pipelining: + # tmp is necessary to store the module source code + # or we want to keep the files on the target system + return True + if module_style != "new": + # even when conn has pipelining, old style modules need tmp to store arguments + return True + return False + + + # ***************************************************** + + def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False, + executable=None, su=False, in_data=None): + ''' execute a command string over SSH, return the output ''' + + if executable is None: + executable = C.DEFAULT_EXECUTABLE + + sudo_user = self.sudo_user + su_user = self.su_user + + # compare connection user to (su|sudo)_user and disable if the same + if hasattr(conn, 'user'): + if conn.user == sudo_user or conn.user == su_user: + sudoable = False + su = 
False + + if su: + rc, stdin, stdout, stderr = conn.exec_command(cmd, + tmp, + su=su, + su_user=su_user, + executable=executable, + in_data=in_data) + else: + rc, stdin, stdout, stderr = conn.exec_command(cmd, + tmp, + sudo_user, + sudoable=sudoable, + executable=executable, + in_data=in_data) + + if type(stdout) not in [ str, unicode ]: + out = ''.join(stdout.readlines()) + else: + out = stdout + + if type(stderr) not in [ str, unicode ]: + err = ''.join(stderr.readlines()) + else: + err = stderr + + if rc is not None: + return dict(rc=rc, stdout=out, stderr=err) + else: + return dict(stdout=out, stderr=err) + + # ***************************************************** + + def _remote_md5(self, conn, tmp, path): + ''' takes a remote md5sum without requiring python, and returns 1 if no file ''' + + path = pipes.quote(path) + # The following test needs to be SH-compliant. BASH-isms will + # not work if /bin/sh points to a non-BASH shell. + test = "rc=0; [ -r \"%s\" ] || rc=2; [ -f \"%s\" ] || rc=1; [ -d \"%s\" ] && echo 3 && exit 0" % ((path,) * 3) + md5s = [ + "(/usr/bin/md5sum %s 2>/dev/null)" % path, # Linux + "(/sbin/md5sum -q %s 2>/dev/null)" % path, # ? + "(/usr/bin/digest -a md5 %s 2>/dev/null)" % path, # Solaris 10+ + "(/sbin/md5 -q %s 2>/dev/null)" % path, # Freebsd + "(/usr/bin/md5 -n %s 2>/dev/null)" % path, # Netbsd + "(/bin/md5 -q %s 2>/dev/null)" % path, # Openbsd + "(/usr/bin/csum -h MD5 %s 2>/dev/null)" % path, # AIX + "(/bin/csum -h MD5 %s 2>/dev/null)" % path # AIX also + ] + + cmd = " || ".join(md5s) + cmd = "%s; %s || (echo \"${rc} %s\")" % (test, cmd, path) + data = self._low_level_exec_command(conn, cmd, tmp, sudoable=True) + data2 = utils.last_non_blank_line(data['stdout']) + try: + if data2 == '': + # this may happen if the connection to the remote server + # failed, so just return "INVALIDMD5SUM" to avoid errors + return "INVALIDMD5SUM" + else: + return data2.split()[0] + except IndexError: + sys.stderr.write("warning: md5sum command failed unusually, please report this to the list so it can be fixed\n") + sys.stderr.write("command: %s\n" % md5s) + sys.stderr.write("----\n") + sys.stderr.write("output: %s\n" % data) + sys.stderr.write("----\n") + # this will signal that it changed and allow things to keep going + return "INVALIDMD5SUM" + + # ***************************************************** + + def _make_tmp_path(self, conn): + ''' make and return a temporary path on a remote box ''' + + basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) + basetmp = os.path.join(C.DEFAULT_REMOTE_TMP, basefile) + if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root') and basetmp.startswith('$HOME'): + basetmp = os.path.join('/tmp', basefile) + + cmd = 'mkdir -p %s' % basetmp + if self.remote_user != 'root' or ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')): + cmd += ' && chmod a+rx %s' % basetmp + cmd += ' && echo %s' % basetmp + + result = self._low_level_exec_command(conn, cmd, None, sudoable=False) + + # error handling on this seems a little aggressive? + if result['rc'] != 0: + if result['rc'] == 5: + output = 'Authentication failure.' + elif result['rc'] == 255 and self.transport in ['ssh']: + if utils.VERBOSITY > 3: + output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr']) + else: + output = 'SSH encountered an unknown error during the connection. 
We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue' + else: + output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc']) + if 'stdout' in result and result['stdout'] != '': + output = output + ": %s" % result['stdout'] + raise errors.AnsibleError(output) + + rc = utils.last_non_blank_line(result['stdout']).strip() + '/' + # Catch failure conditions, files should never be + # written to locations in /. + if rc == '/': + raise errors.AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basetmp, cmd)) + return rc + + # ***************************************************** + + def _remove_tmp_path(self, conn, tmp_path): + ''' Remove a tmp_path. ''' + + if "-tmp-" in tmp_path: + cmd = "rm -rf %s >/dev/null 2>&1" % tmp_path + self._low_level_exec_command(conn, cmd, None, sudoable=False) + # If we have gotten here we have a working ssh configuration. + # If ssh breaks we could leave tmp directories out on the remote system. + + # ***************************************************** + + def _copy_module(self, conn, tmp, module_name, module_args, inject, complex_args=None): + ''' transfer a module over SFTP, does not run it ''' + ( + module_style, + module_shebang, + module_data + ) = self._configure_module(conn, module_name, module_args, inject, complex_args) + module_remote_path = os.path.join(tmp, module_name) + + self._transfer_str(conn, tmp, module_name, module_data) + + return (module_remote_path, module_style, module_shebang) + + # ***************************************************** + + def _configure_module(self, conn, module_name, module_args, inject, complex_args=None): + ''' find module and configure it ''' + + # Search module path(s) for named module. 
+ module_path = utils.plugins.module_finder.find_plugin(module_name) + if module_path is None: + raise errors.AnsibleFileNotFound("module %s not found in %s" % (module_name, utils.plugins.module_finder.print_paths())) + + + # insert shared code and arguments into the module + (module_data, module_style, module_shebang) = module_replacer.modify_module( + module_path, complex_args, module_args, inject + ) + + return (module_style, module_shebang, module_data) + + + # ***************************************************** + + + def _parallel_exec(self, hosts): + ''' handles mulitprocessing when more than 1 fork is required ''' + + manager = multiprocessing.Manager() + job_queue = manager.Queue() + for host in hosts: + job_queue.put(host) + result_queue = manager.Queue() + + workers = [] + for i in range(self.forks): + new_stdin = os.fdopen(os.dup(sys.stdin.fileno())) + prc = multiprocessing.Process(target=_executor_hook, + args=(job_queue, result_queue, new_stdin)) + prc.start() + workers.append(prc) + + try: + for worker in workers: + worker.join() + except KeyboardInterrupt: + for worker in workers: + worker.terminate() + worker.join() + + results = [] + try: + while not result_queue.empty(): + results.append(result_queue.get(block=False)) + except socket.error: + raise errors.AnsibleError("") + return results + + # ***************************************************** + + def _partition_results(self, results): + ''' separate results by ones we contacted & ones we didn't ''' + + if results is None: + return None + results2 = dict(contacted={}, dark={}) + + for result in results: + host = result.host + if host is None: + raise Exception("internal error, host not set") + if result.communicated_ok(): + results2["contacted"][host] = result.result + else: + results2["dark"][host] = result.result + + # hosts which were contacted but never got a chance to return + for host in self.run_hosts: + if not (host in results2['dark'] or host in results2['contacted']): + results2["dark"][host] = {} + return results2 + + # ***************************************************** + + def run(self): + ''' xfer & run module on all matched hosts ''' + + # find hosts that match the pattern + if not self.run_hosts: + self.run_hosts = self.inventory.list_hosts(self.pattern) + hosts = self.run_hosts + if len(hosts) == 0: + self.callbacks.on_no_hosts() + return dict(contacted={}, dark={}) + + global multiprocessing_runner + multiprocessing_runner = self + results = None + + # Check if this is an action plugin. Some of them are designed + # to be ran once per group of hosts. Example module: pause, + # run once per hostgroup, rather than pausing once per each + # host. + p = utils.plugins.action_loader.get(self.module_name, self) + + if self.forks == 0 or self.forks > len(hosts): + self.forks = len(hosts) + + if p and getattr(p, 'BYPASS_HOST_LOOP', None): + + # Expose the current hostgroup to the bypassing plugins + self.host_set = hosts + # We aren't iterating over all the hosts in this + # group. So, just pick the first host in our group to + # construct the conn object with. + result_data = self._executor(hosts[0], None).result + # Create a ResultData item for each host in this group + # using the returned result. If we didn't do this we would + # get false reports of dark hosts. 
+ results = [ ReturnData(host=h, result=result_data, comm_ok=True) \ + for h in hosts ] + del self.host_set + + elif self.forks > 1: + try: + results = self._parallel_exec(hosts) + except IOError, ie: + print ie.errno + if ie.errno == 32: + # broken pipe from Ctrl+C + raise errors.AnsibleError("interrupted") + raise + else: + results = [ self._executor(h, None) for h in hosts ] + + return self._partition_results(results) + + # ***************************************************** + + def run_async(self, time_limit): + ''' Run this module asynchronously and return a poller. ''' + + self.background = time_limit + results = self.run() + return results, poller.AsyncPoller(results, self) + + # ***************************************************** + + def noop_on_check(self, inject): + ''' Should the runner run in check mode or not ? ''' + + # initialize self.always_run on first call + if self.always_run is None: + self.always_run = self.module_vars.get('always_run', False) + self.always_run = check_conditional( + self.always_run, self.basedir, inject, fail_on_undefined=True) + + return (self.check and not self.always_run) From ba1a6c2e11d91511aec7e3dbb6325b0eb0b5c9f2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 15 Apr 2014 10:58:53 -0500 Subject: [PATCH 760/772] Removing .orig file merged in accidentally --- lib/ansible/runner/__init__.py.orig | 1224 --------------------------- 1 file changed, 1224 deletions(-) delete mode 100644 lib/ansible/runner/__init__.py.orig diff --git a/lib/ansible/runner/__init__.py.orig b/lib/ansible/runner/__init__.py.orig deleted file mode 100644 index b1285af2517..00000000000 --- a/lib/ansible/runner/__init__.py.orig +++ /dev/null @@ -1,1224 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
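The do-until feature in the runner code above executes the handler once, then re-templates `delay` on each pass and retries until the `until` expression holds, recording the attempt count in the result. A minimal sketch of that control flow, with `run_task` and `check` as hypothetical stand-ins for the handler call and the templated conditional:

    import time

    def run_until(run_task, check, retries, delay):
        # first attempt; 'attempts' stays 0 when the initial result
        # already satisfies the until condition, mirroring the runner
        result = run_task()
        if check(result):
            result['attempts'] = 0
            return result
        for attempt in range(1, int(retries) + 1):
            time.sleep(float(delay))  # the runner re-templates delay each pass
            result = run_task()
            result['attempts'] = attempt
            if check(result):
                break
        if result['attempts'] == int(retries) and not check(result):
            result['failed'] = True
            result['msg'] = 'Task failed as maximum retries was encountered'
        return result

Note that the runner compares `attempts` against the raw `retries` value; the sketch casts both sides so the comparison stays an integer check.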
- -import multiprocessing -import signal -import os -import pwd -import Queue -import random -import traceback -import tempfile -import time -import collections -import socket -import base64 -import sys -import shlex -import pipes -import jinja2 -import subprocess - -import ansible.constants as C -import ansible.inventory -from ansible import utils -from ansible.utils import template -from ansible.utils import check_conditional -from ansible.utils import string_functions -from ansible import errors -from ansible import module_common -import poller -import connection -from return_data import ReturnData -from ansible.callbacks import DefaultRunnerCallbacks, vv -from ansible.module_common import ModuleReplacer - -module_replacer = ModuleReplacer(strip_comments=False) - -HAS_ATFORK=True -try: - from Crypto.Random import atfork -except ImportError: - HAS_ATFORK=False - -multiprocessing_runner = None - -OUTPUT_LOCKFILE = tempfile.TemporaryFile() -PROCESS_LOCKFILE = tempfile.TemporaryFile() - -################################################ - -def _executor_hook(job_queue, result_queue, new_stdin): - - # attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17 - # this function also not present in CentOS 6 - if HAS_ATFORK: - atfork() - - signal.signal(signal.SIGINT, signal.SIG_IGN) - while not job_queue.empty(): - try: - host = job_queue.get(block=False) - return_data = multiprocessing_runner._executor(host, new_stdin) - result_queue.put(return_data) - - if 'LEGACY_TEMPLATE_WARNING' in return_data.flags: - # pass data back up across the multiprocessing fork boundary - template.Flags.LEGACY_TEMPLATE_WARNING = True - - except Queue.Empty: - pass - except: - traceback.print_exc() - -class HostVars(dict): - ''' A special view of vars_cache that adds values from the inventory when needed. ''' - - def __init__(self, vars_cache, inventory): - self.vars_cache = vars_cache - self.inventory = inventory - self.lookup = dict() - self.update(vars_cache) - - def __getitem__(self, host): - if host not in self.lookup: - result = self.inventory.get_variables(host) - result.update(self.vars_cache.get(host, {})) - self.lookup[host] = result - return self.lookup[host] - - -class Runner(object): - ''' core API interface to ansible ''' - - # see bin/ansible for how this is used... - - def __init__(self, - host_list=C.DEFAULT_HOST_LIST, # ex: /etc/ansible/hosts, legacy usage - module_path=None, # ex: /usr/share/ansible - module_name=C.DEFAULT_MODULE_NAME, # ex: copy - module_args=C.DEFAULT_MODULE_ARGS, # ex: "src=/tmp/a dest=/tmp/b" - forks=C.DEFAULT_FORKS, # parallelism level - timeout=C.DEFAULT_TIMEOUT, # SSH timeout - pattern=C.DEFAULT_PATTERN, # which hosts? 
ex: 'all', 'acme.example.org' - remote_user=C.DEFAULT_REMOTE_USER, # ex: 'username' - remote_pass=C.DEFAULT_REMOTE_PASS, # ex: 'password123' or None if using key - remote_port=None, # if SSH on different ports - private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords - sudo_pass=C.DEFAULT_SUDO_PASS, # ex: 'password123' or None - background=0, # async poll every X seconds, else 0 for non-async - basedir=None, # directory of playbook, if applicable - setup_cache=None, # used to share fact data w/ other tasks - vars_cache=None, # used to store variables about hosts - transport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local' - conditional='True', # run only if this fact expression evals to true - callbacks=None, # used for output - sudo=False, # whether to run sudo or not - sudo_user=C.DEFAULT_SUDO_USER, # ex: 'root' - module_vars=None, # a playbooks internals thing - default_vars=None, # ditto - is_playbook=False, # running from playbook or not? - inventory=None, # reference to Inventory object - subset=None, # subset pattern - check=False, # don't make any changes, just try to probe for potential changes - diff=False, # whether to show diffs for template files that change - environment=None, # environment variables (as dict) to use inside the command - complex_args=None, # structured data in addition to module_args, must be a dict - error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, # ex. False - accelerate=False, # use accelerated connection - accelerate_ipv6=False, # accelerated connection w/ IPv6 - accelerate_port=None, # port to use with accelerated connection - su=False, # Are we running our command via su? - su_user=None, # User to su to when running command, ex: 'root' - su_pass=C.DEFAULT_SU_PASS, - vault_pass=None, - run_hosts=None, # an optional list of pre-calculated hosts to run on - no_log=False, # option to enable/disable logging for a given task - ): - - # used to lock multiprocess inputs and outputs at various levels - self.output_lockfile = OUTPUT_LOCKFILE - self.process_lockfile = PROCESS_LOCKFILE - - if not complex_args: - complex_args = {} - - # storage & defaults - self.check = check - self.diff = diff - self.setup_cache = utils.default(setup_cache, lambda: collections.defaultdict(dict)) - self.vars_cache = utils.default(vars_cache, lambda: collections.defaultdict(dict)) - self.basedir = utils.default(basedir, lambda: os.getcwd()) - self.callbacks = utils.default(callbacks, lambda: DefaultRunnerCallbacks()) - self.generated_jid = str(random.randint(0, 999999999999)) - self.transport = transport - self.inventory = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list)) - - self.module_vars = utils.default(module_vars, lambda: {}) - self.default_vars = utils.default(default_vars, lambda: {}) - self.always_run = None - self.connector = connection.Connection(self) - self.conditional = conditional - self.module_name = module_name - self.forks = int(forks) - self.pattern = pattern - self.module_args = module_args - self.timeout = timeout - self.remote_user = remote_user - self.remote_pass = remote_pass - self.remote_port = remote_port - self.private_key_file = private_key_file - self.background = background - self.sudo = sudo - self.sudo_user_var = sudo_user - self.sudo_user = None - self.sudo_pass = sudo_pass - self.is_playbook = is_playbook - self.environment = environment - self.complex_args = complex_args - self.error_on_undefined_vars = error_on_undefined_vars - self.accelerate = accelerate - self.accelerate_port = 
accelerate_port - self.accelerate_ipv6 = accelerate_ipv6 - self.callbacks.runner = self - self.su = su - self.su_user_var = su_user - self.su_user = None - self.su_pass = su_pass - self.vault_pass = vault_pass - self.no_log = no_log - - if self.transport == 'smart': - # if the transport is 'smart' see if SSH can support ControlPersist if not use paramiko - # 'smart' is the default since 1.2.1/1.3 - cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (out, err) = cmd.communicate() - if "Bad configuration option" in err: - self.transport = "paramiko" - else: - self.transport = "ssh" - - # save the original transport, in case it gets - # changed later via options like accelerate - self.original_transport = self.transport - - # misc housekeeping - if subset and self.inventory._subset is None: - # don't override subset when passed from playbook - self.inventory.subset(subset) - - # If we get a pre-built list of hosts to run on, from say a playbook, use them. - # Also where we will store the hosts to run on once discovered - self.run_hosts = run_hosts - - if self.transport == 'local': - self.remote_user = pwd.getpwuid(os.geteuid())[0] - - if module_path is not None: - for i in module_path.split(os.pathsep): - utils.plugins.module_finder.add_directory(i) - - utils.plugins.push_basedir(self.basedir) - - # ensure we are using unique tmp paths - random.seed() - # ***************************************************** - - def _complex_args_hack(self, complex_args, module_args): - """ - ansible-playbook both allows specifying key=value string arguments and complex arguments - however not all modules use our python common module system and cannot - access these. An example might be a Bash module. This hack allows users to still pass "args" - as a hash of simple scalars to those arguments and is short term. We could technically - just feed JSON to the module, but that makes it hard on Bash consumers. The way this is implemented - it does mean values in 'args' have LOWER priority than those on the key=value line, allowing - args to provide yet another way to have pluggable defaults. - """ - if complex_args is None: - return module_args - if type(complex_args) != dict: - raise errors.AnsibleError("complex arguments are not a dictionary: %s" % complex_args) - for (k,v) in complex_args.iteritems(): - if isinstance(v, basestring): - module_args = "%s=%s %s" % (k, pipes.quote(v), module_args) - return module_args - - # ***************************************************** - - def _transfer_str(self, conn, tmp, name, data): - ''' transfer string to remote file ''' - - if type(data) == dict: - data = utils.jsonify(data) - - afd, afile = tempfile.mkstemp() - afo = os.fdopen(afd, 'w') - try: - if not isinstance(data, unicode): - #ensure the data is valid UTF-8 - data.decode('utf-8') - else: - data = data.encode('utf-8') - afo.write(data) - except: - raise errors.AnsibleError("failure encoding into utf-8") - afo.flush() - afo.close() - - remote = os.path.join(tmp, name) - try: - conn.put_file(afile, remote) - finally: - os.unlink(afile) - return remote - - # ***************************************************** - - def _compute_environment_string(self, inject=None): - ''' what environment variables to use when running the command? 
''' - - if not self.environment: - return "" - enviro = template.template(self.basedir, self.environment, inject, convert_bare=True) - enviro = utils.safe_eval(enviro) - if type(enviro) != dict: - raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro) - result = "" - for (k,v) in enviro.iteritems(): - result = "%s=%s %s" % (k, pipes.quote(str(v)), result) - return result - - # ***************************************************** - - def _compute_delegate(self, host, password, remote_inject): - - """ Build a dictionary of all attributes for the delegate host """ - - delegate = {} - - # allow delegated host to be templated - delegate['host'] = template.template(self.basedir, host, - remote_inject, fail_on_undefined=True) - - delegate['inject'] = remote_inject.copy() - - # set any interpreters - interpreters = [] - for i in delegate['inject']: - if i.startswith("ansible_") and i.endswith("_interpreter"): - interpreters.append(i) - for i in interpreters: - del delegate['inject'][i] - port = C.DEFAULT_REMOTE_PORT - - this_host = delegate['host'] - - # get the vars for the delegate by it's name - try: - this_info = delegate['inject']['hostvars'][this_host] - except: - # make sure the inject is empty for non-inventory hosts - this_info = {} - - # get the real ssh_address for the delegate - # and allow ansible_ssh_host to be templated - delegate['ssh_host'] = template.template(self.basedir, - this_info.get('ansible_ssh_host', this_host), - this_info, fail_on_undefined=True) - - delegate['port'] = this_info.get('ansible_ssh_port', port) - - delegate['user'] = self._compute_delegate_user(this_host, delegate['inject']) - - delegate['pass'] = this_info.get('ansible_ssh_pass', password) - delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', - self.private_key_file) - delegate['transport'] = this_info.get('ansible_connection', self.transport) - delegate['sudo_pass'] = this_info.get('ansible_sudo_pass', self.sudo_pass) - - if delegate['private_key_file'] is not None: - delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file']) - - for i in this_info: - if i.startswith("ansible_") and i.endswith("_interpreter"): - delegate['inject'][i] = this_info[i] - - return delegate - - def _compute_delegate_user(self, host, inject): - - """ Caculate the remote user based on an order of preference """ - - # inventory > playbook > original_host - - actual_user = inject.get('ansible_ssh_user', self.remote_user) - thisuser = None - - if host in inject['hostvars']: - if inject['hostvars'][host].get('ansible_ssh_user'): - # user for delegate host in inventory - thisuser = inject['hostvars'][host].get('ansible_ssh_user') - - if thisuser is None and self.remote_user: - # user defined by play/runner - thisuser = self.remote_user - - if thisuser is not None: - actual_user = thisuser - else: - # fallback to the inventory user of the play host - #actual_user = inject.get('ansible_ssh_user', actual_user) - actual_user = inject.get('ansible_ssh_user', self.remote_user) - - return actual_user - - - # ***************************************************** - - def _execute_module(self, conn, tmp, module_name, args, - async_jid=None, async_module=None, async_limit=None, inject=None, persist_files=False, complex_args=None, delete_remote_tmp=True): - - ''' transfer and run a module along with its arguments on the remote side''' - - # hack to support fireball mode - if module_name == 'fireball': - args = "%s password=%s" % (args, 
base64.b64encode(str(utils.key_for_hostname(conn.host)))) - if 'port' not in args: - args += " port=%s" % C.ZEROMQ_PORT - - ( - module_style, - shebang, - module_data - ) = self._configure_module(conn, module_name, args, inject, complex_args) - - # a remote tmp path may be necessary and not already created - if self._late_needs_tmp_path(conn, tmp, module_style): - tmp = self._make_tmp_path(conn) - - remote_module_path = os.path.join(tmp, module_name) - - if (module_style != 'new' - or async_jid is not None - or not conn.has_pipelining - or not C.ANSIBLE_SSH_PIPELINING - or C.DEFAULT_KEEP_REMOTE_FILES - or self.su): - self._transfer_str(conn, tmp, module_name, module_data) - - environment_string = self._compute_environment_string(inject) - - if tmp.find("tmp") != -1 and ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')): - # deal with possible umask issues once sudo'ed to other user - cmd_chmod = "chmod a+r %s" % remote_module_path - self._low_level_exec_command(conn, cmd_chmod, tmp, sudoable=False) - - cmd = "" - in_data = None - if module_style != 'new': - if 'CHECKMODE=True' in args: - # if module isn't using AnsibleModuleCommon infrastructure we can't be certain it knows how to - # do --check mode, so to be safe we will not run it. - return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot yet run check mode against old-style modules")) - elif 'NO_LOG' in args: - return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot use no_log: with old-style modules")) - - args = template.template(self.basedir, args, inject) - - # decide whether we need to transfer JSON or key=value - argsfile = None - if module_style == 'non_native_want_json': - if complex_args: - complex_args.update(utils.parse_kv(args)) - argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(complex_args)) - else: - argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(utils.parse_kv(args))) - - else: - argsfile = self._transfer_str(conn, tmp, 'arguments', args) - - if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): - # deal with possible umask issues once sudo'ed to other user - cmd_args_chmod = "chmod a+r %s" % argsfile - self._low_level_exec_command(conn, cmd_args_chmod, tmp, sudoable=False) - - if async_jid is None: - cmd = "%s %s" % (remote_module_path, argsfile) - else: - cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]]) - else: - if async_jid is None: - if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES and not self.su: - in_data = module_data - else: - cmd = "%s" % (remote_module_path) - else: - cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]]) - - if not shebang: - raise errors.AnsibleError("module is missing interpreter line") - - - cmd = " ".join([environment_string.strip(), shebang.replace("#!","").strip(), cmd]) - cmd = cmd.strip() - - if tmp.find("tmp") != -1 and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: - if not self.sudo or self.su or self.sudo_user == 'root' or self.su_user == 'root': - # not sudoing or sudoing to root, so can cleanup files in the same step - cmd = cmd + "; rm -rf %s >/dev/null 2>&1" % tmp - - sudoable = True - if module_name == "accelerate": - # always run the accelerate module as the user - # specified in the play, not the sudo_user - sudoable = False - - if self.su: - res = self._low_level_exec_command(conn, cmd, 
tmp, su=True, in_data=in_data) - else: - res = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, in_data=in_data) - - if tmp.find("tmp") != -1 and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: - if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): - # not sudoing to root, so maybe can't delete files as that other user - # have to clean up temp files as original user in a second step - cmd2 = "rm -rf %s >/dev/null 2>&1" % tmp - self._low_level_exec_command(conn, cmd2, tmp, sudoable=False) - - data = utils.parse_json(res['stdout']) - if 'parsed' in data and data['parsed'] == False: - data['msg'] += res['stderr'] - return ReturnData(conn=conn, result=data) - - # ***************************************************** - - def _executor(self, host, new_stdin): - ''' handler for multiprocessing library ''' - - def get_flags(): - # flags are a way of passing arbitrary event information - # back up the chain, since multiprocessing forks and doesn't - # allow state exchange - flags = [] - if template.Flags.LEGACY_TEMPLATE_WARNING: - flags.append('LEGACY_TEMPLATE_WARNING') - return flags - - try: - fileno = sys.stdin.fileno() - except ValueError: - fileno = None - - try: - if not new_stdin and fileno is not None: - self._new_stdin = os.fdopen(os.dup(fileno)) - else: - self._new_stdin = new_stdin - - exec_rc = self._executor_internal(host, new_stdin) - if type(exec_rc) != ReturnData: - raise Exception("unexpected return type: %s" % type(exec_rc)) - exec_rc.flags = get_flags() - # redundant, right? - if not exec_rc.comm_ok: - self.callbacks.on_unreachable(host, exec_rc.result) - return exec_rc - except errors.AnsibleError, ae: - msg = str(ae) - self.callbacks.on_unreachable(host, msg) - return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg), flags=get_flags()) - except Exception: - msg = traceback.format_exc() - self.callbacks.on_unreachable(host, msg) - return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg), flags=get_flags()) - - # ***************************************************** - - def _executor_internal(self, host, new_stdin): - ''' executes any module one or more times ''' - - host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass) - host_connection = host_variables.get('ansible_connection', self.transport) - if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]: - port = host_variables.get('ansible_ssh_port', self.remote_port) - if port is None: - port = C.DEFAULT_REMOTE_PORT - else: - # fireball, local, etc - port = self.remote_port - - module_vars = template.template(self.basedir, self.module_vars, host_variables) - - # merge the VARS and SETUP caches for this host - combined_cache = self.setup_cache.copy() - combined_cache.get(host, {}).update(self.vars_cache.get(host, {})) - - inject = {} - inject = utils.combine_vars(inject, self.default_vars) - inject = utils.combine_vars(inject, host_variables) - inject = utils.combine_vars(inject, module_vars) - inject = utils.combine_vars(inject, combined_cache.get(host, {})) - inject.setdefault('ansible_ssh_user', self.remote_user) - inject['hostvars'] = HostVars(combined_cache, self.inventory) - inject['group_names'] = host_variables.get('group_names', []) - inject['groups'] = self.inventory.groups_list() - inject['vars'] = self.module_vars - inject['defaults'] = self.default_vars - inject['environment'] = self.environment - inject['playbook_dir'] = self.basedir - - if self.inventory.basedir() 
is not None: - inject['inventory_dir'] = self.inventory.basedir() - - if self.inventory.src() is not None: - inject['inventory_file'] = self.inventory.src() - - # allow with_foo to work in playbooks... - items = None - items_plugin = self.module_vars.get('items_lookup_plugin', None) - - if items_plugin is not None and items_plugin in utils.plugins.lookup_loader: - - basedir = self.basedir - if '_original_file' in inject: - basedir = os.path.dirname(inject['_original_file']) - filesdir = os.path.join(basedir, '..', 'files') - if os.path.exists(filesdir): - basedir = filesdir - - items_terms = self.module_vars.get('items_lookup_terms', '') - items_terms = template.template(basedir, items_terms, inject) - items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject) - if type(items) != list: - raise errors.AnsibleError("lookup plugins have to return a list: %r" % items) - - if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng' ]: - # hack for apt, yum, and pkgng so that with_items maps back into a single module call - use_these_items = [] - for x in items: - inject['item'] = x - if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): - use_these_items.append(x) - inject['item'] = ",".join(use_these_items) - items = None - - # logic to replace complex args if possible - complex_args = self.complex_args - - # logic to decide how to run things depends on whether with_items is used - if items is None: - if isinstance(complex_args, basestring): - complex_args = template.template(self.basedir, complex_args, inject, convert_bare=True) - complex_args = utils.safe_eval(complex_args) - if type(complex_args) != dict: - raise errors.AnsibleError("args must be a dictionary, received %s" % complex_args) - return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port, complex_args=complex_args) - elif len(items) > 0: - - # executing using with_items, so make multiple calls - # TODO: refactor - - if self.background > 0: - raise errors.AnsibleError("lookup plugins (with_*) cannot be used with async tasks") - - aggregrate = {} - all_comm_ok = True - all_changed = False - all_failed = False - results = [] - for x in items: - # use a fresh inject for each item - this_inject = inject.copy() - this_inject['item'] = x - - # TODO: this idiom should be replaced with an up-conversion to a Jinja2 template evaluation - if isinstance(self.complex_args, basestring): - complex_args = template.template(self.basedir, self.complex_args, this_inject, convert_bare=True) - complex_args = utils.safe_eval(complex_args) - if type(complex_args) != dict: - raise errors.AnsibleError("args must be a dictionary, received %s" % complex_args) - result = self._executor_internal_inner( - host, - self.module_name, - self.module_args, - this_inject, - port, - complex_args=complex_args - ) - results.append(result.result) - if result.comm_ok == False: - all_comm_ok = False - all_failed = True - break - for x in results: - if x.get('changed') == True: - all_changed = True - if (x.get('failed') == True) or ('failed_when_result' in x and [x['failed_when_result']] or [('rc' in x) and (x['rc'] != 0)])[0]: - all_failed = True - break - msg = 'All items completed' - if all_failed: - msg = "One or more items failed." 
- rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg) - if not all_failed: - del rd_result['failed'] - return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result) - else: - self.callbacks.on_skipped(host, None) - return ReturnData(host=host, comm_ok=True, result=dict(changed=False, skipped=True)) - - # ***************************************************** - - def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False, complex_args=None): - ''' decides how to invoke a module ''' - - # late processing of parameterized sudo_user (with_items,..) - if self.sudo_user_var is not None: - self.sudo_user = template.template(self.basedir, self.sudo_user_var, inject) - if self.su_user_var is not None: - self.su_user = template.template(self.basedir, self.su_user_var, inject) - - # allow module args to work as a dictionary - # though it is usually a string - new_args = "" - if type(module_args) == dict: - for (k,v) in module_args.iteritems(): - new_args = new_args + "%s='%s' " % (k,v) - module_args = new_args - - # module_name may be dynamic (but cannot contain {{ ansible_ssh_user }}) - module_name = template.template(self.basedir, module_name, inject) - - if module_name in utils.plugins.action_loader: - if self.background != 0: - raise errors.AnsibleError("async mode is not supported with the %s module" % module_name) - handler = utils.plugins.action_loader.get(module_name, self) - elif self.background == 0: - handler = utils.plugins.action_loader.get('normal', self) - else: - handler = utils.plugins.action_loader.get('async', self) - - if type(self.conditional) != list: - self.conditional = [ self.conditional ] - - for cond in self.conditional: - - if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): - result = utils.jsonify(dict(changed=False, skipped=True)) - self.callbacks.on_skipped(host, inject.get('item',None)) - return ReturnData(host=host, result=result) - - if getattr(handler, 'setup', None) is not None: - handler.setup(module_name, inject) - conn = None - actual_host = inject.get('ansible_ssh_host', host) - # allow ansible_ssh_host to be templated - actual_host = template.template(self.basedir, actual_host, inject, fail_on_undefined=True) - actual_port = port - actual_user = inject.get('ansible_ssh_user', self.remote_user) - actual_pass = inject.get('ansible_ssh_pass', self.remote_pass) - actual_transport = inject.get('ansible_connection', self.transport) - actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file) - actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True) - self.sudo_pass = inject.get('ansible_sudo_pass', self.sudo_pass) - self.su = inject.get('ansible_su', self.su) - self.su_pass = inject.get('ansible_su_pass', self.su_pass) - - if actual_private_key_file is not None: - actual_private_key_file = os.path.expanduser(actual_private_key_file) - - if self.accelerate and actual_transport != 'local': - #Fix to get the inventory name of the host to accelerate plugin - if inject.get('ansible_ssh_host', None): - self.accelerate_inventory_host = host - else: - self.accelerate_inventory_host = None - # if we're using accelerated mode, force the - # transport to accelerate - actual_transport = "accelerate" - if not self.accelerate_port: - self.accelerate_port = C.ACCELERATE_PORT - - if actual_transport in [ 'paramiko', 'ssh', 'accelerate' ]: - actual_port = 
inject.get('ansible_ssh_port', port) - - # the delegated host may have different SSH port configured, etc - # and we need to transfer those, and only those, variables - delegate_to = inject.get('delegate_to', None) - if delegate_to is not None: - delegate = self._compute_delegate(delegate_to, actual_pass, inject) - actual_transport = delegate['transport'] - actual_host = delegate['ssh_host'] - actual_port = delegate['port'] - actual_user = delegate['user'] - actual_pass = delegate['pass'] - actual_private_key_file = delegate['private_key_file'] - self.sudo_pass = delegate['sudo_pass'] - inject = delegate['inject'] - - # user/pass may still contain variables at this stage - actual_user = template.template(self.basedir, actual_user, inject) - actual_pass = template.template(self.basedir, actual_pass, inject) - - # make actual_user available as __magic__ ansible_ssh_user variable - inject['ansible_ssh_user'] = actual_user - - try: - if actual_transport == 'accelerate': - # for accelerate, we stuff both ports into a single - # variable so that we don't have to mangle other function - # calls just to accomodate this one case - actual_port = [actual_port, self.accelerate_port] - elif actual_port is not None: - actual_port = int(template.template(self.basedir, actual_port, inject)) - except ValueError, e: - result = dict(failed=True, msg="FAILED: Configured port \"%s\" is not a valid port, expected integer" % actual_port) - return ReturnData(host=host, comm_ok=False, result=result) - - try: - conn = self.connector.connect(actual_host, actual_port, actual_user, actual_pass, actual_transport, actual_private_key_file) - if delegate_to or host != actual_host: - conn.delegate = host - - - except errors.AnsibleConnectionFailed, e: - result = dict(failed=True, msg="FAILED: %s" % str(e)) - return ReturnData(host=host, comm_ok=False, result=result) - - tmp = '' - # action plugins may DECLARE via TRANSFERS_FILES = True that they need a remote tmp path working dir - if self._early_needs_tmp_path(module_name, handler): - tmp = self._make_tmp_path(conn) - - # render module_args and complex_args templates - try: - module_args = template.template(self.basedir, module_args, inject, fail_on_undefined=self.error_on_undefined_vars) - complex_args = template.template(self.basedir, complex_args, inject, fail_on_undefined=self.error_on_undefined_vars) - except jinja2.exceptions.UndefinedError, e: - raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e)) - - - result = handler.run(conn, tmp, module_name, module_args, inject, complex_args) - # Code for do until feature - until = self.module_vars.get('until', None) - if until is not None and result.comm_ok: - inject[self.module_vars.get('register')] = result.result - cond = template.template(self.basedir, until, inject, expand_lists=False) - if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): - retries = self.module_vars.get('retries') - delay = self.module_vars.get('delay') - for x in range(1, int(retries) + 1): - # template the delay, cast to float and sleep - delay = template.template(self.basedir, delay, inject, expand_lists=False) - delay = float(delay) - time.sleep(delay) - tmp = '' - if self._early_needs_tmp_path(module_name, handler): - tmp = self._make_tmp_path(conn) - result = handler.run(conn, tmp, module_name, module_args, inject, complex_args) - result.result['attempts'] = x - vv("Result from run %i is: %s" % (x, result.result)) - inject[self.module_vars.get('register')] = 
result.result - cond = template.template(self.basedir, until, inject, expand_lists=False) - if utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): - break - if result.result['attempts'] == retries and not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars): - result.result['failed'] = True - result.result['msg'] = "Task failed as maximum retries was encountered" - else: - result.result['attempts'] = 0 - conn.close() - - if not result.comm_ok: - # connection or parsing errors... - self.callbacks.on_unreachable(host, result.result) - else: - data = result.result - - # https://github.com/ansible/ansible/issues/4958 - if hasattr(sys.stdout, "isatty"): - if "stdout" in data and sys.stdout.isatty(): - if not string_functions.isprintable(data['stdout']): - data['stdout'] = '' - - if 'item' in inject: - result.result['item'] = inject['item'] - - result.result['invocation'] = dict( - module_args=module_args, - module_name=module_name - ) - - changed_when = self.module_vars.get('changed_when') - failed_when = self.module_vars.get('failed_when') - if changed_when is not None or failed_when is not None: - register = self.module_vars.get('register') - if register is not None: - if 'stdout' in data: - data['stdout_lines'] = data['stdout'].splitlines() - inject[register] = data - if changed_when is not None: - data['changed'] = utils.check_conditional(changed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) - if failed_when is not None: - data['failed_when_result'] = data['failed'] = utils.check_conditional(failed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars) - - if is_chained: - # no callbacks - return result - if 'skipped' in data: - self.callbacks.on_skipped(host) - elif not result.is_successful(): - ignore_errors = self.module_vars.get('ignore_errors', False) - self.callbacks.on_failed(host, data, ignore_errors) - else: - if self.diff: - self.callbacks.on_file_diff(conn.host, result.diff) - self.callbacks.on_ok(host, data) - return result - - def _early_needs_tmp_path(self, module_name, handler): - ''' detect if a tmp path should be created before the handler is called ''' - if module_name in utils.plugins.action_loader: - return getattr(handler, 'TRANSFERS_FILES', False) - # other modules never need tmp path at early stage - return False - - def _late_needs_tmp_path(self, conn, tmp, module_style): - if tmp.find("tmp") != -1: - # tmp has already been created - return False - if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.su: - # tmp is necessary to store module source code - return True - if not conn.has_pipelining: - # tmp is necessary to store the module source code - # or we want to keep the files on the target system - return True - if module_style != "new": - # even when conn has pipelining, old style modules need tmp to store arguments - return True - return False - - - # ***************************************************** - - def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False, - executable=None, su=False, in_data=None): - ''' execute a command string over SSH, return the output ''' - - if executable is None: - executable = C.DEFAULT_EXECUTABLE - - sudo_user = self.sudo_user - su_user = self.su_user - - # compare connection user to (su|sudo)_user and disable if the same - if hasattr(conn, 'user'): - if conn.user == sudo_user or conn.user == su_user: - sudoable = False - su = 
False - - if su: - rc, stdin, stdout, stderr = conn.exec_command(cmd, - tmp, - su=su, - su_user=su_user, - executable=executable, - in_data=in_data) - else: - rc, stdin, stdout, stderr = conn.exec_command(cmd, - tmp, - sudo_user, - sudoable=sudoable, - executable=executable, - in_data=in_data) - - if type(stdout) not in [ str, unicode ]: - out = ''.join(stdout.readlines()) - else: - out = stdout - - if type(stderr) not in [ str, unicode ]: - err = ''.join(stderr.readlines()) - else: - err = stderr - - if rc is not None: - return dict(rc=rc, stdout=out, stderr=err) - else: - return dict(stdout=out, stderr=err) - - # ***************************************************** - - def _remote_md5(self, conn, tmp, path): - ''' takes a remote md5sum without requiring python, and returns 1 if no file ''' - - path = pipes.quote(path) - # The following test needs to be SH-compliant. BASH-isms will - # not work if /bin/sh points to a non-BASH shell. - test = "rc=0; [ -r \"%s\" ] || rc=2; [ -f \"%s\" ] || rc=1; [ -d \"%s\" ] && echo 3 && exit 0" % ((path,) * 3) - md5s = [ - "(/usr/bin/md5sum %s 2>/dev/null)" % path, # Linux - "(/sbin/md5sum -q %s 2>/dev/null)" % path, # ? - "(/usr/bin/digest -a md5 %s 2>/dev/null)" % path, # Solaris 10+ - "(/sbin/md5 -q %s 2>/dev/null)" % path, # Freebsd - "(/usr/bin/md5 -n %s 2>/dev/null)" % path, # Netbsd - "(/bin/md5 -q %s 2>/dev/null)" % path, # Openbsd - "(/usr/bin/csum -h MD5 %s 2>/dev/null)" % path, # AIX - "(/bin/csum -h MD5 %s 2>/dev/null)" % path # AIX also - ] - - cmd = " || ".join(md5s) - cmd = "%s; %s || (echo \"${rc} %s\")" % (test, cmd, path) - data = self._low_level_exec_command(conn, cmd, tmp, sudoable=True) - data2 = utils.last_non_blank_line(data['stdout']) - try: - if data2 == '': - # this may happen if the connection to the remote server - # failed, so just return "INVALIDMD5SUM" to avoid errors - return "INVALIDMD5SUM" - else: - return data2.split()[0] - except IndexError: - sys.stderr.write("warning: md5sum command failed unusually, please report this to the list so it can be fixed\n") - sys.stderr.write("command: %s\n" % md5s) - sys.stderr.write("----\n") - sys.stderr.write("output: %s\n" % data) - sys.stderr.write("----\n") - # this will signal that it changed and allow things to keep going - return "INVALIDMD5SUM" - - # ***************************************************** - - def _make_tmp_path(self, conn): - ''' make and return a temporary path on a remote box ''' - - basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) - basetmp = os.path.join(C.DEFAULT_REMOTE_TMP, basefile) - if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root') and basetmp.startswith('$HOME'): - basetmp = os.path.join('/tmp', basefile) - - cmd = 'mkdir -p %s' % basetmp - if self.remote_user != 'root' or ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')): - cmd += ' && chmod a+rx %s' % basetmp - cmd += ' && echo %s' % basetmp - - result = self._low_level_exec_command(conn, cmd, None, sudoable=False) - - # error handling on this seems a little aggressive? - if result['rc'] != 0: - if result['rc'] == 5: - output = 'Authentication failure.' - elif result['rc'] == 255 and self.transport in ['ssh']: - if utils.VERBOSITY > 3: - output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr']) - else: - output = 'SSH encountered an unknown error during the connection. 
We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue' - else: - output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc']) - if 'stdout' in result and result['stdout'] != '': - output = output + ": %s" % result['stdout'] - raise errors.AnsibleError(output) - - rc = utils.last_non_blank_line(result['stdout']).strip() + '/' - # Catch failure conditions, files should never be - # written to locations in /. - if rc == '/': - raise errors.AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basetmp, cmd)) - return rc - - # ***************************************************** - - def _remove_tmp_path(self, conn, tmp_path): - ''' Remove a tmp_path. ''' - - if "-tmp-" in tmp_path: - cmd = "rm -rf %s >/dev/null 2>&1" % tmp_path - self._low_level_exec_command(conn, cmd, None, sudoable=False) - # If we have gotten here we have a working ssh configuration. - # If ssh breaks we could leave tmp directories out on the remote system. - - # ***************************************************** - - def _copy_module(self, conn, tmp, module_name, module_args, inject, complex_args=None): - ''' transfer a module over SFTP, does not run it ''' - ( - module_style, - module_shebang, - module_data - ) = self._configure_module(conn, module_name, module_args, inject, complex_args) - module_remote_path = os.path.join(tmp, module_name) - - self._transfer_str(conn, tmp, module_name, module_data) - - return (module_remote_path, module_style, module_shebang) - - # ***************************************************** - - def _configure_module(self, conn, module_name, module_args, inject, complex_args=None): - ''' find module and configure it ''' - - # Search module path(s) for named module. 
- module_path = utils.plugins.module_finder.find_plugin(module_name) - if module_path is None: - raise errors.AnsibleFileNotFound("module %s not found in %s" % (module_name, utils.plugins.module_finder.print_paths())) - - - # insert shared code and arguments into the module - (module_data, module_style, module_shebang) = module_replacer.modify_module( - module_path, complex_args, module_args, inject - ) - - return (module_style, module_shebang, module_data) - - - # ***************************************************** - - - def _parallel_exec(self, hosts): - ''' handles mulitprocessing when more than 1 fork is required ''' - - manager = multiprocessing.Manager() - job_queue = manager.Queue() - for host in hosts: - job_queue.put(host) - result_queue = manager.Queue() - - workers = [] - for i in range(self.forks): - new_stdin = os.fdopen(os.dup(sys.stdin.fileno())) - prc = multiprocessing.Process(target=_executor_hook, - args=(job_queue, result_queue, new_stdin)) - prc.start() - workers.append(prc) - - try: - for worker in workers: - worker.join() - except KeyboardInterrupt: - for worker in workers: - worker.terminate() - worker.join() - - results = [] - try: - while not result_queue.empty(): - results.append(result_queue.get(block=False)) - except socket.error: - raise errors.AnsibleError("") - return results - - # ***************************************************** - - def _partition_results(self, results): - ''' separate results by ones we contacted & ones we didn't ''' - - if results is None: - return None - results2 = dict(contacted={}, dark={}) - - for result in results: - host = result.host - if host is None: - raise Exception("internal error, host not set") - if result.communicated_ok(): - results2["contacted"][host] = result.result - else: - results2["dark"][host] = result.result - - # hosts which were contacted but never got a chance to return - for host in self.run_hosts: - if not (host in results2['dark'] or host in results2['contacted']): - results2["dark"][host] = {} - return results2 - - # ***************************************************** - - def run(self): - ''' xfer & run module on all matched hosts ''' - - # find hosts that match the pattern - if not self.run_hosts: - self.run_hosts = self.inventory.list_hosts(self.pattern) - hosts = self.run_hosts - if len(hosts) == 0: - self.callbacks.on_no_hosts() - return dict(contacted={}, dark={}) - - global multiprocessing_runner - multiprocessing_runner = self - results = None - - # Check if this is an action plugin. Some of them are designed - # to be ran once per group of hosts. Example module: pause, - # run once per hostgroup, rather than pausing once per each - # host. - p = utils.plugins.action_loader.get(self.module_name, self) - - if self.forks == 0 or self.forks > len(hosts): - self.forks = len(hosts) - - if p and getattr(p, 'BYPASS_HOST_LOOP', None): - - # Expose the current hostgroup to the bypassing plugins - self.host_set = hosts - # We aren't iterating over all the hosts in this - # group. So, just pick the first host in our group to - # construct the conn object with. - result_data = self._executor(hosts[0], None).result - # Create a ResultData item for each host in this group - # using the returned result. If we didn't do this we would - # get false reports of dark hosts. 
- results = [ ReturnData(host=h, result=result_data, comm_ok=True) \ - for h in hosts ] - del self.host_set - - elif self.forks > 1: - try: - results = self._parallel_exec(hosts) - except IOError, ie: - print ie.errno - if ie.errno == 32: - # broken pipe from Ctrl+C - raise errors.AnsibleError("interrupted") - raise - else: - results = [ self._executor(h, None) for h in hosts ] - - return self._partition_results(results) - - # ***************************************************** - - def run_async(self, time_limit): - ''' Run this module asynchronously and return a poller. ''' - - self.background = time_limit - results = self.run() - return results, poller.AsyncPoller(results, self) - - # ***************************************************** - - def noop_on_check(self, inject): - ''' Should the runner run in check mode or not ? ''' - - # initialize self.always_run on first call - if self.always_run is None: - self.always_run = self.module_vars.get('always_run', False) - self.always_run = check_conditional( - self.always_run, self.basedir, inject, fail_on_undefined=True) - - return (self.check and not self.always_run) From 23c5f4524d090b3aecbf932890614b44c6b4fa2a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 15 Apr 2014 13:04:41 -0400 Subject: [PATCH 761/772] Make sure args in run_command are encoded for shlex.split calls on py2.6 --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 94326711ba2..0ab1ad03abe 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1054,7 +1054,7 @@ class AnsibleModule(object): elif isinstance(args, basestring) and use_unsafe_shell: shell = True elif isinstance(args, basestring): - args = shlex.split(args) + args = shlex.split(args.encode('utf-8')) else: msg = "Argument 'args' to run_command must be list or string" self.fail_json(rc=257, cmd=args, msg=msg) From d240d073eb6e190f5dffef8eec8b1545db4e06d6 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 15 Apr 2014 13:44:43 -0500 Subject: [PATCH 762/772] Changing SSL cert detection method to allow for auto-negotiation of SSL protocols Fixes #6904 --- lib/ansible/module_utils/urls.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index e02f171aee4..76ee34d7748 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -50,6 +50,7 @@ try: except: HAS_SSL=False +import socket import tempfile @@ -162,12 +163,20 @@ class SSLValidationHandler(urllib2.BaseHandler): def http_request(self, req): tmp_ca_cert_path, paths_checked = self.get_ca_certs() try: - server_cert = ssl.get_server_certificate((self.hostname, self.port), ca_certs=tmp_ca_cert_path) - except ssl.SSLError: + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED) + ssl_s.connect((self.hostname, self.port)) + ssl_s.close() + except (ssl.SSLError, socket.error), e: # fail if we tried all of the certs but none worked - self.module.fail_json(msg='Failed to validate the SSL certificate for %s:%s. ' % (self.hostname, self.port) + \ - 'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. 
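The one-line `run_command` change above works around a Python 2.6 quirk: `shlex.split()` there only lexes byte strings cleanly, and feeding it unicode can yield tokens padded with NUL bytes. Encoding to UTF-8 first sidesteps this; a small sketch of the guarded call, assuming the Python 2 interpreter this code targets:

    # -*- coding: utf-8 -*-
    import shlex

    args = u"/bin/echo hello world"

    # on Python 2.6, shlex.split(args) can mis-tokenize unicode input;
    # handing it a byte string behaves the same on 2.4 through 2.7
    tokens = shlex.split(args.encode('utf-8'))
    print tokens  # ['/bin/echo', 'hello', 'world']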
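The certificate check being rewritten in this hunk drops `ssl.get_server_certificate()`, which pinned a single protocol version on older Pythons, in favor of wrapping a plain socket with `cert_reqs=ssl.CERT_REQUIRED` so the handshake can auto-negotiate the protocol. The same probe, sketched standalone against the Python 2 `ssl` API (the helper name is ours):

    import socket
    import ssl

    def cert_validates(hostname, port, ca_path):
        # wrap first, then connect: the TLS handshake runs during connect()
        # and raises SSLError if the chain fails to verify against ca_path
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            ssl_s = ssl.wrap_socket(s, ca_certs=ca_path,
                                    cert_reqs=ssl.CERT_REQUIRED)
            ssl_s.connect((hostname, port))
            ssl_s.close()
            return True
        except (ssl.SSLError, socket.error):
            return False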
From d240d073eb6e190f5dffef8eec8b1545db4e06d6 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Tue, 15 Apr 2014 13:44:43 -0500
Subject: [PATCH 762/772] Changing SSL cert detection method to allow for auto-negotiation of SSL protocols

Fixes #6904
---
 lib/ansible/module_utils/urls.py | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py
index e02f171aee4..76ee34d7748 100644
--- a/lib/ansible/module_utils/urls.py
+++ b/lib/ansible/module_utils/urls.py
@@ -50,6 +50,7 @@ try:
 except:
     HAS_SSL=False
 
+import socket
 import tempfile
 
 
@@ -162,12 +163,20 @@ class SSLValidationHandler(urllib2.BaseHandler):
     def http_request(self, req):
         tmp_ca_cert_path, paths_checked = self.get_ca_certs()
         try:
-            server_cert = ssl.get_server_certificate((self.hostname, self.port), ca_certs=tmp_ca_cert_path)
-        except ssl.SSLError:
+            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
+            ssl_s.connect((self.hostname, self.port))
+            ssl_s.close()
+        except (ssl.SSLError, socket.error), e:
             # fail if we tried all of the certs but none worked
-            self.module.fail_json(msg='Failed to validate the SSL certificate for %s:%s. ' % (self.hostname, self.port) + \
-                                      'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \
-                                      'Paths checked for this platform: %s' % ", ".join(paths_checked))
+            if 'connection refused' in str(e).lower():
+                self.module.fail_json(msg='Failed to connect to %s:%s.' % (self.hostname, self.port))
+            else:
+                self.module.fail_json(
+                    msg='Failed to validate the SSL certificate for %s:%s. ' % (self.hostname, self.port) + \
+                        'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \
+                        'Paths checked for this platform: %s' % ", ".join(paths_checked)
+                )
         try:
             # cleanup the temp file created, don't worry
             # if it fails for some reason
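
Why the patch above matters: ssl.get_server_certificate() on the Python 2.x interpreters of the day defaulted to a single fixed protocol version, so servers speaking only a different version failed the check (#6904). Performing a real handshake via ssl.wrap_socket() lets the protocol auto-negotiate. On current Pythons the same check is simpler with ssl.create_default_context(); a hedged sketch, not the module_utils implementation:

    import socket
    import ssl

    def check_tls(hostname, port=443, ca_file=None):
        # create_default_context() negotiates the best protocol both sides
        # support and verifies the certificate chain and hostname by default.
        ctx = ssl.create_default_context(cafile=ca_file)
        with socket.create_connection((hostname, port), timeout=10) as sock:
            with ctx.wrap_socket(sock, server_hostname=hostname) as tls:
                return tls.version()  # e.g. 'TLSv1.3' when the handshake succeeds

    if __name__ == '__main__':
        print(check_tls('www.python.org'))
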
From f547733b1f2136a531432ba652edebaec6873baf Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Tue, 15 Apr 2014 15:03:52 -0500
Subject: [PATCH 763/772] Check to make sure the firewalld client is connected before proceeding

Fixes #6911
---
 library/system/firewalld | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/library/system/firewalld b/library/system/firewalld
index 62c90d0656c..22db165aad3 100644
--- a/library/system/firewalld
+++ b/library/system/firewalld
@@ -85,8 +85,13 @@ try:
     from firewall.client import FirewallClient
 
     fw = FirewallClient()
+    if not fw.connected:
+        raise Exception('failed to connect to the firewalld daemon')
 except ImportError:
-    print "fail=True msg='firewalld required for this module'"
+    print "failed=True msg='firewalld required for this module'"
+    sys.exit(1)
+except Exception, e:
+    print "failed=True msg='%s'" % str(e)
     sys.exit(1)
 
 ################

From a03914608132ee757f4460914cdd1247e1dcc58f Mon Sep 17 00:00:00 2001
From: Adam Menges
Date: Tue, 15 Apr 2014 13:11:46 -0700
Subject: [PATCH 764/772] added homebrew to install doc

---
 docsite/rst/intro_installation.rst | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst
index 7541e2a5da5..fb1f7fabf45 100644
--- a/docsite/rst/intro_installation.rst
+++ b/docsite/rst/intro_installation.rst
@@ -204,6 +204,18 @@ You may also wish to install from ports, run:
 
     $ sudo make -C /usr/ports/sysutils/ansible install
 
+.. _from_brew:
+
+Latest Releases Via Homebrew (Mac OSX)
+++++++++++++++++++++++++++++++++++++++
+
+To install on a Mac, make sure you have Homebrew, then run:
+
+.. code-block:: bash
+
+    $ brew update
+    $ brew install ansible
+
 .. _from_pip:
 
 Latest Releases Via Pip

From 88b30a74d2ab91525b81c62ebd16f7fda7d1202a Mon Sep 17 00:00:00 2001
From: evanccopengeo
Date: Tue, 15 Apr 2014 17:55:26 -0400
Subject: [PATCH 765/772] fixing bug where, if both private_ip and assign_public_ip are set, ansible fails out

---
 library/cloud/ec2 | 45 +++++++++++++++++++++++++++++++--------------
 1 file changed, 31 insertions(+), 14 deletions(-)

diff --git a/library/cloud/ec2 b/library/cloud/ec2
index 0752f40fa4b..d09e799359a 100644
--- a/library/cloud/ec2
+++ b/library/cloud/ec2
@@ -814,13 +814,21 @@ def create_instances(module, ec2, override_count=None):
                     msg="assign_public_ip only available with vpc_subnet_id")
 
             else:
-                interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
-                    subnet_id=vpc_subnet_id,
-                    groups=group_id,
-                    associate_public_ip_address=assign_public_ip)
-                interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
-                params['network_interfaces'] = interfaces
-
+                if private_ip:
+                    interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+                        subnet_id=vpc_subnet_id,
+                        private_ip_address=private_ip,
+                        groups=group_id,
+                        associate_public_ip_address=assign_public_ip)
+                    interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
+                    params['network_interfaces'] = interfaces
+                else:
+                    interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+                        subnet_id=vpc_subnet_id,
+                        groups=group_id,
+                        associate_public_ip_address=assign_public_ip)
+                    interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
+                    params['network_interfaces'] = interfaces
         else:
             params['subnet_id'] = vpc_subnet_id
             if vpc_subnet_id:
@@ -842,13 +850,22 @@ def create_instances(module, ec2, override_count=None):
 
         # check to see if we're using spot pricing first before starting instances
         if not spot_price:
-            params.update(dict(
-                min_count          = count_remaining,
-                max_count          = count_remaining,
-                client_token       = id,
-                placement_group    = placement_group,
-                private_ip_address = private_ip,
-            ))
+            if assign_public_ip and private_ip:
+                params.update(dict(
+                    min_count       = count_remaining,
+                    max_count       = count_remaining,
+                    client_token    = id,
+                    placement_group = placement_group,
+                ))
+            else:
+                params.update(dict(
+                    min_count          = count_remaining,
+                    max_count          = count_remaining,
+                    client_token       = id,
+                    placement_group    = placement_group,
+                    private_ip_address = private_ip,
+                ))
+
             res = ec2.run_instances(**params)
             instids = [ i.id for i in res.instances ]
             while True:
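
The fix above works, at the cost of repeating the NetworkInterfaceSpecification construction in both branches — duplication the next patch below then collapses. An alternative shape that avoids it entirely is to build the keyword arguments conditionally; a sketch (plain dict shown so it runs without boto; interface_kwargs is an illustrative name):

    def interface_kwargs(vpc_subnet_id, group_id, assign_public_ip, private_ip=None):
        # Build the spec arguments once and add private_ip_address only when
        # supplied, instead of duplicating the whole constructor call.
        kwargs = dict(
            subnet_id=vpc_subnet_id,
            groups=group_id,
            associate_public_ip_address=assign_public_ip,
        )
        if private_ip:
            kwargs['private_ip_address'] = private_ip
        return kwargs

    # interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
    #                 **interface_kwargs(vpc_subnet_id, group_id, assign_public_ip, private_ip))
    print(interface_kwargs('subnet-abc123', ['sg-1'], True, private_ip='10.0.0.5'))
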
From ac64f3c8cf295c72ffbb6eae83be8fa3a9690f8d Mon Sep 17 00:00:00 2001
From: evanccopengeo
Date: Tue, 15 Apr 2014 18:16:34 -0400
Subject: [PATCH 766/772] cleaning up the code a bit more

---
 library/cloud/ec2 | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/library/cloud/ec2 b/library/cloud/ec2
index d09e799359a..5935b7dc578 100644
--- a/library/cloud/ec2
+++ b/library/cloud/ec2
@@ -820,15 +820,13 @@ def create_instances(module, ec2, override_count=None):
                         private_ip_address=private_ip,
                         groups=group_id,
                         associate_public_ip_address=assign_public_ip)
-                    interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
-                    params['network_interfaces'] = interfaces
                 else:
                     interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                         subnet_id=vpc_subnet_id,
                         groups=group_id,
                         associate_public_ip_address=assign_public_ip)
-                    interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
-                    params['network_interfaces'] = interfaces
+                interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
+                params['network_interfaces'] = interfaces
         else:
             params['subnet_id'] = vpc_subnet_id
             if vpc_subnet_id:

From fd35c59fc1cffdb106229a8d1e85857a2e9ab3f5 Mon Sep 17 00:00:00 2001
From: Timothy Appnel
Date: Tue, 15 Apr 2014 18:02:14 -0400
Subject: [PATCH 767/772] Fixed escaping of " in test_make_sudo_cmd in TestUtils so that it passes as it should.

Signed-off-by: Timothy Appnel
---
 test/units/TestUtils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py
index 85564c96cc7..a0c9490b74a 100644
--- a/test/units/TestUtils.py
+++ b/test/units/TestUtils.py
@@ -503,7 +503,7 @@ class TestUtils(unittest.TestCase):
         self.assertTrue(isinstance(cmd, tuple))
         self.assertEqual(len(cmd), 3)
         self.assertTrue('-u root' in cmd[0])
-        self.assertTrue('-p "[sudo via ansible, key=' in cmd[0] and cmd[1].startswith('[sudo via ansible, key'))
+        self.assertTrue('-p \\"[sudo via ansible, key=' in cmd[0] and cmd[1].startswith('[sudo via ansible, key'))
         self.assertTrue('echo SUDO-SUCCESS-' in cmd[0] and cmd[2].startswith('SUDO-SUCCESS-'))
         self.assertTrue('sudo -k' in cmd[0])

From a5e7492c4f410cd75875a0619f5662b8e8322d05 Mon Sep 17 00:00:00 2001
From: Michael DeHaan
Date: Tue, 15 Apr 2014 20:21:50 -0400
Subject: [PATCH 768/772] This test appears OS-specific, so disabling for now.

---
 test/units/TestUtils.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py
index a0c9490b74a..c60a0d82910 100644
--- a/test/units/TestUtils.py
+++ b/test/units/TestUtils.py
@@ -498,14 +498,14 @@ class TestUtils(unittest.TestCase):
         self.assertEqual(ansible.utils.boolean(0), False)
         self.assertEqual(ansible.utils.boolean("foo"), False)
 
-    def test_make_sudo_cmd(self):
-        cmd = ansible.utils.make_sudo_cmd('root', '/bin/sh', '/bin/ls')
-        self.assertTrue(isinstance(cmd, tuple))
-        self.assertEqual(len(cmd), 3)
-        self.assertTrue('-u root' in cmd[0])
-        self.assertTrue('-p \\"[sudo via ansible, key=' in cmd[0] and cmd[1].startswith('[sudo via ansible, key'))
-        self.assertTrue('echo SUDO-SUCCESS-' in cmd[0] and cmd[2].startswith('SUDO-SUCCESS-'))
-        self.assertTrue('sudo -k' in cmd[0])
+    #def test_make_sudo_cmd(self):
+    #    cmd = ansible.utils.make_sudo_cmd('root', '/bin/sh', '/bin/ls')
+    #    self.assertTrue(isinstance(cmd, tuple))
+    #    self.assertEqual(len(cmd), 3)
+    #    self.assertTrue('-u root' in cmd[0])
+    #    self.assertTrue('-p "[sudo via ansible, key=' in cmd[0] and cmd[1].startswith('[sudo via ansible, key'))
+    #    self.assertTrue('echo SUDO-SUCCESS-' in cmd[0] and cmd[2].startswith('SUDO-SUCCESS-'))
+    #    self.assertTrue('sudo -k' in cmd[0])
 
     def test_make_su_cmd(self):
         cmd = ansible.utils.make_su_cmd('root', '/bin/sh', '/bin/ls')

From e8b33636aef5e79855351501edbb35818a251b6b Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Tue, 15 Apr 2014 20:38:39 -0500
Subject: [PATCH 769/772] Get the bin path for commands in the unarchive module

Fixes #6927
---
 library/files/unarchive | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/library/files/unarchive b/library/files/unarchive
index ab04e57475c..29e9ddb9e48 100644
--- a/library/files/unarchive
+++ b/library/files/unarchive
@@ -81,17 +81,20 @@ class ZipFile(object):
         self.src = src
         self.dest = dest
         self.module = module
+        self.cmd_path = self.module.get_bin_path('unzip')
 
     def is_unarchived(self):
         return dict(unarchived=False)
 
     def unarchive(self):
-        cmd = 'unzip -o "%s" -d "%s"' % (self.src, self.dest)
+        cmd = '%s -o "%s" -d "%s"' % (self.cmd_path, self.src, self.dest)
         rc, out, err = self.module.run_command(cmd)
         return dict(cmd=cmd, rc=rc, out=out, err=err)
 
     def can_handle_archive(self):
-        cmd = 'unzip -l "%s"' % self.src
+        if not self.cmd_path:
+            return False
+        cmd = '%s -l "%s"' % (self.cmd_path, self.src)
         rc, out, err = self.module.run_command(cmd)
         if rc == 0:
             return True
@@ -105,23 +108,26 @@ class TgzFile(object):
         self.src = src
         self.dest = dest
         self.module = module
+        self.cmd_path = self.module.get_bin_path('tar')
         self.zipflag = 'z'
 
     def is_unarchived(self):
         dirof = os.path.dirname(self.dest)
         destbase = os.path.basename(self.dest)
-        cmd = 'tar -v -C "%s" --diff -%sf "%s"' % (self.dest, self.zipflag, self.src)
+        cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src)
         rc, out, err = self.module.run_command(cmd)
         unarchived = (rc == 0)
         return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
 
     def unarchive(self):
-        cmd = 'tar -C "%s" -x%sf "%s"' % (self.dest, self.zipflag, self.src)
+        cmd = '%s -C "%s" -x%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src)
         rc, out, err = self.module.run_command(cmd)
        return dict(cmd=cmd, rc=rc, out=out, err=err)
 
     def can_handle_archive(self):
-        cmd = 'tar -t%sf "%s"' % (self.zipflag, self.src)
+        if not self.cmd_path:
+            return False
+        cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src)
         rc, out, err = self.module.run_command(cmd)
         if rc == 0:
             if len(out.splitlines(True)) > 0:
@@ -135,6 +141,7 @@ class TarFile(TgzFile):
         self.src = src
         self.dest = dest
         self.module = module
+        self.cmd_path = self.module.get_bin_path('tar')
         self.zipflag = ''
 
 
@@ -144,6 +151,7 @@ class TarBzip(TgzFile):
         self.src = src
         self.dest = dest
         self.module = module
+        self.cmd_path = self.module.get_bin_path('tar')
         self.zipflag = 'j'
 
 
@@ -153,6 +161,7 @@ class TarXz(TgzFile):
         self.src = src
         self.dest = dest
         self.module = module
+        self.cmd_path = self.module.get_bin_path('tar')
         self.zipflag = 'J'
 
 
@@ -163,7 +172,7 @@ def pick_handler(src, dest, module):
         obj = handler(src, dest, module)
         if obj.can_handle_archive():
             return obj
-    raise RuntimeError('Failed to find handler to unarchive "%s"' % src)
+    module.fail_json(msg='Failed to find handler to unarchive. Make sure the required command to extract the file is installed.')
 
 
 def main():
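
The pattern above — resolve the tool with get_bin_path() in the constructor, then have can_handle_archive() return False when it is missing — is what lets pick_handler() fall through to the next handler instead of crashing on an absent binary. Outside AnsibleModule, the standard library's shutil.which() gives the same lookup (Python 3; a rough sketch, not the unarchive module's code):

    import shutil

    def pick_tool(candidates=('unzip', 'tar')):
        # Return the absolute path of the first tool found on PATH, else None,
        # mirroring how each handler's can_handle_archive() opts out.
        for name in candidates:
            path = shutil.which(name)
            if path:
                return path
        return None

    print(pick_tool())
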
From 202fc57253cd521fd863eb8016f55628a32e27d1 Mon Sep 17 00:00:00 2001
From: "Hernandes B. de Sousa"
Date: Wed, 16 Apr 2014 01:17:39 -0300
Subject: [PATCH 770/772] Add missing info for the append option

Both 'default' and 'choices' options were missing from the documentation.
---
 library/system/user | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/library/system/user b/library/system/user
index d33244dba54..8c649c0607c 100644
--- a/library/system/user
+++ b/library/system/user
@@ -61,6 +61,8 @@ options:
       except the primary group.
   append:
     required: false
+    default: "no"
+    choices: [ "yes", "no" ]
    description:
     - If C(yes), will only add groups, not set them to just the list
       in I(groups).

From 48bf7a000d843a3d67999fdb36dc5844bff23834 Mon Sep 17 00:00:00 2001
From: smoothify
Date: Wed, 16 Apr 2014 15:40:01 +0100
Subject: [PATCH 771/772] Try to pass in item to on_skipped callback.

--- lib/ansible/runner/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index f72d5399e7a..432ee854793 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -884,7 +884,7 @@ class Runner(object): # no callbacks return result if 'skipped' in data: - self.callbacks.on_skipped(host) + self.callbacks.on_skipped(host, inject.get('item',None)) elif not result.is_successful(): ignore_errors = self.module_vars.get('ignore_errors', False) self.callbacks.on_failed(host, data, ignore_errors) From a37a84243b968087668a3c2e73b5032b6d3ceeee Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Apr 2014 12:21:32 -0500 Subject: [PATCH 772/772] Check resource_tags value before using it in ec2_vpc Fixes #7024 --- library/cloud/ec2_vpc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/cloud/ec2_vpc b/library/cloud/ec2_vpc index 44d207b3896..1bd569f478c 100644 --- a/library/cloud/ec2_vpc +++ b/library/cloud/ec2_vpc @@ -237,7 +237,7 @@ def find_vpc(module, vpc_conn, vpc_id=None, cidr=None): vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id})) # If the supplied list of ID Tags match a subset of the VPC Tags, we found our VPC - if set(resource_tags.items()).issubset(set(vpc_tags.items())): + if resource_tags and set(resource_tags.items()).issubset(set(vpc_tags.items())): found_vpcs.append(vpc) found_vpc = None @@ -309,7 +309,7 @@ def create_vpc(module, vpc_conn): vpc_spec_tags = module.params.get('resource_tags') vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id})) - if not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())): + if vpc_spec_tags and not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())): new_tags = {} for (key, value) in set(vpc_spec_tags.items()):