diff --git a/.gitmodules b/.gitmodules
index 3f14953ec8f..793522a29c6 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,16 +1,12 @@
 [submodule "lib/ansible/modules/core"]
 	path = lib/ansible/modules/core
-	url = https://github.com/ansible/ansible-modules-core.git
-	branch = devel
+	url = https://github.com/ansible/ansible-modules-core
 [submodule "lib/ansible/modules/extras"]
 	path = lib/ansible/modules/extras
-	url = https://github.com/ansible/ansible-modules-extras.git
-	branch = devel
-[submodule "v2/ansible/modules/core"]
-	path = v2/ansible/modules/core
-	url = https://github.com/ansible/ansible-modules-core.git
-	branch = devel
-[submodule "v2/ansible/modules/extras"]
-	path = v2/ansible/modules/extras
-	url = https://github.com/ansible/ansible-modules-extras.git
-	branch = devel
+	url = https://github.com/ansible/ansible-modules-extras
+[submodule "v1/ansible/modules/core"]
+	path = v1/ansible/modules/core
+	url = https://github.com/ansible/ansible-modules-core
+[submodule "v1/ansible/modules/extras"]
+	path = v1/ansible/modules/extras
+	url = https://github.com/ansible/ansible-modules-extras
diff --git a/.travis.yml b/.travis.yml
index 6e18e06050c..335a8e58e31 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,11 +1,20 @@
 sudo: false
 language: python
 env:
+  - TOXENV=py24
   - TOXENV=py26
   - TOXENV=py27
+addons:
+  apt:
+    sources:
+      - deadsnakes
+    packages:
+      - python2.4
 install:
-  - pip install tox
+  - pip install tox PyYAML Jinja2 sphinx
 script:
-  - tox
+- if test x"$TOXENV" != x'py24' ; then tox ; fi
+- if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi
+ #- make -C docsite all
 after_success:
   - coveralls
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 98006503692..fdf0e4aa846 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,19 +4,47 @@ Ansible Changes By Release
 ## 2.0 "TBD" - ACTIVE DEVELOPMENT
 
 Major Changes:
+ * Introducing the new block/rescue/always directives, which allow for making task blocks with exception-like semantics (see the sketch after this list)
+ * New strategy plugins, which allow control over the flow of task execution per play; the default strategy behaves the same as before
+ * Improved error handling: parser messages are now much more detailed, and general exception handling and display have been revamped
+ * Task includes are now evaluated during execution; the end behaviour is the same, but this allows for more dynamic includes and options
+ * The first feature of the more dynamic includes is that with_ loops are now usable with them
+ * The callback, connection and lookup plugin APIs have changed; some plugins will require modification to work with the new version
+ * callbacks are now shipped in the active directory and don't need to be copied, just whitelisted in ansible.cfg
+ * Many API changes; this will break those currently using the API directly, but the new API is much easier to use and test
+ * Settings are now more inheritable: what you set at the play, block or role level is automatically inherited by what they contain.
+   This allows new features to be settable at all levels automatically, where previously we had to code this by hand
+ * Many more tests; the new API makes things more testable and we took advantage of it
  * big_ip modules now support turning off ssl certificate validation (use only for self signed)
- * template code now retains types for bools and Numbers instead of turning them into strings
+ * template code now retains types for bools and numbers instead of turning them into strings. If you need the old behaviour, quote the value and it will get passed around as a string
+ * Consolidated code from modules using urllib2 to normalize features, TLS and SNI support
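As a reviewer's note, the block/rescue/always bullet above is easiest to grasp from a snippet. A minimal sketch of the new 2.0 syntax, with hypothetical task contents that are not part of this changeset:

```yaml
# Hypothetical play excerpt showing the new block/rescue/always directives.
- hosts: dbservers
  tasks:
    - block:
        - name: attempt a risky change
          command: /usr/local/bin/upgrade-db    # hypothetical script
      rescue:
        - name: runs only if a task inside the block failed
          command: /usr/local/bin/rollback-db   # hypothetical script
      always:
        - name: runs regardless of success or failure
          debug: msg="upgrade attempt finished"
```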
 Deprecated Modules (new ones in parens):
  * ec2_ami_search (ec2_ami_find)
  * quantum_network (os_network)
+ * glance_image
  * nova_compute (os_server)
+ * quantum_floating_ip (os_floating_ip)
 
 New Modules:
- * find
- * ec2_ami_find
- * ec2_win_password
+ * amazon: ec2_ami_copy
+ * amazon: ec2_ami_find
+ * amazon: ec2_eni
+ * amazon: ec2_eni_facts
+ * amazon: ec2_vpc_net
+ * amazon: ec2_vpc_route_table_facts
+ * amazon: ec2_vpc_subnet
+ * amazon: ec2_win_password
+ * amazon: elasticache_subnet_group
+ * amazon: iam
+ * amazon: iam_policy
+ * amazon: route53_zone
+ * amazon: sts_assume_role
+ * amazon: s3_logging
+ * apk
+ * bundler
+ * centurylink: clc_publicip
  * circonus_annotation
  * consul
  * consul_acl
@@ -25,46 +53,131 @@ New Modules:
  * cloudtrail
  * cloudstack: cs_account
  * cloudstack: cs_affinitygroup
+ * cloudstack: cs_facts
  * cloudstack: cs_firewall
  * cloudstack: cs_iso
  * cloudstack: cs_instance
  * cloudstack: cs_instancegroup
+ * cloudstack: cs_network
  * cloudstack: cs_portforward
+ * cloudstack: cs_project
  * cloudstack: cs_sshkeypair
  * cloudstack: cs_securitygroup
  * cloudstack: cs_securitygroup_rule
+ * cloudstack: cs_staticnat
+ * cloudstack: cs_template
  * cloudstack: cs_vmsnapshot
+ * datadog_monitor
+ * dpkg_selections
+ * elasticsearch_plugin
+ * expect
+ * find
+ * hall
+ * libvirt: virt_net
+ * libvirt: virt_pool
  * maven_artifact
+ * openstack: os_ironic
+ * openstack: os_ironic_node
+ * openstack: os_client_config
+ * openstack: os_floating_ip
+ * openstack: os_image
  * openstack: os_network
+ * openstack: os_nova_flavor
+ * openstack: os_object
+ * openstack: os_security_group
+ * openstack: os_security_group_rule
  * openstack: os_server
  * openstack: os_server_actions
  * openstack: os_server_facts
  * openstack: os_server_volume
  * openstack: os_subnet
  * openstack: os_volume
+ * osx_defaults
+ * pam_limits
+ * pear
+ * profitbricks: profitbricks
  * proxmox
+ * proxmox_template
+ * puppet
  * pushover
  * pushbullet
+ * rax: rax_mon_alarm
+ * rax: rax_mon_check
+ * rax: rax_mon_entity
+ * rax: rax_mon_notification
+ * rax: rax_mon_notification_plan
  * rabbitmq_binding
  * rabbitmq_exchange
  * rabbitmq_queue
- * zabbix_host
- * zabbix_hostmacro
- * zabbix_screen
+ * selinux_permissive
+ * sensu_check
+ * sensu_subscription
+ * slackpkg
  * vertica_configuration
  * vertica_facts
  * vertica_role
  * vertica_schema
  * vertica_user
- * vmware_datacenter
+ * vmware: vmware_datacenter
+ * vmware: vca_fw
+ * vmware: vca_nat
+ * vmware: vsphere_copy
+ * webfaction_app
+ * webfaction_db
+ * webfaction_domain
+ * webfaction_mailbox
+ * webfaction_site
  * win_environment
+ * win_scheduled_task
+ * win_iis_virtualdirectory
+ * win_iis_webapplication
+ * win_iis_webapppool
+ * win_iis_webbinding
+ * win_iis_website
+ * win_regedit
+ * win_unzip
+ * xenserver_facts
+ * zabbix_host
+ * zabbix_hostmacro
+ * zabbix_screen
 
 New Inventory scripts:
  * cloudstack
  * fleetctl
+ * openvz
+ * proxmox
+ * serf
 
 Other Notable Changes:
+## 1.9.2 "Dancing In the Street" - Jun 26, 2015
+
+* Security fixes to check that hostnames match certificates with https urls (CVE-2015-3908)
+  - get_url and uri modules
+  - url and etcd lookup plugins
+* Security fixes to the zone (Solaris containers), jail (bsd containers),
+  and chroot connection plugins. These plugins can be used to connect to
+  their respective container types in lieu of the standard ssh connection.
+  Prior to this fix being applied these connection plugins didn't properly
+  handle symlinks within the containers which could lead to files intended to
+  be written to or read from the container being written to or read from the
+  host system instead. (CVE pending)
+* Fixed a bug in the service module where init scripts were being incorrectly used instead of upstart/systemd.
+* Fixed a bug where sudo/su settings were not inherited from ansible.cfg correctly.
+* Fixed a bug in the rds module where a traceback may occur due to an unbound variable.
+* Fixed a bug on certain remote file systems where the SELinux context was not being properly set.
+* Re-enabled several windows modules which had been partially merged (via action plugins):
+  - win_copy.ps1
+  - win_copy.py
+  - win_file.ps1
+  - win_file.py
+  - win_template.py
+* Fix bug using with_sequence and a count that is zero. Also allows counting backwards instead of forwards
+* Fix get_url module bug preventing use of custom ports with https urls
+* Fix bug disabling repositories in the yum module.
+* Fix giving yum module a url to install a package from on RHEL/CENTOS5
+* Fix bug in dnf module preventing it from working when yum-utils was not already installed
+
 ## 1.9.1 "Dancing In the Street" - Apr 27, 2015
 
 * Fixed a bug related to Kerberos auth when using winrm with a domain account.
@@ -99,7 +212,7 @@ Major changes:
 * Added travis integration to github for basic tests, this should speed up ticket triage and merging.
 * environment: directive now can also be applied to play and is inherited by tasks, which can still override it.
 * expanded facts and OS/distribution support for existing facts and improved performance with pypy.
-* new 'wantlist' option to lookups allows for selecting a list typed variable vs a command delimited string as the return.
+* new 'wantlist' option to lookups allows for selecting a list typed variable vs a comma delimited string as the return.
 * the shared module code for file backups now uses a timestamp resolution of seconds (previously minutes).
 * allow for empty inventories, this is now a warning and not an error (for those using localhost and cloud modules).
 * sped up YAML parsing in ansible by up to 25% by switching to CParser loader.
@@ -313,7 +426,7 @@ And various other bug fixes and improvements ...
 - Fixes a bug in vault where the password file option was not being used correctly internally.
 - Improved multi-line parsing when using YAML literal blocks (using > or |).
 - Fixed a bug with the file module and the creation of relative symlinks.
-- Fixed a bug where checkmode was not being honored during the templating of files.
+- Fixed a bug where checkmode was not being honoured during the templating of files.
 - Other various bug fixes.
 
 ## 1.7.1 "Summer Nights" - Aug 14, 2014
@@ -356,7 +469,7 @@ New Modules:
 Other notable changes:
 * Security fixes
-   - Prevent the use of lookups when using legaxy "{{ }}" syntax around variables and with_* loops.
+   - Prevent the use of lookups when using legacy "{{ }}" syntax around variables and with_* loops.
    - Remove relative paths in TAR-archived file names used by ansible-galaxy.
 * Inventory speed improvements for very large inventories.
 * Vault password files can now be executable, to support scripts that fetch the vault password.
@@ -1033,7 +1146,7 @@ the variable is still registered for the host, with the attribute skipped: True.
 * service pattern argument now correctly read for BSD services
 * fetch location can now be controlled more directly via the 'flat' parameter.
 * added basename and dirname as Jinja2 filters available to all templates
-* pip works better when sudoing from unpriveledged users
+* pip works better when sudoing from unprivileged users
 * fix for user creation with groups specification reporting 'changed' incorrectly in some cases
 * fix for some unicode encoding errors in outputting some data in verbose mode
 * improved FreeBSD, NetBSD and Solaris facts
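The 'wantlist' hunk in the changelog above is easiest to see in a template expression. A minimal sketch, assuming two small text files exist next to the playbook (file and variable names hypothetical):

```yaml
# Hypothetical vars showing the 'wantlist' lookup option.
vars:
  # Default return: multiple lookup results joined into one comma delimited string.
  motd_csv: "{{ lookup('file', 'a.txt', 'b.txt') }}"
  # wantlist=True returns a list typed variable instead.
  motd_list: "{{ lookup('file', 'a.txt', 'b.txt', wantlist=True) }}"
```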
diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md
index ac252d54146..094501db906 100644
--- a/ISSUE_TEMPLATE.md
+++ b/ISSUE_TEMPLATE.md
@@ -1,6 +1,13 @@
 ##### Issue Type:
 
-Can you help us out in labelling this by telling us what kind of ticket this this? You can say “Bug Report”, “Feature Idea”, “Feature Pull Request”, “New Module Pull Request”, “Bugfix Pull Request”, “Documentation Report”, or “Docs Pull Request”.
+Can you help us out in labelling this by telling us what kind of ticket this is? You can say:
+ - Bug Report
+ - Feature Idea
+ - Feature Pull Request
+ - New Module Pull Request
+ - Bugfix Pull Request
+ - Documentation Report
+ - Docs Pull Request
 
 ##### Ansible Version:
diff --git a/MANIFEST.in b/MANIFEST.in
index f4e727d8c4d..b9bf5f42764 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -10,9 +10,12 @@ include examples/ansible.cfg
 include lib/ansible/module_utils/powershell.ps1
 recursive-include lib/ansible/modules *
 recursive-include docs *
-recursive-include plugins *
 include Makefile
 include VERSION
 include MANIFEST.in
+include contrib/README.md
+include contrib/inventory *
+exclude lib/ansible/modules/core/.git*
+exclude lib/ansible/modules/extras/.git*
 prune lib/ansible/modules/core/.git
 prune lib/ansible/modules/extras/.git
diff --git a/Makefile b/Makefile
index e01e1a9713c..69d749b7194 100644
--- a/Makefile
+++ b/Makefile
@@ -40,6 +40,11 @@ RELEASE := $(shell cat VERSION | cut -f2 -d' ')
 # Get the branch information from git
 ifneq ($(shell which git),)
 GIT_DATE := $(shell git log -n 1 --format="%ai")
+GIT_HASH := $(shell git log -n 1 --format="%h")
+GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD | sed 's/[-_.]//g')
+GITINFO = .$(GIT_HASH).$(GIT_BRANCH)
+else
+GITINFO = ''
 endif
 
 ifeq ($(shell echo $(OS) | egrep -c 'Darwin|FreeBSD|OpenBSD'),1)
@@ -62,7 +67,7 @@ ifeq ($(OFFICIAL),yes)
     DEBUILD_OPTS += -k$(DEBSIGN_KEYID)
 endif
 else
-    DEB_RELEASE = 0.git$(DATE)
+    DEB_RELEASE = 0.git$(DATE)$(GITINFO)
     # Do not sign unofficial builds
     DEBUILD_OPTS += -uc -us
     DPUT_OPTS += -u
@@ -78,7 +83,7 @@ RPMSPEC = $(RPMSPECDIR)/ansible.spec
 RPMDIST = $(shell rpm --eval '%{?dist}')
 RPMRELEASE = $(RELEASE)
 ifneq ($(OFFICIAL),yes)
-    RPMRELEASE = 0.git$(DATE)
+    RPMRELEASE = 0.git$(DATE)$(GITINFO)
 endif
 RPMNVR = "$(NAME)-$(VERSION)-$(RPMRELEASE)$(RPMDIST)"
@@ -136,7 +141,7 @@ clean:
 	@echo "Cleaning up byte compiled python stuff"
 	find . -type f -regex ".*\.py[co]$$" -delete
 	@echo "Cleaning up editor backup files"
-	find . -type f \( -name "*~" -or -name "#*" \) -delete
+	find . -type f \( -name "*~" -or -name "#*" \) |grep -v test/units/inventory_test_data/group_vars/noparse/all.yml~ |xargs -n 1024 -r rm
 	find .
-type f \( -name "*.swp" \) -delete @echo "Cleaning up manpage stuff" find ./docs/man -type f -name "*.xml" -delete diff --git a/README.md b/README.md index 2a7d8e03af7..cea24c84772 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ -[![PyPI version](https://badge.fury.io/py/ansible.png)](http://badge.fury.io/py/ansible) -[![PyPI downloads](https://pypip.in/d/ansible/badge.png)](https://pypi.python.org/pypi/ansible) +[![PyPI version](https://badge.fury.io/py/ansible.svg)](http://badge.fury.io/py/ansible) +[![PyPI downloads](https://pypip.in/d/ansible/badge.svg)](https://pypi.python.org/pypi/ansible) [![Build Status](https://travis-ci.org/ansible/ansible.svg?branch=devel)](https://travis-ci.org/ansible/ansible) @@ -51,7 +51,7 @@ Branch Info Authors ======= -Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.dehaan/gmail/com) and has contributions from over 900 users (and growing). Thanks everyone! +Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.dehaan/gmail/com) and has contributions from over 1000 users (and growing). Thanks everyone! Ansible is sponsored by [Ansible, Inc](http://ansible.com) diff --git a/bin/ansible b/bin/ansible index 7fec34ec81e..209b235c88d 100755 --- a/bin/ansible +++ b/bin/ansible @@ -18,6 +18,8 @@ # along with Ansible. If not, see . ######################################################## +from __future__ import (absolute_import, print_function) +__metaclass__ = type __requires__ = ['ansible'] try: @@ -32,176 +34,77 @@ except Exception: import os import sys +import traceback -from ansible.runner import Runner -import ansible.constants as C -from ansible import utils -from ansible import errors -from ansible import callbacks -from ansible import inventory -######################################################## - -class Cli(object): - ''' code behind bin/ansible ''' - - # ---------------------------------------------- - - def __init__(self): - self.stats = callbacks.AggregateStats() - self.callbacks = callbacks.CliRunnerCallbacks() - if C.DEFAULT_LOAD_CALLBACK_PLUGINS: - callbacks.load_callback_plugins() - - # ---------------------------------------------- - - def parse(self): - ''' create an options parser for bin/ansible ''' - - parser = utils.base_parser( - constants=C, - runas_opts=True, - subset_opts=True, - async_opts=True, - output_opts=True, - connect_opts=True, - check_opts=True, - diff_opts=False, - usage='%prog [options]' - ) - - parser.add_option('-a', '--args', dest='module_args', - help="module arguments", default=C.DEFAULT_MODULE_ARGS) - parser.add_option('-m', '--module-name', dest='module_name', - help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME, - default=C.DEFAULT_MODULE_NAME) - - options, args = parser.parse_args() - self.callbacks.options = options - - if len(args) == 0 or len(args) > 1: - parser.print_help() - sys.exit(1) - - # privlege escalation command line arguments need to be mutually exclusive - utils.check_mutually_exclusive_privilege(options, parser) - - if (options.ask_vault_pass and options.vault_password_file): - parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") - - return (options, args) - - # ---------------------------------------------- - - def run(self, options, args): - ''' use Runner lib to do SSH things ''' - - pattern = args[0] - - sshpass = becomepass = vault_pass = become_method = None - - # Never ask for an SSH password when we run with local connection - if options.connection == "local": - options.ask_pass = 
False - else: - options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS - - options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS +from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError +from ansible.utils.display import Display - # become - utils.normalize_become_options(options) - prompt_method = utils.choose_pass_prompt(options) - (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, become_ask_pass=options.become_ask_pass, ask_vault_pass=options.ask_vault_pass, become_method=prompt_method) +######################################## +### OUTPUT OF LAST RESORT ### +class LastResort(object): + def display(self, msg): + print(msg, file=sys.stderr) - # read vault_pass from a file - if not options.ask_vault_pass and options.vault_password_file: - vault_pass = utils.read_vault_file(options.vault_password_file) + def error(self, msg, wrap_text=None): + print(msg, file=sys.stderr) - extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass) - inventory_manager = inventory.Inventory(options.inventory, vault_password=vault_pass) - if options.subset: - inventory_manager.subset(options.subset) - hosts = inventory_manager.list_hosts(pattern) - - if len(hosts) == 0: - callbacks.display("No hosts matched", stderr=True) - sys.exit(0) - - if options.listhosts: - for host in hosts: - callbacks.display(' %s' % host) - sys.exit(0) - - if options.module_name in ['command','shell'] and not options.module_args: - callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True) - sys.exit(1) - - if options.tree: - utils.prepare_writeable_dir(options.tree) - - runner = Runner( - module_name=options.module_name, - module_path=options.module_path, - module_args=options.module_args, - remote_user=options.remote_user, - remote_pass=sshpass, - inventory=inventory_manager, - timeout=options.timeout, - private_key_file=options.private_key_file, - forks=options.forks, - pattern=pattern, - callbacks=self.callbacks, - transport=options.connection, - subset=options.subset, - check=options.check, - diff=options.check, - vault_pass=vault_pass, - become=options.become, - become_method=options.become_method, - become_pass=becomepass, - become_user=options.become_user, - extra_vars=extra_vars, - ) - - if options.seconds: - callbacks.display("background launch...\n\n", color='cyan') - results, poller = runner.run_async(options.seconds) - results = self.poll_while_needed(poller, options) - else: - results = runner.run() - - return (runner, results) - - # ---------------------------------------------- - - def poll_while_needed(self, poller, options): - ''' summarize results from Runner ''' - - # BACKGROUND POLL LOGIC when -B and -P are specified - if options.seconds and options.poll_interval > 0: - poller.wait(options.seconds, options.poll_interval) - - return poller.results - - -######################################################## +######################################## if __name__ == '__main__': - callbacks.display("", log_only=True) - callbacks.display(" ".join(sys.argv), log_only=True) - callbacks.display("", log_only=True) - cli = Cli() - (options, args) = cli.parse() + display = LastResort() + cli = None + me = os.path.basename(sys.argv[0]) + try: - (runner, results) = cli.run(options, args) - for result in results['contacted'].values(): - if 'failed' in result or result.get('rc', 0) != 0: - sys.exit(2) - if results['dark']: - sys.exit(3) - except errors.AnsibleError, e: - # Generic handler for 
ansible specific errors - callbacks.display("ERROR: %s" % str(e), stderr=True, color='red') + display = Display() + + if me == 'ansible-playbook': + from ansible.cli.playbook import PlaybookCLI as mycli + elif me == 'ansible': + from ansible.cli.adhoc import AdHocCLI as mycli + elif me == 'ansible-pull': + from ansible.cli.pull import PullCLI as mycli + elif me == 'ansible-doc': + from ansible.cli.doc import DocCLI as mycli + elif me == 'ansible-vault': + from ansible.cli.vault import VaultCLI as mycli + elif me == 'ansible-galaxy': + from ansible.cli.galaxy import GalaxyCLI as mycli + + cli = mycli(sys.argv, display=display) + if cli: + cli.parse() + sys.exit(cli.run()) + else: + raise AnsibleError("Program not implemented: %s" % me) + + except AnsibleOptionsError as e: + cli.parser.print_help() + display.error(str(e), wrap_text=False) + sys.exit(5) + except AnsibleParserError as e: + display.error(str(e), wrap_text=False) + sys.exit(4) +# TQM takes care of these, but leaving comment to reserve the exit codes +# except AnsibleHostUnreachable as e: +# display.error(str(e)) +# sys.exit(3) +# except AnsibleHostFailed as e: +# display.error(str(e)) +# sys.exit(2) + except AnsibleError as e: + display.error(str(e), wrap_text=False) sys.exit(1) - + except KeyboardInterrupt: + display.error("User interrupted execution") + sys.exit(99) + except Exception as e: + have_cli_options = cli is not None and cli.options is not None + display.error("Unexpected Exception: %s" % str(e), wrap_text=False) + if not have_cli_options or have_cli_options and cli.options.verbosity > 2: + display.display("the full traceback was:\n\n%s" % traceback.format_exc()) + else: + display.display("to see the full traceback, use -vvv") + sys.exit(250) diff --git a/bin/ansible-doc b/bin/ansible-doc deleted file mode 100755 index dff7cecce79..00000000000 --- a/bin/ansible-doc +++ /dev/null @@ -1,337 +0,0 @@ -#!/usr/bin/env python - -# (c) 2012, Jan-Piet Mens -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# - -import os -import sys -import textwrap -import re -import optparse -import datetime -import subprocess -import fcntl -import termios -import struct - -from ansible import utils -from ansible.utils import module_docs -import ansible.constants as C -from ansible.utils import version -import traceback - -MODULEDIR = C.DEFAULT_MODULE_PATH - -BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm') -IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"] - -_ITALIC = re.compile(r"I\(([^)]+)\)") -_BOLD = re.compile(r"B\(([^)]+)\)") -_MODULE = re.compile(r"M\(([^)]+)\)") -_URL = re.compile(r"U\(([^)]+)\)") -_CONST = re.compile(r"C\(([^)]+)\)") -PAGER = 'less' -LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars) - # -S (chop long lines) -X (disable termcap init and de-init) - -def pager_print(text): - ''' just print text ''' - print text - -def pager_pipe(text, cmd): - ''' pipe text through a pager ''' - if 'LESS' not in os.environ: - os.environ['LESS'] = LESS_OPTS - try: - cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout) - cmd.communicate(input=text) - except IOError: - pass - except KeyboardInterrupt: - pass - -def pager(text): - ''' find reasonable way to display text ''' - # this is a much simpler form of what is in pydoc.py - if not sys.stdout.isatty(): - pager_print(text) - elif 'PAGER' in os.environ: - if sys.platform == 'win32': - pager_print(text) - else: - pager_pipe(text, os.environ['PAGER']) - elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0: - pager_pipe(text, 'less') - else: - pager_print(text) - -def tty_ify(text): - - t = _ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word' - t = _BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word* - t = _MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word] - t = _URL.sub(r"\1", t) # U(word) => word - t = _CONST.sub("`" + r"\1" + "'", t) # C(word) => `word' - - return t - -def get_man_text(doc): - - opt_indent=" " - text = [] - text.append("> %s\n" % doc['module'].upper()) - - desc = " ".join(doc['description']) - - text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=" ", subsequent_indent=" ")) - - if 'option_keys' in doc and len(doc['option_keys']) > 0: - text.append("Options (= is mandatory):\n") - - for o in sorted(doc['option_keys']): - opt = doc['options'][o] - - if opt.get('required', False): - opt_leadin = "=" - else: - opt_leadin = "-" - - text.append("%s %s" % (opt_leadin, o)) - - desc = " ".join(opt['description']) - - if 'choices' in opt: - choices = ", ".join(str(i) for i in opt['choices']) - desc = desc + " (Choices: " + choices + ")" - if 'default' in opt: - default = str(opt['default']) - desc = desc + " [Default: " + default + "]" - text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=opt_indent, - subsequent_indent=opt_indent)) - - if 'notes' in doc and len(doc['notes']) > 0: - notes = " ".join(doc['notes']) - text.append("Notes:%s\n" % textwrap.fill(tty_ify(notes), initial_indent=" ", - subsequent_indent=opt_indent)) - - - if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0: - req = ", ".join(doc['requirements']) - text.append("Requirements:%s\n" % textwrap.fill(tty_ify(req), initial_indent=" ", - subsequent_indent=opt_indent)) - - if 'examples' in doc and len(doc['examples']) > 0: - text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's')) - for ex in doc['examples']: - text.append("%s\n" % (ex['code'])) - - if 'plainexamples' in doc and 
doc['plainexamples'] is not None: - text.append("EXAMPLES:") - text.append(doc['plainexamples']) - if 'returndocs' in doc and doc['returndocs'] is not None: - text.append("RETURN VALUES:") - text.append(doc['returndocs']) - text.append('') - - return "\n".join(text) - - -def get_snippet_text(doc): - - text = [] - desc = tty_ify(" ".join(doc['short_description'])) - text.append("- name: %s" % (desc)) - text.append(" action: %s" % (doc['module'])) - - for o in sorted(doc['options'].keys()): - opt = doc['options'][o] - desc = tty_ify(" ".join(opt['description'])) - - if opt.get('required', False): - s = o + "=" - else: - s = o - - text.append(" %-20s # %s" % (s, desc)) - text.append('') - - return "\n".join(text) - -def get_module_list_text(module_list): - tty_size = 0 - if os.isatty(0): - tty_size = struct.unpack('HHHH', - fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1] - columns = max(60, tty_size) - displace = max(len(x) for x in module_list) - linelimit = columns - displace - 5 - text = [] - deprecated = [] - for module in sorted(set(module_list)): - - if module in module_docs.BLACKLIST_MODULES: - continue - - filename = utils.plugins.module_finder.find_plugin(module) - - if filename is None: - continue - if filename.endswith(".ps1"): - continue - if os.path.isdir(filename): - continue - - try: - doc, plainexamples, returndocs = module_docs.get_docstring(filename) - desc = tty_ify(doc.get('short_description', '?')).strip() - if len(desc) > linelimit: - desc = desc[:linelimit] + '...' - - if module.startswith('_'): # Handle deprecated - deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc)) - else: - text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) - except: - traceback.print_exc() - sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module) - - if len(deprecated) > 0: - text.append("\nDEPRECATED:") - text.extend(deprecated) - return "\n".join(text) - -def find_modules(path, module_list): - - if os.path.isdir(path): - for module in os.listdir(path): - if module.startswith('.'): - continue - elif os.path.isdir(module): - find_modules(module, module_list) - elif any(module.endswith(x) for x in BLACKLIST_EXTS): - continue - elif module.startswith('__'): - continue - elif module in IGNORE_FILES: - continue - elif module.startswith('_'): - fullpath = '/'.join([path,module]) - if os.path.islink(fullpath): # avoids aliases - continue - - module = os.path.splitext(module)[0] # removes the extension - module_list.append(module) - -def main(): - - p = optparse.OptionParser( - version=version("%prog"), - usage='usage: %prog [options] [module...]', - description='Show Ansible module documentation', - ) - - p.add_option("-M", "--module-path", - action="store", - dest="module_path", - default=MODULEDIR, - help="Ansible modules/ directory") - p.add_option("-l", "--list", - action="store_true", - default=False, - dest='list_dir', - help='List available modules') - p.add_option("-s", "--snippet", - action="store_true", - default=False, - dest='show_snippet', - help='Show playbook snippet for specified module(s)') - p.add_option('-v', action='version', help='Show version number and exit') - - (options, args) = p.parse_args() - - if options.module_path is not None: - for i in options.module_path.split(os.pathsep): - utils.plugins.module_finder.add_directory(i) - - if options.list_dir: - # list modules - paths = utils.plugins.module_finder._get_paths() - module_list = [] - 
for path in paths: - find_modules(path, module_list) - - pager(get_module_list_text(module_list)) - sys.exit() - - if len(args) == 0: - p.print_help() - - def print_paths(finder): - ''' Returns a string suitable for printing of the search path ''' - - # Uses a list to get the order right - ret = [] - for i in finder._get_paths(): - if i not in ret: - ret.append(i) - return os.pathsep.join(ret) - - text = '' - for module in args: - - filename = utils.plugins.module_finder.find_plugin(module) - if filename is None: - sys.stderr.write("module %s not found in %s\n" % (module, print_paths(utils.plugins.module_finder))) - continue - - if any(filename.endswith(x) for x in BLACKLIST_EXTS): - continue - - try: - doc, plainexamples, returndocs = module_docs.get_docstring(filename) - except: - traceback.print_exc() - sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module) - continue - - if doc is not None: - - all_keys = [] - for (k,v) in doc['options'].iteritems(): - all_keys.append(k) - all_keys = sorted(all_keys) - doc['option_keys'] = all_keys - - doc['filename'] = filename - doc['docuri'] = doc['module'].replace('_', '-') - doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') - doc['plainexamples'] = plainexamples - doc['returndocs'] = returndocs - - if options.show_snippet: - text += get_snippet_text(doc) - else: - text += get_man_text(doc) - else: - # this typically means we couldn't even parse the docstring, not just that the YAML is busted, - # probably a quoting issue. - sys.stderr.write("ERROR: module %s missing documentation (or could not parse documentation)\n" % module) - pager(text) - -if __name__ == '__main__': - main() diff --git a/bin/ansible-doc b/bin/ansible-doc new file mode 120000 index 00000000000..cabb1f519aa --- /dev/null +++ b/bin/ansible-doc @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy deleted file mode 100755 index a6d625671ec..00000000000 --- a/bin/ansible-galaxy +++ /dev/null @@ -1,957 +0,0 @@ -#!/usr/bin/env python - -######################################################################## -# -# (C) 2013, James Cammarata -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# -######################################################################## - -import datetime -import json -import os -import os.path -import shutil -import subprocess -import sys -import tarfile -import tempfile -import urllib -import urllib2 -import yaml - -from collections import defaultdict -from distutils.version import LooseVersion -from jinja2 import Environment -from optparse import OptionParser - -import ansible.constants as C -import ansible.utils -from ansible.errors import AnsibleError - -default_meta_template = """--- -galaxy_info: - author: {{ author }} - description: {{description}} - company: {{ company }} - # If the issue tracker for your role is not on github, uncomment the - # next line and provide a value - # issue_tracker_url: {{ issue_tracker_url }} - # Some suggested licenses: - # - BSD (default) - # - MIT - # - GPLv2 - # - GPLv3 - # - Apache - # - CC-BY - license: {{ license }} - min_ansible_version: {{ min_ansible_version }} - # - # Below are all platforms currently available. Just uncomment - # the ones that apply to your role. If you don't see your - # platform on this list, let us know and we'll get it added! - # - #platforms: - {%- for platform,versions in platforms.iteritems() %} - #- name: {{ platform }} - # versions: - # - all - {%- for version in versions %} - # - {{ version }} - {%- endfor %} - {%- endfor %} - # - # Below are all categories currently available. Just as with - # the platforms above, uncomment those that apply to your role. - # - #categories: - {%- for category in categories %} - #- {{ category.name }} - {%- endfor %} -dependencies: [] - # List your role dependencies here, one per line. - # Be sure to remove the '[]' above if you add dependencies - # to this list. - {% for dependency in dependencies %} - #- {{ dependency }} - {% endfor %} - -""" - -default_readme_template = """Role Name -========= - -A brief description of the role goes here. - -Requirements ------------- - -Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. - -Role Variables --------------- - -A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. - -Dependencies ------------- - -A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. - -Example Playbook ----------------- - -Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: - - - hosts: servers - roles: - - { role: username.rolename, x: 42 } - -License -------- - -BSD - -Author Information ------------------- - -An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
-""" - -#------------------------------------------------------------------------------------- -# Utility functions for parsing actions/options -#------------------------------------------------------------------------------------- - -VALID_ACTIONS = ("init", "info", "install", "list", "remove") -SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - -def get_action(args): - """ - Get the action the user wants to execute from the - sys argv list. - """ - for i in range(0,len(args)): - arg = args[i] - if arg in VALID_ACTIONS: - del args[i] - return arg - return None - -def build_option_parser(action): - """ - Builds an option parser object based on the action - the user wants to execute. - """ - - usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(VALID_ACTIONS) - epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) - OptionParser.format_epilog = lambda self, formatter: self.epilog - parser = OptionParser(usage=usage, epilog=epilog) - - if not action: - parser.print_help() - sys.exit() - - # options for all actions - # - none yet - - # options specific to actions - if action == "info": - parser.set_usage("usage: %prog info [options] role_name[,version]") - elif action == "init": - parser.set_usage("usage: %prog init [options] role_name") - parser.add_option( - '-p', '--init-path', dest='init_path', default="./", - help='The path in which the skeleton role will be created. ' - 'The default is the current working directory.') - parser.add_option( - '--offline', dest='offline', default=False, action='store_true', - help="Don't query the galaxy API when creating roles") - elif action == "install": - parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]") - parser.add_option( - '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, - help='Ignore errors and continue with the next specified role.') - parser.add_option( - '-n', '--no-deps', dest='no_deps', action='store_true', default=False, - help='Don\'t download roles listed as dependencies') - parser.add_option( - '-r', '--role-file', dest='role_file', - help='A file containing a list of roles to be imported') - elif action == "remove": - parser.set_usage("usage: %prog remove role1 role2 ...") - elif action == "list": - parser.set_usage("usage: %prog list [role_name]") - - # options that apply to more than one action - if action != "init": - parser.add_option( - '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, - help='The path to the directory containing your roles. ' - 'The default is the roles_path configured in your ' - 'ansible.cfg file (/etc/ansible/roles if not configured)') - - if action in ("info","init","install"): - parser.add_option( - '-s', '--server', dest='api_server', default="galaxy.ansible.com", - help='The API server destination') - - if action in ("init","install"): - parser.add_option( - '-f', '--force', dest='force', action='store_true', default=False, - help='Force overwriting an existing role') - # done, return the parser - return parser - -def get_opt(options, k, defval=""): - """ - Returns an option from an Optparse values instance. 
- """ - try: - data = getattr(options, k) - except: - return defval - if k == "roles_path": - if os.pathsep in data: - data = data.split(os.pathsep)[0] - return data - -def exit_without_ignore(options, rc=1): - """ - Exits with the specified return code unless the - option --ignore-errors was specified - """ - - if not get_opt(options, "ignore_errors", False): - print '- you can use --ignore-errors to skip failed roles.' - sys.exit(rc) - - -#------------------------------------------------------------------------------------- -# Galaxy API functions -#------------------------------------------------------------------------------------- - -def api_get_config(api_server): - """ - Fetches the Galaxy API current version to ensure - the API server is up and reachable. - """ - - try: - url = 'https://%s/api/' % api_server - data = json.load(urllib2.urlopen(url)) - if not data.get("current_version",None): - return None - else: - return data - except: - return None - -def api_lookup_role_by_name(api_server, role_name, notify=True): - """ - Uses the Galaxy API to do a lookup on the role owner/name. - """ - - role_name = urllib.quote(role_name) - - try: - parts = role_name.split(".") - user_name = ".".join(parts[0:-1]) - role_name = parts[-1] - if notify: - print "- downloading role '%s', owned by %s" % (role_name, user_name) - except: - parser.print_help() - print "- invalid role name (%s). Specify role as format: username.rolename" % role_name - sys.exit(1) - - url = 'https://%s/api/v1/roles/?owner__username=%s&name=%s' % (api_server,user_name,role_name) - try: - data = json.load(urllib2.urlopen(url)) - if len(data["results"]) == 0: - return None - else: - return data["results"][0] - except: - return None - -def api_fetch_role_related(api_server, related, role_id): - """ - Uses the Galaxy API to fetch the list of related items for - the given role. The url comes from the 'related' field of - the role. - """ - - try: - url = 'https://%s/api/v1/roles/%d/%s/?page_size=50' % (api_server, int(role_id), related) - data = json.load(urllib2.urlopen(url)) - results = data['results'] - done = (data.get('next', None) == None) - while not done: - url = 'https://%s%s' % (api_server, data['next']) - print url - data = json.load(urllib2.urlopen(url)) - results += data['results'] - done = (data.get('next', None) == None) - return results - except: - return None - -def api_get_list(api_server, what): - """ - Uses the Galaxy API to fetch the list of items specified. 
- """ - - try: - url = 'https://%s/api/v1/%s/?page_size' % (api_server, what) - data = json.load(urllib2.urlopen(url)) - if "results" in data: - results = data['results'] - else: - results = data - done = True - if "next" in data: - done = (data.get('next', None) == None) - while not done: - url = 'https://%s%s' % (api_server, data['next']) - print url - data = json.load(urllib2.urlopen(url)) - results += data['results'] - done = (data.get('next', None) == None) - return results - except: - print "- failed to download the %s list" % what - return None - -#------------------------------------------------------------------------------------- -# scm repo utility functions -#------------------------------------------------------------------------------------- - -def scm_archive_role(scm, role_url, role_version, role_name): - if scm not in ['hg', 'git']: - print "- scm %s is not currently supported" % scm - return False - tempdir = tempfile.mkdtemp() - clone_cmd = [scm, 'clone', role_url, role_name] - with open('/dev/null', 'w') as devnull: - try: - print "- executing: %s" % " ".join(clone_cmd) - popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull) - except: - raise AnsibleError("error executing: %s" % " ".join(clone_cmd)) - rc = popen.wait() - if rc != 0: - print "- command %s failed" % ' '.join(clone_cmd) - print " in directory %s" % tempdir - return False - - temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar') - if scm == 'hg': - archive_cmd = ['hg', 'archive', '--prefix', "%s/" % role_name] - if role_version: - archive_cmd.extend(['-r', role_version]) - archive_cmd.append(temp_file.name) - if scm == 'git': - archive_cmd = ['git', 'archive', '--prefix=%s/' % role_name, '--output=%s' % temp_file.name] - if role_version: - archive_cmd.append(role_version) - else: - archive_cmd.append('HEAD') - - with open('/dev/null', 'w') as devnull: - print "- executing: %s" % " ".join(archive_cmd) - popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, role_name), - stderr=devnull, stdout=devnull) - rc = popen.wait() - if rc != 0: - print "- command %s failed" % ' '.join(archive_cmd) - print " in directory %s" % tempdir - return False - - shutil.rmtree(tempdir, ignore_errors=True) - - return temp_file.name - - -#------------------------------------------------------------------------------------- -# Role utility functions -#------------------------------------------------------------------------------------- - -def get_role_path(role_name, options): - """ - Returns the role path based on the roles_path option - and the role name. - """ - roles_path = get_opt(options,'roles_path') - roles_path = os.path.join(roles_path, role_name) - roles_path = os.path.expanduser(roles_path) - return roles_path - -def get_role_metadata(role_name, options): - """ - Returns the metadata as YAML, if the file 'meta/main.yml' - exists in the specified role_path - """ - role_path = os.path.join(get_role_path(role_name, options), 'meta/main.yml') - try: - if os.path.isfile(role_path): - f = open(role_path, 'r') - meta_data = yaml.safe_load(f) - f.close() - return meta_data - else: - return None - except: - return None - -def get_galaxy_install_info(role_name, options): - """ - Returns the YAML data contained in 'meta/.galaxy_install_info', - if it exists. 
- """ - - try: - info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info') - if os.path.isfile(info_path): - f = open(info_path, 'r') - info_data = yaml.safe_load(f) - f.close() - return info_data - else: - return None - except: - return None - -def write_galaxy_install_info(role_name, role_version, options): - """ - Writes a YAML-formatted file to the role's meta/ directory - (named .galaxy_install_info) which contains some information - we can use later for commands like 'list' and 'info'. - """ - - info = dict( - version = role_version, - install_date = datetime.datetime.utcnow().strftime("%c"), - ) - try: - info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info') - f = open(info_path, 'w+') - info_data = yaml.safe_dump(info, f) - f.close() - except: - return False - return True - - -def remove_role(role_name, options): - """ - Removes the specified role from the roles path. There is a - sanity check to make sure there's a meta/main.yml file at this - path so the user doesn't blow away random directories - """ - if get_role_metadata(role_name, options): - role_path = get_role_path(role_name, options) - shutil.rmtree(role_path) - return True - else: - return False - -def fetch_role(role_name, target, role_data, options): - """ - Downloads the archived role from github to a temp location, extracts - it, and then copies the extracted role to the role library path. - """ - - # first grab the file and save it to a temp location - if '://' in role_name: - archive_url = role_name - else: - archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target) - print "- downloading role from %s" % archive_url - - try: - url_file = urllib2.urlopen(archive_url) - temp_file = tempfile.NamedTemporaryFile(delete=False) - data = url_file.read() - while data: - temp_file.write(data) - data = url_file.read() - temp_file.close() - return temp_file.name - except Exception, e: - # TODO: better urllib2 error handling for error - # messages that are more exact - print "- error: failed to download the file." - return False - -def install_role(role_name, role_version, role_filename, options): - # the file is a tar, so open it that way and extract it - # to the specified (or default) roles directory - - if not tarfile.is_tarfile(role_filename): - print "- error: the file downloaded was not a tar.gz" - return False - else: - if role_filename.endswith('.gz'): - role_tar_file = tarfile.open(role_filename, "r:gz") - else: - role_tar_file = tarfile.open(role_filename, "r") - # verify the role's meta file - meta_file = None - members = role_tar_file.getmembers() - # next find the metadata file - for member in members: - if "/meta/main.yml" in member.name: - meta_file = member - break - if not meta_file: - print "- error: this role does not appear to have a meta/main.yml file." - return False - else: - try: - meta_file_data = yaml.safe_load(role_tar_file.extractfile(meta_file)) - except: - print "- error: this role does not appear to have a valid meta/main.yml file." 
- return False - - # we strip off the top-level directory for all of the files contained within - # the tar file here, since the default is 'github_repo-target', and change it - # to the specified role's name - role_path = os.path.join(get_opt(options, 'roles_path'), role_name) - role_path = os.path.expanduser(role_path) - print "- extracting %s to %s" % (role_name, role_path) - try: - if os.path.exists(role_path): - if not os.path.isdir(role_path): - print "- error: the specified roles path exists and is not a directory." - return False - elif not get_opt(options, "force", False): - print "- error: the specified role %s appears to already exist. Use --force to replace it." % role_name - return False - else: - # using --force, remove the old path - if not remove_role(role_name, options): - print "- error: %s doesn't appear to contain a role." % role_path - print " please remove this directory manually if you really want to put the role here." - return False - else: - os.makedirs(role_path) - - # now we do the actual extraction to the role_path - for member in members: - # we only extract files, and remove any relative path - # bits that might be in the file for security purposes - # and drop the leading directory, as mentioned above - if member.isreg() or member.issym(): - parts = member.name.split("/")[1:] - final_parts = [] - for part in parts: - if part != '..' and '~' not in part and '$' not in part: - final_parts.append(part) - member.name = os.path.join(*final_parts) - role_tar_file.extract(member, role_path) - - # write out the install info file for later use - write_galaxy_install_info(role_name, role_version, options) - except OSError, e: - print "- error: you do not have permission to modify files in %s" % role_path - return False - - # return the parsed yaml metadata - print "- %s was installed successfully" % role_name - return meta_file_data - -#------------------------------------------------------------------------------------- -# Action functions -#------------------------------------------------------------------------------------- - -def execute_init(args, options, parser): - """ - Executes the init action, which creates the skeleton framework - of a role that complies with the galaxy metadata format. - """ - - init_path = get_opt(options, 'init_path', './') - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - force = get_opt(options, 'force', False) - offline = get_opt(options, 'offline', False) - - if not offline: - api_config = api_get_config(api_server) - if not api_config: - print "- the API server (%s) is not responding, please try again later." % api_server - sys.exit(1) - - try: - role_name = args.pop(0).strip() - if role_name == "": - raise Exception("") - role_path = os.path.join(init_path, role_name) - if os.path.exists(role_path): - if os.path.isfile(role_path): - print "- the path %s already exists, but is a file - aborting" % role_path - sys.exit(1) - elif not force: - print "- the directory %s already exists." % role_path - print " you can use --force to re-initialize this directory,\n" + \ - " however it will reset any main.yml files that may have\n" + \ - " been modified there already." 
- sys.exit(1) - except Exception, e: - parser.print_help() - print "- no role name specified for init" - sys.exit(1) - - ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars') - - # create the default README.md - if not os.path.exists(role_path): - os.makedirs(role_path) - readme_path = os.path.join(role_path, "README.md") - f = open(readme_path, "wb") - f.write(default_readme_template) - f.close - - for dir in ROLE_DIRS: - dir_path = os.path.join(init_path, role_name, dir) - main_yml_path = os.path.join(dir_path, 'main.yml') - # create the directory if it doesn't exist already - if not os.path.exists(dir_path): - os.makedirs(dir_path) - - # now create the main.yml file for that directory - if dir == "meta": - # create a skeleton meta/main.yml with a valid galaxy_info - # datastructure in place, plus with all of the available - # tags/platforms included (but commented out) and the - # dependencies section - platforms = [] - if not offline: - platforms = api_get_list(api_server, "platforms") or [] - categories = [] - if not offline: - categories = api_get_list(api_server, "categories") or [] - - # group the list of platforms from the api based - # on their names, with the release field being - # appended to a list of versions - platform_groups = defaultdict(list) - for platform in platforms: - platform_groups[platform['name']].append(platform['release']) - platform_groups[platform['name']].sort() - - inject = dict( - author = 'your name', - company = 'your company (optional)', - license = 'license (GPLv2, CC-BY, etc)', - issue_tracker_url = 'http://example.com/issue/tracker', - min_ansible_version = '1.2', - platforms = platform_groups, - categories = categories, - ) - rendered_meta = Environment().from_string(default_meta_template).render(inject) - f = open(main_yml_path, 'w') - f.write(rendered_meta) - f.close() - pass - elif dir not in ('files','templates'): - # just write a (mostly) empty YAML file for main.yml - f = open(main_yml_path, 'w') - f.write('---\n# %s file for %s\n' % (dir,role_name)) - f.close() - print "- %s was created successfully" % role_name - -def execute_info(args, options, parser): - """ - Executes the info action. This action prints out detailed - information about an installed role as well as info available - from the galaxy API. 
- """ - - if len(args) == 0: - # the user needs to specify a role - parser.print_help() - print "- you must specify a user/role name" - sys.exit(1) - - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - api_config = api_get_config(api_server) - roles_path = get_opt(options, "roles_path") - - for role in args: - - role_info = {} - - install_info = get_galaxy_install_info(role, options) - if install_info: - if 'version' in install_info: - install_info['intalled_version'] = install_info['version'] - del install_info['version'] - role_info.update(install_info) - - remote_data = api_lookup_role_by_name(api_server, role, False) - if remote_data: - role_info.update(remote_data) - - metadata = get_role_metadata(role, options) - if metadata: - role_info.update(metadata) - - role_spec = ansible.utils.role_spec_parse(role) - if role_spec: - role_info.update(role_spec) - - if role_info: - print "- %s:" % (role) - for k in sorted(role_info.keys()): - - if k in SKIP_INFO_KEYS: - continue - - if isinstance(role_info[k], dict): - print "\t%s: " % (k) - for key in sorted(role_info[k].keys()): - if key in SKIP_INFO_KEYS: - continue - print "\t\t%s: %s" % (key, role_info[k][key]) - else: - print "\t%s: %s" % (k, role_info[k]) - else: - print "- the role %s was not found" % role - -def execute_install(args, options, parser): - """ - Executes the installation action. The args list contains the - roles to be installed, unless -f was specified. The list of roles - can be a name (which will be downloaded via the galaxy API and github), - or it can be a local .tar.gz file. - """ - - role_file = get_opt(options, "role_file", None) - - if len(args) == 0 and role_file is None: - # the user needs to specify one of either --role-file - # or specify a single user/role name - parser.print_help() - print "- you must specify a user/role name or a roles file" - sys.exit() - elif len(args) == 1 and not role_file is None: - # using a role file is mutually exclusive of specifying - # the role name on the command line - parser.print_help() - print "- please specify a user/role name, or a roles file, but not both" - sys.exit(1) - - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - no_deps = get_opt(options, "no_deps", False) - roles_path = get_opt(options, "roles_path") - - roles_done = [] - if role_file: - f = open(role_file, 'r') - if role_file.endswith('.yaml') or role_file.endswith('.yml'): - roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f)) - else: - # roles listed in a file, one per line - roles_left = map(ansible.utils.role_spec_parse, f.readlines()) - f.close() - else: - # roles were specified directly, so we'll just go out grab them - # (and their dependencies, unless the user doesn't want us to). 
- roles_left = map(ansible.utils.role_spec_parse, args) - - while len(roles_left) > 0: - # query the galaxy API for the role data - role_data = None - role = roles_left.pop(0) - role_src = role.get("src") - role_scm = role.get("scm") - role_path = role.get("path") - - if role_path: - options.roles_path = role_path - else: - options.roles_path = roles_path - - if os.path.isfile(role_src): - # installing a local tar.gz - tmp_file = role_src - else: - if role_scm: - # create tar file from scm url - tmp_file = scm_archive_role(role_scm, role_src, role.get("version"), role.get("name")) - elif '://' in role_src: - # just download a URL - version will probably be in the URL - tmp_file = fetch_role(role_src, None, None, options) - else: - # installing from galaxy - api_config = api_get_config(api_server) - if not api_config: - print "- the API server (%s) is not responding, please try again later." % api_server - sys.exit(1) - - role_data = api_lookup_role_by_name(api_server, role_src) - if not role_data: - print "- sorry, %s was not found on %s." % (role_src, api_server) - exit_without_ignore(options) - continue - - role_versions = api_fetch_role_related(api_server, 'versions', role_data['id']) - if "version" not in role or role['version'] == '': - # convert the version names to LooseVersion objects - # and sort them to get the latest version. If there - # are no versions in the list, we'll grab the head - # of the master branch - if len(role_versions) > 0: - loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions] - loose_versions.sort() - role["version"] = str(loose_versions[-1]) - else: - role["version"] = 'master' - elif role['version'] != 'master': - if role_versions and role["version"] not in [a.get('name', None) for a in role_versions]: - print 'role is %s' % role - print "- the specified version (%s) was not found in the list of available versions (%s)." % (role['version'], role_versions) - exit_without_ignore(options) - continue - - # download the role. if --no-deps was specified, we stop here, - # otherwise we recursively grab roles and all of their deps. - tmp_file = fetch_role(role_src, role["version"], role_data, options) - installed = False - if tmp_file: - installed = install_role(role.get("name"), role.get("version"), tmp_file, options) - # we're done with the temp file, clean it up - if tmp_file != role_src: - os.unlink(tmp_file) - # install dependencies, if we want them - if not no_deps and installed: - if not role_data: - role_data = get_role_metadata(role.get("name"), options) - role_dependencies = role_data['dependencies'] - else: - role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id']) - for dep in role_dependencies: - if isinstance(dep, basestring): - dep = ansible.utils.role_spec_parse(dep) - else: - dep = ansible.utils.role_yaml_parse(dep) - if not get_role_metadata(dep["name"], options): - if dep not in roles_left: - print '- adding dependency: %s' % dep["name"] - roles_left.append(dep) - else: - print '- dependency %s already pending installation.' % dep["name"] - else: - print '- dependency %s is already installed, skipping.' % dep["name"] - if not tmp_file or not installed: - print "- %s was NOT installed successfully." % role.get("name") - exit_without_ignore(options) - sys.exit(0) - -def execute_remove(args, options, parser): - """ - Executes the remove action. The args list contains the list - of roles to be removed. This list can contain more than one role. 
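The version pick in the install loop above relies on `LooseVersion` sorting, which orders '1.10' after '1.9' where a plain string sort would not. That logic, isolated:

```python
# Choose the newest Galaxy release name, falling back to 'master' when the
# role has no tagged versions, as in the install loop above.
from distutils.version import LooseVersion

def pick_version(role_versions):
    names = [v.get('name') for v in role_versions if v.get('name')]
    if not names:
        return 'master'
    return str(sorted(LooseVersion(n) for n in names)[-1])

print(pick_version([{'name': '1.2'}, {'name': '1.10'}, {'name': '1.9'}]))  # -> 1.10
```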
- """ - - if len(args) == 0: - parser.print_help() - print '- you must specify at least one role to remove.' - sys.exit() - - for role in args: - if get_role_metadata(role, options): - if remove_role(role, options): - print '- successfully removed %s' % role - else: - print "- failed to remove role: %s" % role - else: - print '- %s is not installed, skipping.' % role - sys.exit(0) - -def execute_list(args, options, parser): - """ - Executes the list action. The args list can contain zero - or one role. If one is specified, only that role will be - shown, otherwise all roles in the specified directory will - be shown. - """ - - if len(args) > 1: - print "- please specify only one role to list, or specify no roles to see a full list" - sys.exit(1) - - if len(args) == 1: - # show only the request role, if it exists - role_name = args[0] - metadata = get_role_metadata(role_name, options) - if metadata: - install_info = get_galaxy_install_info(role_name, options) - version = None - if install_info: - version = install_info.get("version", None) - if not version: - version = "(unknown version)" - # show some more info about single roles here - print "- %s, %s" % (role_name, version) - else: - print "- the role %s was not found" % role_name - else: - # show all valid roles in the roles_path directory - roles_path = get_opt(options, 'roles_path') - roles_path = os.path.expanduser(roles_path) - if not os.path.exists(roles_path): - parser.print_help() - print "- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path - sys.exit(1) - elif not os.path.isdir(roles_path): - print "- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path - parser.print_help() - sys.exit(1) - path_files = os.listdir(roles_path) - for path_file in path_files: - if get_role_metadata(path_file, options): - install_info = get_galaxy_install_info(path_file, options) - version = None - if install_info: - version = install_info.get("version", None) - if not version: - version = "(unknown version)" - print "- %s, %s" % (path_file, version) - sys.exit(0) - -#------------------------------------------------------------------------------------- -# The main entry point -#------------------------------------------------------------------------------------- - -def main(): - # parse the CLI options - action = get_action(sys.argv) - parser = build_option_parser(action) - (options, args) = parser.parse_args() - - # execute the desired action - if 1: #try: - fn = globals()["execute_%s" % action] - fn(args, options, parser) - #except KeyError, e: - # print "- error: %s is not a valid action. Valid actions are: %s" % (action, ", ".join(VALID_ACTIONS)) - # sys.exit(1) - -if __name__ == "__main__": - main() diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy new file mode 120000 index 00000000000..cabb1f519aa --- /dev/null +++ b/bin/ansible-galaxy @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/bin/ansible-playbook b/bin/ansible-playbook deleted file mode 100755 index 3d6e1f9f402..00000000000 --- a/bin/ansible-playbook +++ /dev/null @@ -1,330 +0,0 @@ -#!/usr/bin/env python -# (C) 2012, Michael DeHaan, - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -####################################################### - -__requires__ = ['ansible'] -try: - import pkg_resources -except Exception: - # Use pkg_resources to find the correct versions of libraries and set - # sys.path appropriately when there are multiversion installs. But we - # have code that better expresses the errors in the places where the code - # is actually used (the deps are optional for many code paths) so we don't - # want to fail here. - pass - -import sys -import os -import stat - -# Augment PYTHONPATH to find Python modules relative to this file path -# This is so that we can find the modules when running from a local checkout -# installed as editable with `pip install -e ...` or `python setup.py develop` -local_module_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', 'lib') -) -sys.path.append(local_module_path) - -import ansible.playbook -import ansible.constants as C -import ansible.utils.template -from ansible import errors -from ansible import callbacks -from ansible import utils -from ansible.color import ANSIBLE_COLOR, stringc -from ansible.callbacks import display - -def colorize(lead, num, color): - """ Print 'lead' = 'num' in 'color' """ - if num != 0 and ANSIBLE_COLOR and color is not None: - return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color)) - else: - return "%s=%-4s" % (lead, str(num)) - -def hostcolor(host, stats, color=True): - if ANSIBLE_COLOR and color: - if stats['failures'] != 0 or stats['unreachable'] != 0: - return "%-37s" % stringc(host, 'red') - elif stats['changed'] != 0: - return "%-37s" % stringc(host, 'yellow') - else: - return "%-37s" % stringc(host, 'green') - return "%-26s" % host - - -def main(args): - ''' run ansible-playbook operations ''' - - # create parser for CLI options - parser = utils.base_parser( - constants=C, - usage = "%prog playbook.yml", - connect_opts=True, - runas_opts=True, - subset_opts=True, - check_opts=True, - diff_opts=True - ) - #parser.add_option('--vault-password', dest="vault_password", - # help="password for vault encrypted files") - parser.add_option('-t', '--tags', dest='tags', default='all', - help="only run plays and tasks tagged with these values") - parser.add_option('--skip-tags', dest='skip_tags', - help="only run plays and tasks whose tags do not match these values") - parser.add_option('--syntax-check', dest='syntax', action='store_true', - help="perform a syntax check on the playbook, but do not execute it") - parser.add_option('--list-tasks', dest='listtasks', action='store_true', - help="list all tasks that would be executed") - parser.add_option('--list-tags', dest='listtags', action='store_true', - help="list all available tags") - parser.add_option('--step', dest='step', action='store_true', - help="one-step-at-a-time: confirm each task before running") - parser.add_option('--start-at-task', dest='start_at', - help="start the playbook at the task matching this name") - parser.add_option('--force-handlers', dest='force_handlers', - default=C.DEFAULT_FORCE_HANDLERS, action='store_true', - help="run handlers even if a task fails") - parser.add_option('--flush-cache', dest='flush_cache', 
action='store_true', - help="clear the fact cache") - - options, args = parser.parse_args(args) - - if len(args) == 0: - parser.print_help(file=sys.stderr) - return 1 - - # privlege escalation command line arguments need to be mutually exclusive - utils.check_mutually_exclusive_privilege(options, parser) - - if (options.ask_vault_pass and options.vault_password_file): - parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") - - sshpass = None - becomepass = None - vault_pass = None - - options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS - - if options.listhosts or options.syntax or options.listtasks or options.listtags: - (_, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass) - else: - options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS - # Never ask for an SSH password when we run with local connection - if options.connection == "local": - options.ask_pass = False - - # set pe options - utils.normalize_become_options(options) - prompt_method = utils.choose_pass_prompt(options) - (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, - become_ask_pass=options.become_ask_pass, - ask_vault_pass=options.ask_vault_pass, - become_method=prompt_method) - - # read vault_pass from a file - if not options.ask_vault_pass and options.vault_password_file: - vault_pass = utils.read_vault_file(options.vault_password_file) - - extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass) - - only_tags = options.tags.split(",") - skip_tags = options.skip_tags - if options.skip_tags is not None: - skip_tags = options.skip_tags.split(",") - - for playbook in args: - if not os.path.exists(playbook): - raise errors.AnsibleError("the playbook: %s could not be found" % playbook) - if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)): - raise errors.AnsibleError("the playbook: %s does not appear to be a file" % playbook) - - inventory = ansible.inventory.Inventory(options.inventory, vault_password=vault_pass) - - # Note: slightly wrong, this is written so that implicit localhost - # (which is not returned in list_hosts()) is taken into account for - # warning if inventory is empty. But it can't be taken into account for - # checking if limit doesn't match any hosts. Instead we don't worry about - # limit if only implicit localhost was in inventory to start with. 
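The playbook argument checks above accept either a regular file or a FIFO, so process substitution (e.g. `ansible-playbook <(make-playbook)`) keeps working. Restated as a standalone validator, with `ValueError` standing in for `errors.AnsibleError`:

```python
import os
import stat

def validate_playbook(path):
    # a playbook may be a regular file or a FIFO, nothing else
    if not os.path.exists(path):
        raise ValueError('the playbook: %s could not be found' % path)
    mode = os.stat(path).st_mode
    if not (stat.S_ISREG(mode) or stat.S_ISFIFO(mode)):
        raise ValueError('the playbook: %s does not appear to be a file' % path)
```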
- # - # Fix this in v2 - no_hosts = False - if len(inventory.list_hosts()) == 0: - # Empty inventory - utils.warning("provided hosts list is empty, only localhost is available") - no_hosts = True - inventory.subset(options.subset) - if len(inventory.list_hosts()) == 0 and no_hosts is False: - # Invalid limit - raise errors.AnsibleError("Specified --limit does not match any hosts") - - # run all playbooks specified on the command line - for playbook in args: - - stats = callbacks.AggregateStats() - playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY) - if options.step: - playbook_cb.step = options.step - if options.start_at: - playbook_cb.start_at = options.start_at - runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY) - - pb = ansible.playbook.PlayBook( - playbook=playbook, - module_path=options.module_path, - inventory=inventory, - forks=options.forks, - remote_user=options.remote_user, - remote_pass=sshpass, - callbacks=playbook_cb, - runner_callbacks=runner_cb, - stats=stats, - timeout=options.timeout, - transport=options.connection, - become=options.become, - become_method=options.become_method, - become_user=options.become_user, - become_pass=becomepass, - extra_vars=extra_vars, - private_key_file=options.private_key_file, - only_tags=only_tags, - skip_tags=skip_tags, - check=options.check, - diff=options.diff, - vault_password=vault_pass, - force_handlers=options.force_handlers, - ) - - if options.flush_cache: - display(callbacks.banner("FLUSHING FACT CACHE")) - pb.SETUP_CACHE.flush() - - if options.listhosts or options.listtasks or options.syntax or options.listtags: - print '' - print 'playbook: %s' % playbook - print '' - playnum = 0 - for (play_ds, play_basedir) in zip(pb.playbook, pb.play_basedirs): - playnum += 1 - play = ansible.playbook.Play(pb, play_ds, play_basedir, - vault_password=pb.vault_password) - label = play.name - hosts = pb.inventory.list_hosts(play.hosts) - - if options.listhosts: - print ' play #%d (%s): host count=%d' % (playnum, label, len(hosts)) - for host in hosts: - print ' %s' % host - - if options.listtags or options.listtasks: - print ' play #%d (%s):\tTAGS: [%s]' % (playnum, label,','.join(sorted(set(play.tags)))) - - if options.listtags: - tags = [] - for task in pb.tasks_to_run_in_play(play): - tags.extend(task.tags) - print ' TASK TAGS: [%s]' % (', '.join(sorted(set(tags).difference(['untagged'])))) - - if options.listtasks: - - for task in pb.tasks_to_run_in_play(play): - if getattr(task, 'name', None) is not None: - # meta tasks have no names - print ' %s\tTAGS: [%s]' % (task.name, ', '.join(sorted(set(task.tags).difference(['untagged'])))) - - if options.listhosts or options.listtasks or options.listtags: - print '' - continue - - if options.syntax: - # if we've not exited by now then we are fine. 
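The `--list-tags` handling above reduces to set arithmetic: gather every tag on the play's tasks, drop the implicit `untagged` marker, and sort. A self-contained sketch using plain dicts in place of task objects:

```python
# Tag listing as set arithmetic, mirroring the --list-tags branch above.
def task_tags(tasks):
    tags = []
    for task in tasks:
        tags.extend(task.get('tags', []))
    return sorted(set(tags).difference(['untagged']))

print(task_tags([{'tags': ['web', 'untagged']}, {'tags': ['db', 'web']}]))
# -> ['db', 'web']
```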
- print 'Playbook Syntax is fine' - return 0 - - failed_hosts = [] - unreachable_hosts = [] - - try: - - pb.run() - - hosts = sorted(pb.stats.processed.keys()) - display(callbacks.banner("PLAY RECAP")) - playbook_cb.on_stats(pb.stats) - - for h in hosts: - t = pb.stats.summarize(h) - if t['failures'] > 0: - failed_hosts.append(h) - if t['unreachable'] > 0: - unreachable_hosts.append(h) - - retries = failed_hosts + unreachable_hosts - - if C.RETRY_FILES_ENABLED and len(retries) > 0: - filename = pb.generate_retry_inventory(retries) - if filename: - display(" to retry, use: --limit @%s\n" % filename) - - for h in hosts: - t = pb.stats.summarize(h) - - display("%s : %s %s %s %s" % ( - hostcolor(h, t), - colorize('ok', t['ok'], 'green'), - colorize('changed', t['changed'], 'yellow'), - colorize('unreachable', t['unreachable'], 'red'), - colorize('failed', t['failures'], 'red')), - screen_only=True - ) - - display("%s : %s %s %s %s" % ( - hostcolor(h, t, False), - colorize('ok', t['ok'], None), - colorize('changed', t['changed'], None), - colorize('unreachable', t['unreachable'], None), - colorize('failed', t['failures'], None)), - log_only=True - ) - - - print "" - if len(failed_hosts) > 0: - return 2 - if len(unreachable_hosts) > 0: - return 3 - - except errors.AnsibleError, e: - display("ERROR: %s" % e, color='red') - return 1 - - return 0 - - -if __name__ == "__main__": - display(" ", log_only=True) - display(" ".join(sys.argv), log_only=True) - display(" ", log_only=True) - try: - sys.exit(main(sys.argv[1:])) - except errors.AnsibleError, e: - display("ERROR: %s" % e, color='red', stderr=True) - sys.exit(1) - except KeyboardInterrupt, ke: - display("ERROR: interrupted", color='red', stderr=True) - sys.exit(1) diff --git a/bin/ansible-playbook b/bin/ansible-playbook new file mode 120000 index 00000000000..cabb1f519aa --- /dev/null +++ b/bin/ansible-playbook @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/bin/ansible-pull b/bin/ansible-pull deleted file mode 100755 index d4887631e0f..00000000000 --- a/bin/ansible-pull +++ /dev/null @@ -1,257 +0,0 @@ -#!/usr/bin/env python - -# (c) 2012, Stephen Fromm -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -# ansible-pull is a script that runs ansible in local mode -# after checking out a playbooks directory from source repo. There is an -# example playbook to bootstrap this script in the examples/ dir which -# installs ansible and sets it up to run on cron. - -# usage: -# ansible-pull -d /var/lib/ansible \ -# -U http://example.net/content.git [-C production] \ -# [path/playbook.yml] -# -# the -d and -U arguments are required; the -C argument is optional. -# -# ansible-pull accepts an optional argument to specify a playbook -# location underneath the workdir and then searches the source repo -# for playbooks in the following order, stopping at the first match: -# -# 1. $workdir/path/playbook.yml, if specified -# 2. $workdir/$fqdn.yml -# 3. $workdir/$hostname.yml -# 4. 
$workdir/local.yml -# -# the source repo must contain at least one of these playbooks. - -import os -import shutil -import sys -import datetime -import socket -import random -import time -from ansible import utils -from ansible.utils import cmd_functions -from ansible import errors -from ansible import inventory - -DEFAULT_REPO_TYPE = 'git' -DEFAULT_PLAYBOOK = 'local.yml' -PLAYBOOK_ERRORS = {1: 'File does not exist', - 2: 'File is not readable'} - -VERBOSITY=0 - -def increment_debug(option, opt, value, parser): - global VERBOSITY - VERBOSITY += 1 - -def try_playbook(path): - if not os.path.exists(path): - return 1 - if not os.access(path, os.R_OK): - return 2 - return 0 - - -def select_playbook(path, args): - playbook = None - if len(args) > 0 and args[0] is not None: - playbook = "%s/%s" % (path, args[0]) - rc = try_playbook(playbook) - if rc != 0: - print >>sys.stderr, "%s: %s" % (playbook, PLAYBOOK_ERRORS[rc]) - return None - return playbook - else: - fqdn = socket.getfqdn() - hostpb = "%s/%s.yml" % (path, fqdn) - shorthostpb = "%s/%s.yml" % (path, fqdn.split('.')[0]) - localpb = "%s/%s" % (path, DEFAULT_PLAYBOOK) - errors = [] - for pb in [hostpb, shorthostpb, localpb]: - rc = try_playbook(pb) - if rc == 0: - playbook = pb - break - else: - errors.append("%s: %s" % (pb, PLAYBOOK_ERRORS[rc])) - if playbook is None: - print >>sys.stderr, "\n".join(errors) - return playbook - - -def main(args): - """ Set up and run a local playbook """ - usage = "%prog [options] [playbook.yml]" - parser = utils.SortedOptParser(usage=usage) - parser.add_option('--purge', default=False, action='store_true', - help='purge checkout after playbook run') - parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true', - help='only run the playbook if the repository has been updated') - parser.add_option('-s', '--sleep', dest='sleep', default=None, - help='sleep for random interval (between 0 and n number of seconds) before starting. this is a useful way to disperse git requests') - parser.add_option('-f', '--force', dest='force', default=False, - action='store_true', - help='run the playbook even if the repository could ' - 'not be updated') - parser.add_option('-d', '--directory', dest='dest', default=None, - help='directory to checkout repository to') - #parser.add_option('-l', '--live', default=True, action='store_live', - # help='Print the ansible-playbook output while running') - parser.add_option('-U', '--url', dest='url', default=None, - help='URL of the playbook repository') - parser.add_option('-C', '--checkout', dest='checkout', - help='branch/tag/commit to checkout. ' - 'Defaults to behavior of repository module.') - parser.add_option('-i', '--inventory-file', dest='inventory', - help="location of the inventory host file") - parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", - help="set additional variables as key=value or YAML/JSON", default=[]) - parser.add_option('-v', '--verbose', default=False, action="callback", - callback=increment_debug, - help='Pass -vvvv to ansible-playbook') - parser.add_option('-m', '--module-name', dest='module_name', - default=DEFAULT_REPO_TYPE, - help='Module name used to check out repository. ' - 'Default is %s.' 
% DEFAULT_REPO_TYPE) - parser.add_option('--vault-password-file', dest='vault_password_file', - help="vault password file") - parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', - help='ask for sudo password') - parser.add_option('-t', '--tags', dest='tags', default=False, - help='only run plays and tasks tagged with these values') - parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', - help='adds the hostkey for the repo url if not already added') - parser.add_option('--key-file', dest='key_file', - help="Pass '-i ' to the SSH arguments used by git.") - options, args = parser.parse_args(args) - - hostname = socket.getfqdn() - if not options.dest: - # use a hostname dependent directory, in case of $HOME on nfs - options.dest = utils.prepare_writeable_dir('~/.ansible/pull/%s' % hostname) - - options.dest = os.path.abspath(options.dest) - - if not options.url: - parser.error("URL for repository not specified, use -h for help") - return 1 - - now = datetime.datetime.now() - print now.strftime("Starting ansible-pull at %F %T") - - # Attempt to use the inventory passed in as an argument - # It might not yet have been downloaded so use localhost if note - if not options.inventory or not os.path.exists(options.inventory): - inv_opts = 'localhost,' - else: - inv_opts = options.inventory - limit_opts = 'localhost:%s:127.0.0.1' % hostname - repo_opts = "name=%s dest=%s" % (options.url, options.dest) - - if VERBOSITY == 0: - base_opts = '-c local --limit "%s"' % limit_opts - elif VERBOSITY > 0: - debug_level = ''.join([ "v" for x in range(0, VERBOSITY) ]) - base_opts = '-%s -c local --limit "%s"' % (debug_level, limit_opts) - - if options.checkout: - repo_opts += ' version=%s' % options.checkout - - # Only git module is supported - if options.module_name == DEFAULT_REPO_TYPE: - if options.accept_host_key: - repo_opts += ' accept_hostkey=yes' - - if options.key_file: - repo_opts += ' key_file=%s' % options.key_file - - path = utils.plugins.module_finder.find_plugin(options.module_name) - if path is None: - sys.stderr.write("module '%s' not found.\n" % options.module_name) - return 1 - - bin_path = os.path.dirname(os.path.abspath(__file__)) - cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % ( - bin_path, inv_opts, base_opts, options.module_name, repo_opts - ) - - for ev in options.extra_vars: - cmd += ' -e "%s"' % ev - - if options.sleep: - try: - secs = random.randint(0,int(options.sleep)); - except ValueError: - parser.error("%s is not a number." % options.sleep) - return 1 - - print >>sys.stderr, "Sleeping for %d seconds..." % secs - time.sleep(secs); - - - # RUN THe CHECKOUT COMMAND - rc, out, err = cmd_functions.run_cmd(cmd, live=True) - - if rc != 0: - if options.force: - print >>sys.stderr, "Unable to update repository. Continuing with (forced) run of playbook." - else: - return rc - elif options.ifchanged and '"changed": true' not in out: - print "Repository has not changed, quitting." - return 0 - - playbook = select_playbook(options.dest, args) - - if playbook is None: - print >>sys.stderr, "Could not find a playbook to run." 
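The verbosity plumbing above counts `-v` occurrences through an optparse callback and re-serialises the count onto the generated command line. The core of that, as a pure function with example values:

```python
# Rebuild the child command's base options from a verbosity count, as the
# deleted ansible-pull does above; 3 becomes '-vvv'.
def base_opts(verbosity, limit):
    flags = '-%s ' % ('v' * verbosity) if verbosity else ''
    return '%s-c local --limit "%s"' % (flags, limit)

print(base_opts(3, 'localhost:web01:127.0.0.1'))
# -> -vvv -c local --limit "localhost:web01:127.0.0.1"
```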
- return 1 - - cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook) - if options.vault_password_file: - cmd += " --vault-password-file=%s" % options.vault_password_file - if options.inventory: - cmd += ' -i "%s"' % options.inventory - for ev in options.extra_vars: - cmd += ' -e "%s"' % ev - if options.ask_sudo_pass: - cmd += ' -K' - if options.tags: - cmd += ' -t "%s"' % options.tags - os.chdir(options.dest) - - # RUN THE PLAYBOOK COMMAND - rc, out, err = cmd_functions.run_cmd(cmd, live=True) - - if options.purge: - os.chdir('/') - try: - shutil.rmtree(options.dest) - except Exception, e: - print >>sys.stderr, "Failed to remove %s: %s" % (options.dest, str(e)) - - return rc - -if __name__ == '__main__': - try: - sys.exit(main(sys.argv[1:])) - except KeyboardInterrupt, e: - print >>sys.stderr, "Exit on user request.\n" - sys.exit(1) diff --git a/bin/ansible-pull b/bin/ansible-pull new file mode 120000 index 00000000000..cabb1f519aa --- /dev/null +++ b/bin/ansible-pull @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/bin/ansible-vault b/bin/ansible-vault deleted file mode 100755 index 22cfc0e1487..00000000000 --- a/bin/ansible-vault +++ /dev/null @@ -1,241 +0,0 @@ -#!/usr/bin/env python - -# (c) 2014, James Tanner -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -# ansible-vault is a script that encrypts/decrypts YAML files. See -# http://docs.ansible.com/playbooks_vault.html for more details. - -__requires__ = ['ansible'] -try: - import pkg_resources -except Exception: - # Use pkg_resources to find the correct versions of libraries and set - # sys.path appropriately when there are multiversion installs. But we - # have code that better expresses the errors in the places where the code - # is actually used (the deps are optional for many code paths) so we don't - # want to fail here. - pass - -import os -import sys -import traceback - -import ansible.constants as C - -from ansible import utils -from ansible import errors -from ansible.utils.vault import VaultEditor - -from optparse import OptionParser - -#------------------------------------------------------------------------------------- -# Utility functions for parsing actions/options -#------------------------------------------------------------------------------------- - -VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view") - -def build_option_parser(action): - """ - Builds an option parser object based on the action - the user wants to execute. 
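The `--purge` handling above steps out of the checkout before deleting it, since removing the process's own working directory would leave later code in a dangling cwd. That cleanup, in isolation:

```python
# Purge the checkout safely: chdir away first, then remove the tree,
# reporting (not raising) on failure, as in the --purge branch above.
import os
import shutil
import sys

def purge_checkout(dest):
    os.chdir('/')
    try:
        shutil.rmtree(dest)
    except OSError as e:
        sys.stderr.write('Failed to remove %s: %s\n' % (dest, e))
```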
- """ - - usage = "usage: %%prog [%s] [--help] [options] file_name" % "|".join(VALID_ACTIONS) - epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) - OptionParser.format_epilog = lambda self, formatter: self.epilog - parser = OptionParser(usage=usage, epilog=epilog) - - if not action: - parser.print_help() - sys.exit() - - # options for all actions - #parser.add_option('-c', '--cipher', dest='cipher', default="AES256", help="cipher to use") - parser.add_option('--debug', dest='debug', action="store_true", help="debug") - parser.add_option('--vault-password-file', dest='password_file', - help="vault password file", default=C.DEFAULT_VAULT_PASSWORD_FILE) - - # options specific to actions - if action == "create": - parser.set_usage("usage: %prog create [options] file_name") - elif action == "decrypt": - parser.set_usage("usage: %prog decrypt [options] file_name") - elif action == "edit": - parser.set_usage("usage: %prog edit [options] file_name") - elif action == "view": - parser.set_usage("usage: %prog view [options] file_name") - elif action == "encrypt": - parser.set_usage("usage: %prog encrypt [options] file_name") - elif action == "rekey": - parser.set_usage("usage: %prog rekey [options] file_name") - - # done, return the parser - return parser - -def get_action(args): - """ - Get the action the user wants to execute from the - sys argv list. - """ - for i in range(0,len(args)): - arg = args[i] - if arg in VALID_ACTIONS: - del args[i] - return arg - return None - -def get_opt(options, k, defval=""): - """ - Returns an option from an Optparse values instance. - """ - try: - data = getattr(options, k) - except: - return defval - if k == "roles_path": - if os.pathsep in data: - data = data.split(os.pathsep)[0] - return data - -#------------------------------------------------------------------------------------- -# Command functions -#------------------------------------------------------------------------------------- - -def execute_create(args, options, parser): - if len(args) > 1: - raise errors.AnsibleError("'create' does not accept more than one filename") - - if not options.password_file: - password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = 'AES256' - if hasattr(options, 'cipher'): - cipher = options.cipher - - this_editor = VaultEditor(cipher, password, args[0]) - this_editor.create_file() - -def execute_decrypt(args, options, parser): - - if not options.password_file: - password, new_password = utils.ask_vault_passwords(ask_vault_pass=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = 'AES256' - if hasattr(options, 'cipher'): - cipher = options.cipher - - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.decrypt_file() - - print "Decryption successful" - -def execute_edit(args, options, parser): - - if len(args) > 1: - raise errors.AnsibleError("edit does not accept more than one filename") - - if not options.password_file: - password, new_password = utils.ask_vault_passwords(ask_vault_pass=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = None - - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.edit_file() - -def execute_view(args, options, parser): - - if len(args) > 1: - raise errors.AnsibleError("view does not accept more than one filename") - - if not options.password_file: - 
password, new_password = utils.ask_vault_passwords(ask_vault_pass=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = None - - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.view_file() - -def execute_encrypt(args, options, parser): - - if not options.password_file: - password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = 'AES256' - if hasattr(options, 'cipher'): - cipher = options.cipher - - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.encrypt_file() - - print "Encryption successful" - -def execute_rekey(args, options, parser): - - if not options.password_file: - password, __ = utils.ask_vault_passwords(ask_vault_pass=True) - else: - password = utils.read_vault_file(options.password_file) - - __, new_password = utils.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True) - - cipher = None - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.rekey_file(new_password) - - print "Rekey successful" - -#------------------------------------------------------------------------------------- -# MAIN -#------------------------------------------------------------------------------------- - -def main(): - - action = get_action(sys.argv) - parser = build_option_parser(action) - (options, args) = parser.parse_args() - - if not len(args): - raise errors.AnsibleError( - "The '%s' command requires a filename as the first argument" % action - ) - - # execute the desired action - try: - fn = globals()["execute_%s" % action] - fn(args, options, parser) - except Exception, err: - if options.debug: - print traceback.format_exc() - print "ERROR:",err - sys.exit(1) - -if __name__ == "__main__": - main() diff --git a/bin/ansible-vault b/bin/ansible-vault new file mode 120000 index 00000000000..cabb1f519aa --- /dev/null +++ b/bin/ansible-vault @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/contrib/README.md b/contrib/README.md new file mode 100644 index 00000000000..dab0da4ba72 --- /dev/null +++ b/contrib/README.md @@ -0,0 +1,17 @@ +inventory +========= + +Inventory scripts allow you to store your hosts, groups, and variables in any way +you like. Examples include discovering inventory from EC2 or pulling it from +Cobbler. These could also be used to interface with LDAP or database. + +chmod +x an inventory plugin and either name it /etc/ansible/hosts or use ansible +with -i to designate the path to the script. You might also need to copy a configuration +file with the same name and/or set environment variables, the scripts or configuration +files have more details. + +contributions welcome +===================== + +Send in pull requests to add plugins of your own. The sky is the limit! 
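For the contrib README above: the contract an executable inventory script has to satisfy is small. A minimal example that answers `--list` with a JSON group map and `--host` with per-host variables; the hostnames and variables here are placeholders:

```python
#!/usr/bin/env python
# Minimal dynamic inventory script of the kind the README describes.
import json
import sys

INVENTORY = {
    'webservers': {'hosts': ['web01.example.com', 'web02.example.com']},
    '_meta': {'hostvars': {'web01.example.com': {'http_port': 8080}}},
}

if len(sys.argv) > 1 and sys.argv[1] == '--host':
    # per-host variables for a single host
    print(json.dumps(INVENTORY['_meta']['hostvars'].get(sys.argv[2], {})))
else:
    # default / --list: the full group map
    print(json.dumps(INVENTORY))
```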
+ diff --git a/plugins/inventory/abiquo.ini b/contrib/inventory/abiquo.ini similarity index 100% rename from plugins/inventory/abiquo.ini rename to contrib/inventory/abiquo.ini diff --git a/plugins/inventory/abiquo.py b/contrib/inventory/abiquo.py similarity index 100% rename from plugins/inventory/abiquo.py rename to contrib/inventory/abiquo.py diff --git a/plugins/inventory/apache-libcloud.py b/contrib/inventory/apache-libcloud.py similarity index 96% rename from plugins/inventory/apache-libcloud.py rename to contrib/inventory/apache-libcloud.py index 95804095da9..151daeefe08 100755 --- a/plugins/inventory/apache-libcloud.py +++ b/contrib/inventory/apache-libcloud.py @@ -222,12 +222,17 @@ class LibcloudInventory(object): self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest) ''' # Inventory: Group by key pair - if node.extra['keyname']: - self.push(self.inventory, self.to_safe('key_' + node.extra['keyname']), dest) + if node.extra['key_name']: + self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest) # Inventory: Group by security group, quick thing to handle single sg - if node.extra['securitygroup']: - self.push(self.inventory, self.to_safe('sg_' + node.extra['securitygroup'][0]), dest) + if node.extra['security_group']: + self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest) + + # Inventory: Group by tag + if node.extra['tags']: + for tagkey in node.extra['tags'].keys(): + self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest) def get_host_info(self): ''' diff --git a/plugins/inventory/cloudstack.ini b/contrib/inventory/cloudstack.ini similarity index 100% rename from plugins/inventory/cloudstack.ini rename to contrib/inventory/cloudstack.ini diff --git a/plugins/inventory/cloudstack.py b/contrib/inventory/cloudstack.py similarity index 100% rename from plugins/inventory/cloudstack.py rename to contrib/inventory/cloudstack.py diff --git a/plugins/inventory/cobbler.ini b/contrib/inventory/cobbler.ini similarity index 100% rename from plugins/inventory/cobbler.ini rename to contrib/inventory/cobbler.ini diff --git a/plugins/inventory/cobbler.py b/contrib/inventory/cobbler.py similarity index 100% rename from plugins/inventory/cobbler.py rename to contrib/inventory/cobbler.py diff --git a/plugins/inventory/collins.ini b/contrib/inventory/collins.ini similarity index 100% rename from plugins/inventory/collins.ini rename to contrib/inventory/collins.ini diff --git a/plugins/inventory/collins.py b/contrib/inventory/collins.py similarity index 100% rename from plugins/inventory/collins.py rename to contrib/inventory/collins.py diff --git a/plugins/inventory/consul.ini b/contrib/inventory/consul.ini similarity index 100% rename from plugins/inventory/consul.ini rename to contrib/inventory/consul.ini diff --git a/plugins/inventory/consul_io.py b/contrib/inventory/consul_io.py similarity index 100% rename from plugins/inventory/consul_io.py rename to contrib/inventory/consul_io.py diff --git a/plugins/inventory/digital_ocean.ini b/contrib/inventory/digital_ocean.ini similarity index 66% rename from plugins/inventory/digital_ocean.ini rename to contrib/inventory/digital_ocean.ini index c4e3fe21419..021899731c4 100644 --- a/plugins/inventory/digital_ocean.ini +++ b/contrib/inventory/digital_ocean.ini @@ -3,12 +3,11 @@ [digital_ocean] -# The module needs your DigitalOcean Client ID and API Key. 
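The tag grouping added to `apache-libcloud.py` above turns every instance tag into a `tag_<key>_<value>` group. A self-contained sketch; `push` and `to_safe` approximate the script's own helpers:

```python
# Group an instance under one inventory group per tag, as the new
# apache-libcloud.py hunk above does.
import re

def to_safe(word):
    # replace characters that are invalid in group names
    return re.sub(r'[^A-Za-z0-9\-\.]', '_', word)

def push(inventory, key, element):
    inventory.setdefault(key, []).append(element)

inventory = {}
tags = {'env': 'staging', 'role': 'web'}
for key, value in tags.items():
    push(inventory, to_safe('tag_' + key + '_' + value), '10.0.0.5')
print(inventory)  # {'tag_env_staging': ['10.0.0.5'], 'tag_role_web': ['10.0.0.5']}
```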
-# These may also be specified on the command line via --client-id and --api-key -# or via the environment variables DO_CLIENT_ID and DO_API_KEY +# The module needs your DigitalOcean API Token. +# It may also be specified on the command line via --api-token +# or via the environment variables DO_API_TOKEN or DO_API_KEY # -#client_id = abcdefg123456 -#api_key = 123456abcdefg +#api_token = 123456abcdefg # API calls to DigitalOcean may be slow. For this reason, we cache the results diff --git a/plugins/inventory/digital_ocean.py b/contrib/inventory/digital_ocean.py similarity index 55% rename from plugins/inventory/digital_ocean.py rename to contrib/inventory/digital_ocean.py index 1c3eccd21ed..1927f09fdf3 100755 --- a/plugins/inventory/digital_ocean.py +++ b/contrib/inventory/digital_ocean.py @@ -24,12 +24,12 @@ found. You can force this script to use the cache with --force-cache. Configuration is read from `digital_ocean.ini`, then from environment variables, then and command-line arguments. -Most notably, the DigitalOcean Client ID and API Key must be specified. They -can be specified in the INI file or with the following environment variables: - export DO_CLIENT_ID='DO123' DO_API_KEY='abc123' +Most notably, the DigitalOcean API Token must be specified. It can be specified +in the INI file or with the following environment variables: + export DO_API_TOKEN='abc123' or + export DO_API_KEY='abc123' -Alternatively, they can be passed on the command-line with --client-id and ---api-key. +Alternatively, it can be passed on the command-line with --api-token. If you specify DigitalOcean credentials in the INI file, a handy way to get them into your environment (e.g., to use the digital_ocean module) @@ -43,25 +43,31 @@ The following groups are generated from --list: - image_ID - image_NAME - distro_NAME (distribution NAME from image) - - region_ID - region_NAME - - size_ID - size_NAME - status_STATUS When run against a specific host, this script returns the following variables: + - do_backup_ids - do_created_at - - do_distroy + - do_disk + - do_features - list - do_id - - do_image - - do_image_id + - do_image - object - do_ip_address + - do_private_ip_address + - do_kernel - object + - do_locked + - de_memory - do_name - - do_region - - do_region_id - - do_size - - do_size_id + - do_networks - object + - do_next_backup_window + - do_region - object + - do_size - object + - do_size_slug + - do_snapshot_ids - list - do_status + - do_vcpus ----- ``` @@ -70,8 +76,9 @@ usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] [--ssh-keys] [--domains] [--pretty] [--cache-path CACHE_PATH] [--cache-max_age CACHE_MAX_AGE] - [--refresh-cache] [--client-id CLIENT_ID] - [--api-key API_KEY] + [--force-cache] + [--refresh-cache] + [--api-token API_TOKEN] Produce an Ansible Inventory file based on DigitalOcean credentials @@ -93,12 +100,11 @@ optional arguments: Path to the cache files (default: .) 
--cache-max_age CACHE_MAX_AGE Maximum age of the cached items (default: 0) + --force-cache Only use data from the cache --refresh-cache Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files) - --client-id CLIENT_ID, -c CLIENT_ID - DigitalOcean Client ID - --api-key API_KEY, -a API_KEY - DigitalOcean API Key + --api-token API_TOKEN, -a API_TOKEN + DigitalOcean API Token ``` ''' @@ -106,7 +112,7 @@ optional arguments: # (c) 2013, Evan Wies # # Inspired by the EC2 inventory plugin: -# https://github.com/ansible/ansible/blob/devel/plugins/inventory/ec2.py +# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py # # This file is part of Ansible, # @@ -157,7 +163,6 @@ class DigitalOceanInventory(object): # DigitalOceanInventory data self.data = {} # All DigitalOcean data self.inventory = {} # Ansible Inventory - self.index = {} # Various indices of Droplet metadata # Define defaults self.cache_path = '.' @@ -169,49 +174,61 @@ class DigitalOceanInventory(object): self.read_cli_args() # Verify credentials were set - if not hasattr(self, 'client_id') or not hasattr(self, 'api_key'): - print '''Could not find values for DigitalOcean client_id and api_key. -They must be specified via either ini file, command line argument (--client-id and --api-key), -or environment variables (DO_CLIENT_ID and DO_API_KEY)''' + if not hasattr(self, 'api_token'): + print '''Could not find values for DigitalOcean api_token. +They must be specified via either ini file, command line argument (--api-token), +or environment variables (DO_API_TOKEN)''' sys.exit(-1) # env command, show DigitalOcean credentials if self.args.env: - print "DO_CLIENT_ID=%s DO_API_KEY=%s" % (self.client_id, self.api_key) + print "DO_API_TOKEN=%s" % self.api_token sys.exit(0) # Manage cache self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" self.cache_refreshed = False - if not self.args.force_cache and self.args.refresh_cache or not self.is_cache_valid(): - self.load_all_data_from_digital_ocean() - else: + if self.is_cache_valid: self.load_from_cache() if len(self.data) == 0: if self.args.force_cache: print '''Cache is empty and --force-cache was specified''' sys.exit(-1) - self.load_all_data_from_digital_ocean() - else: - # We always get fresh droplets for --list, --host, --all, and --droplets - # unless --force-cache is specified - if not self.args.force_cache and ( - self.args.list or self.args.host or self.args.all or self.args.droplets): - self.load_droplets_from_digital_ocean() + + self.manager = DoManager(None, self.api_token, api_version=2) # Pick the json_data to print based on the CLI command - if self.args.droplets: json_data = { 'droplets': self.data['droplets'] } - elif self.args.regions: json_data = { 'regions': self.data['regions'] } - elif self.args.images: json_data = { 'images': self.data['images'] } - elif self.args.sizes: json_data = { 'sizes': self.data['sizes'] } - elif self.args.ssh_keys: json_data = { 'ssh_keys': self.data['ssh_keys'] } - elif self.args.domains: json_data = { 'domains': self.data['domains'] } - elif self.args.all: json_data = self.data - - elif self.args.host: json_data = self.load_droplet_variables_for_host() + if self.args.droplets: + self.load_from_digital_ocean('droplets') + json_data = {'droplets': self.data['droplets']} + elif self.args.regions: + self.load_from_digital_ocean('regions') + json_data = {'regions': self.data['regions']} + elif self.args.images: + self.load_from_digital_ocean('images') + json_data = {'images': 
self.data['images']} + elif self.args.sizes: + self.load_from_digital_ocean('sizes') + json_data = {'sizes': self.data['sizes']} + elif self.args.ssh_keys: + self.load_from_digital_ocean('ssh_keys') + json_data = {'ssh_keys': self.data['ssh_keys']} + elif self.args.domains: + self.load_from_digital_ocean('domains') + json_data = {'domains': self.data['domains']} + elif self.args.all: + self.load_from_digital_ocean() + json_data = self.data + elif self.args.host: + json_data = self.load_droplet_variables_for_host() else: # '--list' this is last to make it default - json_data = self.inventory + self.load_from_digital_ocean('droplets') + self.build_inventory() + json_data = self.inventory + + if self.cache_refreshed: + self.write_to_cache() if self.args.pretty: print json.dumps(json_data, sort_keys=True, indent=2) @@ -230,10 +247,8 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)''' config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini') # Credentials - if config.has_option('digital_ocean', 'client_id'): - self.client_id = config.get('digital_ocean', 'client_id') - if config.has_option('digital_ocean', 'api_key'): - self.api_key = config.get('digital_ocean', 'api_key') + if config.has_option('digital_ocean', 'api_token'): + self.api_token = config.get('digital_ocean', 'api_token') # Cache related if config.has_option('digital_ocean', 'cache_path'): @@ -245,8 +260,10 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)''' def read_environment(self): ''' Reads the settings from environment variables ''' # Setup credentials - if os.getenv("DO_CLIENT_ID"): self.client_id = os.getenv("DO_CLIENT_ID") - if os.getenv("DO_API_KEY"): self.api_key = os.getenv("DO_API_KEY") + if os.getenv("DO_API_TOKEN"): + self.api_token = os.getenv("DO_API_TOKEN") + if os.getenv("DO_API_KEY"): + self.api_token = os.getenv("DO_API_KEY") def read_cli_args(self): @@ -269,70 +286,57 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)''' parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') - parser.add_argument('--refresh-cache','-r', action='store_true', default=False, help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') + parser.add_argument('--refresh-cache','-r', action='store_true', default=False, + help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') - parser.add_argument('--env','-e', action='store_true', help='Display DO_CLIENT_ID and DO_API_KEY') - parser.add_argument('--client-id','-c', action='store', help='DigitalOcean Client ID') - parser.add_argument('--api-key','-a', action='store', help='DigitalOcean API Key') + parser.add_argument('--env','-e', action='store_true', help='Display DO_API_TOKEN') + parser.add_argument('--api-token','-a', action='store', help='DigitalOcean API Token') self.args = parser.parse_args() - if self.args.client_id: self.client_id = self.args.client_id - if self.args.api_key: self.api_key = self.args.api_key - if self.args.cache_path: self.cache_path = self.args.cache_path - if self.args.cache_max_age: self.cache_max_age = self.args.cache_max_age + if self.args.api_token: + self.api_token = self.args.api_token # Make --list default if none of the other commands are specified - if 
(not self.args.droplets and not self.args.regions and not self.args.images and - not self.args.sizes and not self.args.ssh_keys and not self.args.domains and - not self.args.all and not self.args.host): - self.args.list = True + if (not self.args.droplets and not self.args.regions and + not self.args.images and not self.args.sizes and + not self.args.ssh_keys and not self.args.domains and + not self.args.all and not self.args.host): + self.args.list = True ########################################################################### # Data Management ########################################################################### - def load_all_data_from_digital_ocean(self): - ''' Use dopy to get all the information from DigitalOcean and save data in cache files ''' - manager = DoManager(self.client_id, self.api_key) - - self.data = {} - self.data['droplets'] = self.sanitize_list(manager.all_active_droplets()) - self.data['regions'] = self.sanitize_list(manager.all_regions()) - self.data['images'] = self.sanitize_list(manager.all_images(filter=None)) - self.data['sizes'] = self.sanitize_list(manager.sizes()) - self.data['ssh_keys'] = self.sanitize_list(manager.all_ssh_keys()) - self.data['domains'] = self.sanitize_list(manager.all_domains()) - - self.index = {} - self.index['region_to_name'] = self.build_index(self.data['regions'], 'id', 'name') - self.index['size_to_name'] = self.build_index(self.data['sizes'], 'id', 'name') - self.index['image_to_name'] = self.build_index(self.data['images'], 'id', 'name') - self.index['image_to_distro'] = self.build_index(self.data['images'], 'id', 'distribution') - self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False) - - self.build_inventory() - - self.write_to_cache() - - - def load_droplets_from_digital_ocean(self): - ''' Use dopy to get droplet information from DigitalOcean and save data in cache files ''' - manager = DoManager(self.client_id, self.api_key) - self.data['droplets'] = self.sanitize_list(manager.all_active_droplets()) - self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False) - self.build_inventory() - self.write_to_cache() - - - def build_index(self, source_seq, key_from, key_to, use_slug=True): - dest_dict = {} - for item in source_seq: - name = (use_slug and item.has_key('slug')) and item['slug'] or item[key_to] - key = item[key_from] - dest_dict[key] = name - return dest_dict + def load_from_digital_ocean(self, resource=None): + '''Get JSON from DigitalOcean API''' + if self.args.force_cache: + return + # We always get fresh droplets + if self.is_cache_valid() and not (resource=='droplets' or resource is None): + return + if self.args.refresh_cache: + resource=None + + if resource == 'droplets' or resource is None: + self.data['droplets'] = self.manager.all_active_droplets() + self.cache_refreshed = True + if resource == 'regions' or resource is None: + self.data['regions'] = self.manager.all_regions() + self.cache_refreshed = True + if resource == 'images' or resource is None: + self.data['images'] = self.manager.all_images(filter=None) + self.cache_refreshed = True + if resource == 'sizes' or resource is None: + self.data['sizes'] = self.manager.sizes() + self.cache_refreshed = True + if resource == 'ssh_keys' or resource is None: + self.data['ssh_keys'] = self.manager.all_ssh_keys() + self.cache_refreshed = True + if resource == 'domains' or resource is None: + self.data['domains'] = self.manager.all_domains() + self.cache_refreshed = True def 
build_inventory(self): @@ -341,74 +345,46 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)''' # add all droplets by id and name for droplet in self.data['droplets']: - dest = droplet['ip_address'] + #when using private_networking, the API reports the private one in "ip_address", which is useless. We need the public one for Ansible to work + if 'private_networking' in droplet['features']: + for net in droplet['networks']['v4']: + if net['type']=='public': + dest=net['ip_address'] + else: + continue + else: + dest = droplet['ip_address'] self.inventory[droplet['id']] = [dest] self.push(self.inventory, droplet['name'], dest) - self.push(self.inventory, 'region_'+droplet['region_id'], dest) - self.push(self.inventory, 'image_' +droplet['image_id'], dest) - self.push(self.inventory, 'size_' +droplet['size_id'], dest) - self.push(self.inventory, 'status_'+droplet['status'], dest) + self.push(self.inventory, 'region_' + droplet['region']['slug'], dest) + self.push(self.inventory, 'image_' + str(droplet['image']['id']), dest) + self.push(self.inventory, 'size_' + droplet['size']['slug'], dest) - region_name = self.index['region_to_name'].get(droplet['region_id']) - if region_name: - self.push(self.inventory, 'region_'+region_name, dest) - - size_name = self.index['size_to_name'].get(droplet['size_id']) - if size_name: - self.push(self.inventory, 'size_'+size_name, dest) - - image_name = self.index['image_to_name'].get(droplet['image_id']) - if image_name: - self.push(self.inventory, 'image_'+image_name, dest) + image_slug = droplet['image']['slug'] + if image_slug: + self.push(self.inventory, 'image_' + self.to_safe(image_slug), dest) + else: + image_name = droplet['image']['name'] + if image_name: + self.push(self.inventory, 'image_' + self.to_safe(image_name), dest) - distro_name = self.index['image_to_distro'].get(droplet['image_id']) - if distro_name: - self.push(self.inventory, 'distro_'+distro_name, dest) + self.push(self.inventory, 'distro_' + self.to_safe(droplet['image']['distribution']), dest) + self.push(self.inventory, 'status_' + droplet['status'], dest) def load_droplet_variables_for_host(self): '''Generate a JSON response to a --host call''' - host = self.to_safe(str(self.args.host)) + host = int(self.args.host) - if not host in self.index['host_to_droplet']: - # try updating cache - if not self.args.force_cache: - self.load_all_data_from_digital_ocean() - if not host in self.index['host_to_droplet']: - # host might not exist anymore - return {} - - droplet = None - if self.cache_refreshed: - for drop in self.data['droplets']: - if drop['ip_address'] == host: - droplet = self.sanitize_dict(drop) - break - else: - # Cache wasn't refreshed this run, so hit DigitalOcean API - manager = DoManager(self.client_id, self.api_key) - droplet_id = self.index['host_to_droplet'][host] - droplet = self.sanitize_dict(manager.show_droplet(droplet_id)) - - if not droplet: - return {} + droplet = self.manager.show_droplet(host) # Put all the information in a 'do_' namespace info = {} for k, v in droplet.items(): info['do_'+k] = v - # Generate user-friendly variables (i.e. 
not the ID's) - if droplet.has_key('region_id'): - info['do_region'] = self.index['region_to_name'].get(droplet['region_id']) - if droplet.has_key('size_id'): - info['do_size'] = self.index['size_to_name'].get(droplet['size_id']) - if droplet.has_key('image_id'): - info['do_image'] = self.index['image_to_name'].get(droplet['image_id']) - info['do_distro'] = self.index['image_to_distro'].get(droplet['image_id']) - - return info + return {'droplet': info} @@ -428,19 +404,21 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)''' def load_from_cache(self): ''' Reads the data from the cache file and assigns it to member variables as Python Objects''' - cache = open(self.cache_filename, 'r') - json_data = cache.read() - cache.close() - data = json.loads(json_data) + try: + cache = open(self.cache_filename, 'r') + json_data = cache.read() + cache.close() + data = json.loads(json_data) + except IOError: + data = {'data': {}, 'inventory': {}} self.data = data['data'] self.inventory = data['inventory'] - self.index = data['index'] def write_to_cache(self): ''' Writes data in JSON format to a file ''' - data = { 'data': self.data, 'index': self.index, 'inventory': self.inventory } + data = { 'data': self.data, 'inventory': self.inventory } json_data = json.dumps(data, sort_keys=True, indent=2) cache = open(self.cache_filename, 'w') @@ -448,7 +426,6 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)''' cache.close() - ########################################################################### # Utilities ########################################################################### @@ -456,7 +433,7 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)''' def push(self, my_dict, key, element): ''' Pushed an element onto an array that may not have been defined in the dict ''' if key in my_dict: - my_dict[key].append(element); + my_dict[key].append(element) else: my_dict[key] = [element] @@ -466,21 +443,6 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)''' return re.sub("[^A-Za-z0-9\-\.]", "_", word) - def sanitize_dict(self, d): - new_dict = {} - for k, v in d.items(): - if v != None: - new_dict[self.to_safe(str(k))] = self.to_safe(str(v)) - return new_dict - - - def sanitize_list(self, seq): - new_seq = [] - for d in seq: - new_seq.append(self.sanitize_dict(d)) - return new_seq - - ########################################################################### # Run the script diff --git a/plugins/inventory/docker.py b/contrib/inventory/docker.py similarity index 100% rename from plugins/inventory/docker.py rename to contrib/inventory/docker.py diff --git a/plugins/inventory/docker.yml b/contrib/inventory/docker.yml similarity index 100% rename from plugins/inventory/docker.yml rename to contrib/inventory/docker.yml diff --git a/plugins/inventory/ec2.ini b/contrib/inventory/ec2.ini similarity index 75% rename from plugins/inventory/ec2.ini rename to contrib/inventory/ec2.ini index 1866f0bf3d6..50430ce0ed4 100644 --- a/plugins/inventory/ec2.ini +++ b/contrib/inventory/ec2.ini @@ -35,6 +35,9 @@ destination_variable = public_dns_name # private subnet, this should be set to 'private_ip_address', and Ansible must # be run from within EC2. The key of an EC2 tag may optionally be used; however # the boto instance variables hold precedence in the event of a collision. 
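The `build_inventory` rewrite above addresses droplets with `private_networking`, where the v2 API reports the private address in the top-level `ip_address`. Restated as a pure function that returns the first public v4 address (the sample droplet data is illustrative):

```python
# Pick the address Ansible should connect to, per the build_inventory hunk above.
def public_address(droplet):
    if 'private_networking' in droplet.get('features', []):
        for net in droplet['networks']['v4']:
            if net['type'] == 'public':
                return net['ip_address']
        return None
    return droplet['ip_address']

droplet = {
    'features': ['private_networking'],
    'ip_address': '10.132.0.7',  # private when private_networking is on
    'networks': {'v4': [
        {'type': 'private', 'ip_address': '10.132.0.7'},
        {'type': 'public', 'ip_address': '203.0.113.9'},
    ]},
}
print(public_address(droplet))  # -> 203.0.113.9
```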
+# WARNING: instances in a private VPC _without_ a public IP address
+# will not be listed in the inventory until you set:
+# vpc_destination_variable = 'private_ip_address'
 vpc_destination_variable = ip_address
 
 # To tag instances on EC2 with the resource records that point to them from
@@ -44,6 +47,9 @@ route53 = False
 # To exclude RDS instances from the inventory, uncomment and set to False.
 #rds = False
 
+# To exclude ElastiCache instances from the inventory, uncomment and set to False.
+#elasticache = False
+
 # Additionally, you can specify the list of zones to exclude looking up in
 # 'route53_excluded_zones' as a comma-separated list.
 # route53_excluded_zones = samplezone1.com, samplezone2.com
@@ -52,10 +58,27 @@ route53 = False
 # 'all_instances' to True to return all instances regardless of state.
 all_instances = False
 
+# By default, only EC2 instances in the 'running' state are returned. Specify
+# EC2 instance states to return as a comma-separated list. This
+# option is overridden when 'all_instances' is True.
+# instance_states = pending, running, shutting-down, terminated, stopping, stopped
+
 # By default, only RDS instances in the 'available' state are returned. Set
 # 'all_rds_instances' to True return all RDS instances regardless of state.
 all_rds_instances = False
 
+# By default, only ElastiCache clusters and nodes in the 'available' state
+# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
+# to True to return all ElastiCache clusters and nodes, regardless of state.
+#
+# Note that all_elasticache_nodes only applies to listed clusters. That means
+# if you set all_elasticache_clusters to False, no nodes will be returned from
+# unavailable clusters, regardless of their state and of what you set for
+# all_elasticache_nodes.
+all_elasticache_replication_groups = False
+all_elasticache_clusters = False
+all_elasticache_nodes = False
+
 # API calls to EC2 are slow. For this reason, we cache the results of an API
 # call. Set this to the path you want cache files to be written to. Two files
 # will be written to this directory:
@@ -86,12 +109,16 @@ group_by_tag_none = True
 group_by_route53_names = True
 group_by_rds_engine = True
 group_by_rds_parameter_group = True
+group_by_elasticache_engine = True
+group_by_elasticache_cluster = True
+group_by_elasticache_parameter_group = True
+group_by_elasticache_replication_group = True
 
 # If you only want to include hosts that match a certain regular expression
-# pattern_include = stage-*
+# pattern_include = staging-*
 
 # If you want to exclude any hosts that match a certain regular expression
-# pattern_exclude = stage-*
+# pattern_exclude = staging-*
 
 # Instance filters can be used to control which instances are retrieved for
 # inventory. For the full list of possible filters, please read the EC2 API
@@ -99,14 +126,14 @@ group_by_rds_parameter_group = True
 # Filters are key/value pairs separated by '=', to list multiple filters use
 # a list separated by commas. See examples below.
-# Retrieve only instances with (key=value) env=stage tag -# instance_filters = tag:env=stage +# Retrieve only instances with (key=value) env=staging tag +# instance_filters = tag:env=staging # Retrieve only instances with role=webservers OR role=dbservers tag # instance_filters = tag:role=webservers,tag:role=dbservers -# Retrieve only t1.micro instances OR instances with tag env=stage -# instance_filters = instance-type=t1.micro,tag:env=stage +# Retrieve only t1.micro instances OR instances with tag env=staging +# instance_filters = instance-type=t1.micro,tag:env=staging # You can use wildcards in filter values also. Below will list instances which # tag Name value matches webservers1* diff --git a/plugins/inventory/ec2.py b/contrib/inventory/ec2.py similarity index 58% rename from plugins/inventory/ec2.py rename to contrib/inventory/ec2.py index 16ac93f5ee4..e4b0b072d43 100755 --- a/plugins/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -121,6 +121,7 @@ from time import time import boto from boto import ec2 from boto import rds +from boto import elasticache from boto import route53 import six @@ -191,7 +192,7 @@ class Ec2Inventory(object): else: config = configparser.ConfigParser() ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') - ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path) + ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path))) config.read(ec2_ini_path) # is eucalyptus? @@ -232,16 +233,63 @@ class Ec2Inventory(object): if config.has_option('ec2', 'rds'): self.rds_enabled = config.getboolean('ec2', 'rds') - # Return all EC2 and RDS instances (if RDS is enabled) + # Include ElastiCache instances? + self.elasticache_enabled = True + if config.has_option('ec2', 'elasticache'): + self.elasticache_enabled = config.getboolean('ec2', 'elasticache') + + # Return all EC2 instances? if config.has_option('ec2', 'all_instances'): self.all_instances = config.getboolean('ec2', 'all_instances') else: self.all_instances = False + + # Instance states to be gathered in inventory. Default is 'running'. + # Setting 'all_instances' to 'yes' overrides this option. + ec2_valid_instance_states = [ + 'pending', + 'running', + 'shutting-down', + 'terminated', + 'stopping', + 'stopped' + ] + self.ec2_instance_states = [] + if self.all_instances: + self.ec2_instance_states = ec2_valid_instance_states + elif config.has_option('ec2', 'instance_states'): + for instance_state in config.get('ec2', 'instance_states').split(','): + instance_state = instance_state.strip() + if instance_state not in ec2_valid_instance_states: + continue + self.ec2_instance_states.append(instance_state) + else: + self.ec2_instance_states = ['running'] + + # Return all RDS instances? (if RDS is enabled) if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') else: self.all_rds_instances = False + # Return all ElastiCache replication groups? (if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled: + self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups') + else: + self.all_elasticache_replication_groups = False + + # Return all ElastiCache clusters? 
(if ElastiCache is enabled)
+        if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
+            self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
+        else:
+            self.all_elasticache_clusters = False
+
+        # Return all ElastiCache nodes? (if ElastiCache is enabled)
+        if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
+            self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
+        else:
+            self.all_elasticache_nodes = False
+
         # Cache related
         cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
         if not os.path.exists(cache_dir):
@@ -272,6 +320,10 @@ class Ec2Inventory(object):
             'group_by_route53_names',
             'group_by_rds_engine',
             'group_by_rds_parameter_group',
+            'group_by_elasticache_engine',
+            'group_by_elasticache_cluster',
+            'group_by_elasticache_parameter_group',
+            'group_by_elasticache_replication_group',
         ]
         for option in group_by_options:
             if config.has_option('ec2', option):
@@ -334,6 +386,9 @@ class Ec2Inventory(object):
             self.get_instances_by_region(region)
             if self.rds_enabled:
                 self.get_rds_instances_by_region(region)
+            if self.elasticache_enabled:
+                self.get_elasticache_clusters_by_region(region)
+                self.get_elasticache_replication_groups_by_region(region)
 
         self.write_to_cache(self.inventory, self.cache_path_cache)
         self.write_to_cache(self.index, self.cache_path_index)
@@ -373,7 +428,7 @@ class Ec2Inventory(object):
         else:
             backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
             error = "Error connecting to %s backend.\n%s" % (backend, e.message)
-        self.fail_with_error(error)
+        self.fail_with_error(error, 'getting EC2 instances')
 
     def get_rds_instances_by_region(self, region):
         ''' Makes an AWS API call to the list of RDS instances in a particular
@@ -387,12 +442,82 @@ class Ec2Inventory(object):
                     self.add_rds_instance(instance, region)
         except boto.exception.BotoServerError as e:
             error = e.reason
-
+
             if e.error_code == 'AuthFailure':
                 error = self.get_auth_error_message()
             if not e.reason == "Forbidden":
                 error = "Looks like AWS RDS is down:\n%s" % e.message
-            self.fail_with_error(error)
+            self.fail_with_error(error, 'getting RDS instances')
+
+    def get_elasticache_clusters_by_region(self, region):
+        ''' Makes an AWS API call to the list of ElastiCache clusters (with
+        nodes' info) in a particular region.'''
+
+        # The ElastiCache boto module doesn't provide a get_all_instances
+        # method, so we need to call describe directly (it would be called
+        # by the shorthand method anyway...)
+        try:
+            conn = elasticache.connect_to_region(region)
+            if conn:
+                # show_cache_node_info = True
+                # because we also want nodes' information
+                response = conn.describe_cache_clusters(None, None, None, True)
+
+        except boto.exception.BotoServerError as e:
+            error = e.reason
+
+            if e.error_code == 'AuthFailure':
+                error = self.get_auth_error_message()
+            if not e.reason == "Forbidden":
+                error = "Looks like AWS ElastiCache is down:\n%s" % e.message
+            self.fail_with_error(error, 'getting ElastiCache clusters')
+
+        try:
+            # Boto also doesn't provide wrapper classes for CacheClusters or
+            # CacheNodes, so we can't use AWSQueryConnection's get_list
+            # method. Let's do the work manually.
+            clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
+
+        except KeyError as e:
+            error = "ElastiCache query to AWS failed (unexpected format)."
+            self.fail_with_error(error, 'getting ElastiCache clusters')
+
+        for cluster in clusters:
+            self.add_elasticache_cluster(cluster, region)
+
+    def get_elasticache_replication_groups_by_region(self, region):
+        ''' Makes an AWS API call to the list of ElastiCache replication groups
+        in a particular region.'''
+
+        # The ElastiCache boto module doesn't provide a get_all_instances
+        # method, so we need to call describe directly (it would be called
+        # by the shorthand method anyway...)
+        try:
+            conn = elasticache.connect_to_region(region)
+            if conn:
+                response = conn.describe_replication_groups()
+
+        except boto.exception.BotoServerError as e:
+            error = e.reason
+
+            if e.error_code == 'AuthFailure':
+                error = self.get_auth_error_message()
+            if not e.reason == "Forbidden":
+                error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
+            self.fail_with_error(error, 'getting ElastiCache replication groups')
+
+        try:
+            # Boto also doesn't provide wrapper classes for ReplicationGroups,
+            # so we can't use AWSQueryConnection's get_list method here.
+            # Let's do the work manually.
+            replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
+
+        except KeyError as e:
+            error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
+            self.fail_with_error(error, 'getting ElastiCache replication groups')
+
+        for replication_group in replication_groups:
+            self.add_elasticache_replication_group(replication_group, region)
 
     def get_auth_error_message(self):
         ''' create an informative error message if there is an issue authenticating'''
@@ -410,9 +535,12 @@ class Ec2Inventory(object):
             errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
 
         return '\n'.join(errors)
-
+
-    def fail_with_error(self, err_msg):
+    def fail_with_error(self, err_msg, err_operation=None):
         '''log an error to std err for ansible-playbook to consume and exit'''
+        if err_operation:
+            err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
+                err_msg=err_msg, err_operation=err_operation)
         sys.stderr.write(err_msg)
         sys.exit(1)
@@ -428,8 +556,8 @@ class Ec2Inventory(object):
         ''' Adds an instance to the inventory and index, as long as it is
         addressable '''
 
-        # Only want running instances unless all_instances is True
-        if not self.all_instances and instance.state != 'running':
+        # Only return instances with desired instance states
+        if instance.state not in self.ec2_instance_states:
             return
 
         # Select the best destination address
@@ -520,7 +648,10 @@ class Ec2Inventory(object):
         # Inventory: Group by tag keys
         if self.group_by_tag_keys:
             for k, v in instance.tags.items():
-                key = self.to_safe("tag_" + k + "=" + v)
+                if v:
+                    key = self.to_safe("tag_" + k + "=" + v)
+                else:
+                    key = self.to_safe("tag_" + k)
                 self.push(self.inventory, key, dest)
                 if self.nested_groups:
                     self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
@@ -629,6 +760,243 @@ class Ec2Inventory(object):
 
         self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
 
+    def add_elasticache_cluster(self, cluster, region):
+        ''' Adds an ElastiCache cluster to the inventory and index, as long
+        as its nodes are addressable '''
+
+        # Only want available clusters unless all_elasticache_clusters is True
+        if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
+            return
+
+        # Select the best destination address
+        if 'ConfigurationEndpoint' in cluster and
cluster['ConfigurationEndpoint']: + # Memcached cluster + dest = cluster['ConfigurationEndpoint']['Address'] + is_redis = False + else: + # Redis sigle node cluster + # Because all Redis clusters are single nodes, we'll merge the + # info from the cluster with info about the node + dest = cluster['CacheNodes'][0]['Endpoint']['Address'] + is_redis = True + + if not dest: + # Skip clusters we cannot address (e.g. private VPC subnet) + return + + # Add to index + self.index[dest] = [region, cluster['CacheClusterId']] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[cluster['CacheClusterId']] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) + + # Inventory: Group by region + if self.group_by_region and not is_redis: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone and not is_redis: + self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) + self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) + + # Inventory: Group by node type + if self.group_by_instance_type and not is_redis: + type_name = self.to_safe('type_' + cluster['CacheNodeType']) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for ElastiCache) + + # Inventory: Group by security group + if self.group_by_security_group and not is_redis: + + # Check for the existence of the 'SecurityGroups' key and also if + # this key has some value. When the cluster is not placed in a SG + # the query can return None here and cause an error. 
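
The guard that follows is the defensive pattern used throughout these additions, so it is worth seeing in isolation. A standalone illustration with hypothetical describe output:

    # 'SecurityGroups' can be present but None; guard before iterating.
    cluster = {'CacheClusterId': 'demo', 'SecurityGroups': None}

    if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
        for security_group in cluster['SecurityGroups']:
            print(security_group['SecurityGroupId'])
    # No iteration happens and, more importantly, no TypeError on the None value.
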
+ if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine and not is_redis: + self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) + + # Inventory: Group by parameter group + if self.group_by_elasticache_parameter_group: + self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) + + # Inventory: Group by replication group + if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: + self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) + + # Global Tag: all ElastiCache clusters + self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) + + host_info = self.get_host_info_dict_from_describe_dict(cluster) + + self.inventory["_meta"]["hostvars"][dest] = host_info + + # Add the nodes + for node in cluster['CacheNodes']: + self.add_elasticache_node(node, cluster, region) + + def add_elasticache_node(self, node, cluster, region): + ''' Adds an ElastiCache node to the inventory and index, as long as + it is addressable ''' + + # Only want available nodes unless all_elasticache_nodes is True + if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': + return + + # Select the best destination address + dest = node['Endpoint']['Address'] + + if not dest: + # Skip nodes we cannot address (e.g. 
private VPC subnet) + return + + node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) + + # Add to index + self.index[dest] = [region, node_id] + + # Inventory: Group by node ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[node_id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', node_id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) + self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) + + # Inventory: Group by node type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + cluster['CacheNodeType']) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for ElastiCache) + + # Inventory: Group by security group + if self.group_by_security_group: + + # Check for the existence of the 'SecurityGroups' key and also if + # this key has some value. When the cluster is not placed in a SG + # the query can return None here and cause an error. + if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine: + self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) + + # Inventory: Group by parameter group (done at cluster level) + + # Inventory: Group by replication group (done at cluster level) + + # Inventory: Group by ElastiCache Cluster + if self.group_by_elasticache_cluster: + self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) + + # Global Tag: all ElastiCache nodes + self.push(self.inventory, 'elasticache_nodes', dest) + + host_info = self.get_host_info_dict_from_describe_dict(node) + + if dest in self.inventory["_meta"]["hostvars"]: + self.inventory["_meta"]["hostvars"][dest].update(host_info) + else: + self.inventory["_meta"]["hostvars"][dest] = host_info + + def add_elasticache_replication_group(self, replication_group, region): + ''' Adds an ElastiCache replication group to the inventory and index ''' + + # Only want available clusters unless all_elasticache_replication_groups is True + if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': + return + + # Select the best destination address (PrimaryEndpoint) + dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] + + if not dest: + # Skip clusters we cannot address (e.g. 
private VPC subnet)
+            return
+
+        # Add to index
+        self.index[dest] = [region, replication_group['ReplicationGroupId']]
+
+        # Inventory: Group by ID (always a group of 1)
+        if self.group_by_instance_id:
+            self.inventory[replication_group['ReplicationGroupId']] = [dest]
+            if self.nested_groups:
+                self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
+
+        # Inventory: Group by region
+        if self.group_by_region:
+            self.push(self.inventory, region, dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'regions', region)
+
+        # Inventory: Group by availability zone (doesn't apply to replication groups)
+
+        # Inventory: Group by node type (doesn't apply to replication groups)
+
+        # Inventory: Group by VPC (information not available in the current
+        # AWS API version for replication groups)
+
+        # Inventory: Group by security group (doesn't apply to replication
+        # groups; check this value at the cluster level)
+
+        # Inventory: Group by engine (replication groups are always Redis)
+        if self.group_by_elasticache_engine:
+            self.push(self.inventory, 'elasticache_redis', dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'elasticache_engines', 'redis')
+
+        # Global Tag: all ElastiCache replication groups
+        self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
+
+        host_info = self.get_host_info_dict_from_describe_dict(replication_group)
+
+        self.inventory["_meta"]["hostvars"][dest] = host_info
 
     def get_route53_records(self):
         ''' Get and store the map of resource records to domain names that
@@ -677,7 +1045,6 @@ class Ec2Inventory(object):
 
         return list(name_list)
 
-
     def get_host_info_dict_from_instance(self, instance):
         instance_vars = {}
         for key in vars(instance):
@@ -723,6 +1090,91 @@ class Ec2Inventory(object):
 
         return instance_vars
 
+    def get_host_info_dict_from_describe_dict(self, describe_dict):
+        ''' Parses the dictionary returned by the API call into a flat list
+        of parameters. This method should be used only when 'describe' is
+        used directly because Boto doesn't provide specific classes. '''
+
+        # I really don't agree with prefixing everything with 'ec2'
+        # because EC2, RDS and ElastiCache are different services.
+        # I'm just following the pattern used until now to not break any
+        # compatibility.
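
Because the describe dictionaries arrive with CamelCase keys, the flattening below leans on the uncammelize helper added near the end of this patch. A quick illustration of what that conversion produces (regexes copied from that helper; sample keys hypothetical):

    # CamelCase -> snake_case, as done by the uncammelize helper in this patch.
    import re

    def uncamelize(key):
        temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()

    print(uncamelize('CacheClusterId'))   # cache_cluster_id
    print(uncamelize('EngineVersion'))    # engine_version
    # The method below then prefixes 'ec2_', e.g. ec2_cache_cluster_id.
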
+ + host_info = {} + for key in describe_dict: + value = describe_dict[key] + key = self.to_safe('ec2_' + self.uncammelize(key)) + + # Handle complex types + + # Target: Memcached Cache Clusters + if key == 'ec2_configuration_endpoint' and value: + host_info['ec2_configuration_endpoint_address'] = value['Address'] + host_info['ec2_configuration_endpoint_port'] = value['Port'] + + # Target: Cache Nodes and Redis Cache Clusters (single node) + if key == 'ec2_endpoint' and value: + host_info['ec2_endpoint_address'] = value['Address'] + host_info['ec2_endpoint_port'] = value['Port'] + + # Target: Redis Replication Groups + if key == 'ec2_node_groups' and value: + host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] + host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] + replica_count = 0 + for node in value[0]['NodeGroupMembers']: + if node['CurrentRole'] == 'primary': + host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] + host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] + host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] + elif node['CurrentRole'] == 'replica': + host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address'] + host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port'] + host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId'] + replica_count += 1 + + # Target: Redis Replication Groups + if key == 'ec2_member_clusters' and value: + host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) + + # Target: All Cache Clusters + elif key == 'ec2_cache_parameter_group': + host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) + host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] + host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] + + # Target: Almost everything + elif key == 'ec2_security_groups': + + # Skip if SecurityGroups is None + # (it is possible to have the key defined but no value in it). 
+ if value is not None: + sg_ids = [] + for sg in value: + sg_ids.append(sg['SecurityGroupId']) + host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) + + # Target: Everything + # Preserve booleans and integers + elif type(value) in [int, bool]: + host_info[key] = value + + # Target: Everything + # Sanitize string values + elif isinstance(value, six.string_types): + host_info[key] = value.strip() + + # Target: Everything + # Replace None by an empty string + elif type(value) == type(None): + host_info[key] = '' + + else: + # Remove non-processed complex types + pass + + return host_info + def get_host_info(self): ''' Get variables about a specific host ''' @@ -786,6 +1238,9 @@ class Ec2Inventory(object): cache.write(json_data) cache.close() + def uncammelize(self, key): + temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be @@ -793,7 +1248,6 @@ class Ec2Inventory(object): return re.sub("[^A-Za-z0-9\_]", "_", word) - def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' diff --git a/plugins/inventory/fleet.py b/contrib/inventory/fleet.py similarity index 100% rename from plugins/inventory/fleet.py rename to contrib/inventory/fleet.py diff --git a/plugins/inventory/freeipa.py b/contrib/inventory/freeipa.py similarity index 100% rename from plugins/inventory/freeipa.py rename to contrib/inventory/freeipa.py diff --git a/plugins/inventory/gce.ini b/contrib/inventory/gce.ini similarity index 100% rename from plugins/inventory/gce.ini rename to contrib/inventory/gce.ini diff --git a/plugins/inventory/gce.py b/contrib/inventory/gce.py similarity index 96% rename from plugins/inventory/gce.py rename to contrib/inventory/gce.py index 76e14f23012..740e112332c 100755 --- a/plugins/inventory/gce.py +++ b/contrib/inventory/gce.py @@ -66,7 +66,7 @@ Examples: $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" Use the GCE inventory script to print out instance specific information - $ plugins/inventory/gce.py --host my_instance + $ contrib/inventory/gce.py --host my_instance Author: Eric Johnson Version: 0.0.1 @@ -221,7 +221,7 @@ class GceInventory(object): 'gce_image': inst.image, 'gce_machine_type': inst.size, 'gce_private_ip': inst.private_ips[0], - 'gce_public_ip': inst.public_ips[0], + 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None, 'gce_name': inst.name, 'gce_description': inst.extra['description'], 'gce_status': inst.extra['status'], @@ -230,7 +230,7 @@ class GceInventory(object): 'gce_metadata': md, 'gce_network': net, # Hosts don't have a public name, so we add an IP - 'ansible_ssh_host': inst.public_ips[0] + 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0] } def get_instance(self, instance_name): @@ -257,7 +257,10 @@ class GceInventory(object): tags = node.extra['tags'] for t in tags: - tag = 'tag_%s' % t + if t.startswith('group-'): + tag = t[6:] + else: + tag = 'tag_%s' % t if groups.has_key(tag): groups[tag].append(name) else: groups[tag] = [name] diff --git a/plugins/inventory/jail.py b/contrib/inventory/jail.py similarity index 100% rename from plugins/inventory/jail.py rename to contrib/inventory/jail.py diff --git a/contrib/inventory/landscape.py b/contrib/inventory/landscape.py new file mode 100755 index 00000000000..4b53171c34e --- /dev/null +++ 
b/contrib/inventory/landscape.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python + +# (c) 2015, Marc Abramowitz +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Dynamic inventory script which lets you use nodes discovered by Canonical's +# Landscape (http://www.ubuntu.com/management/landscape-features). +# +# Requires the `landscape_api` Python module +# See: +# - https://landscape.canonical.com/static/doc/api/api-client-package.html +# - https://landscape.canonical.com/static/doc/api/python-api.html +# +# Environment variables +# --------------------- +# - `LANDSCAPE_API_URI` +# - `LANDSCAPE_API_KEY` +# - `LANDSCAPE_API_SECRET` +# - `LANDSCAPE_API_SSL_CA_FILE` (optional) + + +import argparse +import collections +import os +import sys + +from landscape_api.base import API, HTTPError + +try: + import json +except ImportError: + import simplejson as json + +_key = 'landscape' + + +class EnvironmentConfig(object): + uri = os.getenv('LANDSCAPE_API_URI') + access_key = os.getenv('LANDSCAPE_API_KEY') + secret_key = os.getenv('LANDSCAPE_API_SECRET') + ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE') + + +def _landscape_client(): + env = EnvironmentConfig() + return API( + uri=env.uri, + access_key=env.access_key, + secret_key=env.secret_key, + ssl_ca_file=env.ssl_ca_file) + + +def get_landscape_members_data(): + return _landscape_client().get_computers() + + +def get_nodes(data): + return [node['hostname'] for node in data] + + +def get_groups(data): + groups = collections.defaultdict(list) + + for node in data: + for value in node['tags']: + groups[value].append(node['hostname']) + + return groups + + +def get_meta(data): + meta = {'hostvars': {}} + for node in data: + meta['hostvars'][node['hostname']] = {'tags': node['tags']} + return meta + + +def print_list(): + data = get_landscape_members_data() + nodes = get_nodes(data) + groups = get_groups(data) + meta = get_meta(data) + inventory_data = {_key: nodes, '_meta': meta} + inventory_data.update(groups) + print(json.dumps(inventory_data)) + + +def print_host(host): + data = get_landscape_members_data() + meta = get_meta(data) + print(json.dumps(meta['hostvars'][host])) + + +def get_args(args_list): + parser = argparse.ArgumentParser( + description='ansible inventory script reading from landscape cluster') + mutex_group = parser.add_mutually_exclusive_group(required=True) + help_list = 'list all hosts from landscape cluster' + mutex_group.add_argument('--list', action='store_true', help=help_list) + help_host = 'display variables for a host' + mutex_group.add_argument('--host', help=help_host) + return parser.parse_args(args_list) + + +def main(args_list): + args = get_args(args_list) + if args.list: + print_list() + if args.host: + print_host(args.host) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/plugins/inventory/libcloud.ini b/contrib/inventory/libcloud.ini similarity index 100% rename from plugins/inventory/libcloud.ini rename to 
contrib/inventory/libcloud.ini diff --git a/plugins/inventory/libvirt_lxc.py b/contrib/inventory/libvirt_lxc.py similarity index 100% rename from plugins/inventory/libvirt_lxc.py rename to contrib/inventory/libvirt_lxc.py diff --git a/plugins/inventory/linode.ini b/contrib/inventory/linode.ini similarity index 100% rename from plugins/inventory/linode.ini rename to contrib/inventory/linode.ini diff --git a/plugins/inventory/linode.py b/contrib/inventory/linode.py similarity index 100% rename from plugins/inventory/linode.py rename to contrib/inventory/linode.py diff --git a/plugins/inventory/nova.ini b/contrib/inventory/nova.ini similarity index 100% rename from plugins/inventory/nova.ini rename to contrib/inventory/nova.ini diff --git a/plugins/inventory/nova.py b/contrib/inventory/nova.py old mode 100644 new mode 100755 similarity index 100% rename from plugins/inventory/nova.py rename to contrib/inventory/nova.py diff --git a/plugins/inventory/openshift.py b/contrib/inventory/openshift.py similarity index 100% rename from plugins/inventory/openshift.py rename to contrib/inventory/openshift.py diff --git a/plugins/inventory/openstack.py b/contrib/inventory/openstack.py similarity index 100% rename from plugins/inventory/openstack.py rename to contrib/inventory/openstack.py diff --git a/plugins/inventory/openstack.yml b/contrib/inventory/openstack.yml similarity index 100% rename from plugins/inventory/openstack.yml rename to contrib/inventory/openstack.yml diff --git a/contrib/inventory/openvz.py b/contrib/inventory/openvz.py new file mode 100644 index 00000000000..fd0bd9ff794 --- /dev/null +++ b/contrib/inventory/openvz.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# openvz.py +# +# Copyright 2014 jordonr +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +# Inspired by libvirt_lxc.py inventory script +# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py +# +# Groups are determined by the description field of openvz guests +# multiple groups can be seperated by commas: webserver,dbserver + +from subprocess import Popen,PIPE +import sys +import json + + +#List openvz hosts +vzhosts = ['vzhost1','vzhost2','vzhost3'] +#Add openvz hosts to the inventory and Add "_meta" trick +inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}} +#default group, when description not defined +default_group = ['vzguest'] + +def get_guests(): + #Loop through vzhosts + for h in vzhosts: + #SSH to vzhost and get the list of guests in json + pipe = Popen(['ssh', h,'vzlist','-j'], stdout=PIPE, universal_newlines=True) + + #Load Json info of guests + json_data = json.loads(pipe.stdout.read()) + + #loop through guests + for j in json_data: + #Add information to host vars + inventory['_meta']['hostvars'][j['hostname']] = {'ctid': j['ctid'], 'veid': j['veid'], 'vpsid': j['vpsid'], 'private_path': j['private'], 'root_path': j['root'], 'ip': j['ip']} + + #determine group from guest description + if j['description'] is not None: + groups = j['description'].split(",") + else: + groups = default_group + + #add guest to inventory + for g in groups: + if g not in inventory: + inventory[g] = {'hosts': []} + + inventory[g]['hosts'].append(j['hostname']) + + return inventory + + +if len(sys.argv) == 2 and sys.argv[1] == '--list': + inv_json = get_guests() + print json.dumps(inv_json, sort_keys=True) +elif len(sys.argv) == 3 and sys.argv[1] == '--host': + print json.dumps({}); +else: + print "Need an argument, either --list or --host " diff --git a/contrib/inventory/ovirt.ini b/contrib/inventory/ovirt.ini new file mode 100644 index 00000000000..a52f9d63ff5 --- /dev/null +++ b/contrib/inventory/ovirt.ini @@ -0,0 +1,33 @@ +# Copyright 2013 Google Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +# Author: Josha Inglis based on the gce.ini by Eric Johnson + +[ovirt] +# ovirt Service Account configuration information can be stored in the +# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already +# exist in your PYTHONPATH and be picked up automatically with an import +# statement in the inventory script. However, you can specify an absolute +# path to the secrets.py file with 'libcloud_secrets' parameter. +ovirt_api_secrets = + +# If you are not going to use a 'secrets.py' file, you can set the necessary +# authorization parameters here. +ovirt_url = +ovirt_username = +ovirt_password = diff --git a/contrib/inventory/ovirt.py b/contrib/inventory/ovirt.py new file mode 100755 index 00000000000..dc022c5dfd2 --- /dev/null +++ b/contrib/inventory/ovirt.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python +# Copyright 2015 IIX Inc. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +""" +ovirt external inventory script +================================= + +Generates inventory that Ansible can understand by making API requests to +oVirt via the ovirt-engine-sdk-python library. + +When run against a specific host, this script returns the following variables +based on the data obtained from the ovirt_sdk Node object: + - ovirt_uuid + - ovirt_id + - ovirt_image + - ovirt_machine_type + - ovirt_ips + - ovirt_name + - ovirt_description + - ovirt_status + - ovirt_zone + - ovirt_tags + - ovirt_stats + +When run in --list mode, instances are grouped by the following categories: + + - zone: + zone group name. + - instance tags: + An entry is created for each tag. For example, if you have two instances + with a common tag called 'foo', they will both be grouped together under + the 'tag_foo' name. + - network name: + the name of the network is appended to 'network_' (e.g. the 'default' + network will result in a group named 'network_default') + - running status: + group name prefixed with 'status_' (e.g. status_up, status_down,..) + +Examples: + Execute uname on all instances in the us-central1-a zone + $ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a" + + Use the ovirt inventory script to print out instance specific information + $ contrib/inventory/ovirt.py --host my_instance + +Author: Josha Inglis based on the gce.py by Eric Johnson +Version: 0.0.1 +""" + +USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin" +USER_AGENT_VERSION = "v1" + +import sys +import os +import argparse +import ConfigParser +from collections import defaultdict + +try: + import json +except ImportError: + # noinspection PyUnresolvedReferences,PyPackageRequirements + import simplejson as json + +try: + # noinspection PyUnresolvedReferences + from ovirtsdk.api import API + # noinspection PyUnresolvedReferences + from ovirtsdk.xml import params +except ImportError: + print("ovirt inventory script requires ovirt-engine-sdk-python") + sys.exit(1) + + +class OVirtInventory(object): + def __init__(self): + # Read settings and parse CLI arguments + self.args = self.parse_cli_args() + self.driver = self.get_ovirt_driver() + + # Just display data for specific host + if self.args.host: + print self.json_format_dict( + self.node_to_dict(self.get_instance(self.args.host)), + pretty=self.args.pretty + ) + sys.exit(0) + + # Otherwise, assume user wants all instances grouped + print( + self.json_format_dict( + data=self.group_instances(), + pretty=self.args.pretty + ) + ) + sys.exit(0) + + @staticmethod + def get_ovirt_driver(): + """ + Determine the ovirt authorization settings and return a ovirt_sdk driver. + + :rtype : ovirtsdk.api.API + """ + kwargs = {} + + ovirt_ini_default_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "ovirt.ini") + ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path) + + # Create a ConfigParser. 
+ # This provides empty defaults to each key, so that environment + # variable configuration (as opposed to INI configuration) is able + # to work. + config = ConfigParser.SafeConfigParser(defaults={ + 'ovirt_url': '', + 'ovirt_username': '', + 'ovirt_password': '', + 'ovirt_api_secrets': '', + }) + if 'ovirt' not in config.sections(): + config.add_section('ovirt') + config.read(ovirt_ini_path) + + # Attempt to get ovirt params from a configuration file, if one + # exists. + secrets_path = config.get('ovirt', 'ovirt_api_secrets') + secrets_found = False + try: + # noinspection PyUnresolvedReferences,PyPackageRequirements + import secrets + + kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) + secrets_found = True + except ImportError: + pass + + if not secrets_found and secrets_path: + if not secrets_path.endswith('secrets.py'): + err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py" + print(err) + sys.exit(1) + sys.path.append(os.path.dirname(secrets_path)) + try: + # noinspection PyUnresolvedReferences,PyPackageRequirements + import secrets + + kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) + except ImportError: + pass + if not secrets_found: + kwargs = { + 'url': config.get('ovirt', 'ovirt_url'), + 'username': config.get('ovirt', 'ovirt_username'), + 'password': config.get('ovirt', 'ovirt_password'), + } + + # If the appropriate environment variables are set, they override + # other configuration; process those into our args and kwargs. + kwargs['url'] = os.environ.get('OVIRT_URL') + kwargs['username'] = os.environ.get('OVIRT_EMAIL') + kwargs['password'] = os.environ.get('OVIRT_PASS') + + # Retrieve and return the ovirt driver. + return API(insecure=True, **kwargs) + + @staticmethod + def parse_cli_args(): + """ + Command line argument processing + + :rtype : argparse.Namespace + """ + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt') + parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') + parser.add_argument('--host', action='store', help='Get all information about an instance') + parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)') + return parser.parse_args() + + def node_to_dict(self, inst): + """ + :type inst: params.VM + """ + if inst is None: + return {} + + inst.get_custom_properties() + ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \ + if inst.get_guest_info() is not None else [] + stats = {} + for stat in inst.get_statistics().list(): + stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum() + + return { + 'ovirt_uuid': inst.get_id(), + 'ovirt_id': inst.get_id(), + 'ovirt_image': inst.get_os().get_type(), + 'ovirt_machine_type': inst.get_instance_type(), + 'ovirt_ips': ips, + 'ovirt_name': inst.get_name(), + 'ovirt_description': inst.get_description(), + 'ovirt_status': inst.get_status().get_state(), + 'ovirt_zone': inst.get_cluster().get_id(), + 'ovirt_tags': self.get_tags(inst), + 'ovirt_stats': stats, + # Hosts don't have a public name, so we add an IP + 'ansible_ssh_host': ips[0] if len(ips) > 0 else None + } + + @staticmethod + def get_tags(inst): + """ + :type inst: params.VM + """ + return [x.get_name() for x in inst.get_tags().list()] + + # noinspection PyBroadException,PyUnusedLocal + def get_instance(self, instance_name): + """Gets details about a specific instance """ + try: + return self.driver.vms.get(name=instance_name) + except 
Exception as e: + return None + + def group_instances(self): + """Group all instances""" + groups = defaultdict(list) + meta = {"hostvars": {}} + + for node in self.driver.vms.list(): + assert isinstance(node, params.VM) + name = node.get_name() + + meta["hostvars"][name] = self.node_to_dict(node) + + zone = node.get_cluster().get_name() + groups[zone].append(name) + + tags = self.get_tags(node) + for t in tags: + tag = 'tag_%s' % t + groups[tag].append(name) + + nets = [x.get_name() for x in node.get_nics().list()] + for net in nets: + net = 'network_%s' % net + groups[net].append(name) + + status = node.get_status().get_state() + stat = 'status_%s' % status.lower() + if stat in groups: + groups[stat].append(name) + else: + groups[stat] = [name] + + groups["_meta"] = meta + + return groups + + @staticmethod + def json_format_dict(data, pretty=False): + """ Converts a dict to a JSON object and dumps it as a formatted + string """ + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + +# Run the script +OVirtInventory() diff --git a/contrib/inventory/proxmox.py b/contrib/inventory/proxmox.py new file mode 100755 index 00000000000..80f6628d973 --- /dev/null +++ b/contrib/inventory/proxmox.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python + +# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
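
Before the script body, it may help to see the Proxmox ticket handshake it implements in condensed form: POST credentials to access/ticket, then replay the returned ticket as a PVEAuthCookie header on every GET. A sketch on the same Python 2 stack the script uses (base URL and credentials are hypothetical):

    # Condensed ticket handshake; URL and credentials are placeholders.
    import json
    import urllib
    import urllib2

    base = 'https://proxmox.example.com:8006/'
    params = urllib.urlencode({'username': 'root@pam', 'password': 'secret'})
    auth = json.load(urllib2.urlopen(base + 'api2/json/access/ticket', params))
    ticket = auth['data']['ticket']

    opener = urllib2.build_opener()
    opener.addheaders.append(('Cookie', 'PVEAuthCookie={}'.format(ticket)))
    nodes = json.load(opener.open(base + 'api2/json/nodes'))['data']
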
+ +import urllib +import urllib2 +try: + import json +except ImportError: + import simplejson as json +import os +import sys +from optparse import OptionParser + +class ProxmoxNodeList(list): + def get_names(self): + return [node['node'] for node in self] + +class ProxmoxQemu(dict): + def get_variables(self): + variables = {} + for key, value in self.iteritems(): + variables['proxmox_' + key] = value + return variables + +class ProxmoxQemuList(list): + def __init__(self, data=[]): + for item in data: + self.append(ProxmoxQemu(item)) + + def get_names(self): + return [qemu['name'] for qemu in self if qemu['template'] != 1] + + def get_by_name(self, name): + results = [qemu for qemu in self if qemu['name'] == name] + return results[0] if len(results) > 0 else None + + def get_variables(self): + variables = {} + for qemu in self: + variables[qemu['name']] = qemu.get_variables() + + return variables + +class ProxmoxPoolList(list): + def get_names(self): + return [pool['poolid'] for pool in self] + +class ProxmoxPool(dict): + def get_members_name(self): + return [member['name'] for member in self['members'] if member['template'] != 1] + +class ProxmoxAPI(object): + def __init__(self, options): + self.options = options + self.credentials = None + + if not options.url: + raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).') + elif not options.username: + raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).') + elif not options.password: + raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).') + + def auth(self): + request_path = '{}api2/json/access/ticket'.format(self.options.url) + + request_params = urllib.urlencode({ + 'username': self.options.username, + 'password': self.options.password, + }) + + data = json.load(urllib2.urlopen(request_path, request_params)) + + self.credentials = { + 'ticket': data['data']['ticket'], + 'CSRFPreventionToken': data['data']['CSRFPreventionToken'], + } + + def get(self, url, data=None): + opener = urllib2.build_opener() + opener.addheaders.append(('Cookie', 'PVEAuthCookie={}'.format(self.credentials['ticket']))) + + request_path = '{}{}'.format(self.options.url, url) + request = opener.open(request_path, data) + + response = json.load(request) + return response['data'] + + def nodes(self): + return ProxmoxNodeList(self.get('api2/json/nodes')) + + def node_qemu(self, node): + return ProxmoxQemuList(self.get('api2/json/nodes/{}/qemu'.format(node))) + + def pools(self): + return ProxmoxPoolList(self.get('api2/json/pools')) + + def pool(self, poolid): + return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid))) + +def main_list(options): + results = { + 'all': { + 'hosts': [], + }, + '_meta': { + 'hostvars': {}, + } + } + + proxmox_api = ProxmoxAPI(options) + proxmox_api.auth() + + for node in proxmox_api.nodes().get_names(): + qemu_list = proxmox_api.node_qemu(node) + results['all']['hosts'] += qemu_list.get_names() + results['_meta']['hostvars'].update(qemu_list.get_variables()) + + # pools + for pool in proxmox_api.pools().get_names(): + results[pool] = { + 'hosts': proxmox_api.pool(pool).get_members_name(), + } + + return results + +def main_host(options): + proxmox_api = ProxmoxAPI(options) + proxmox_api.auth() + + for node in proxmox_api.nodes().get_names(): + qemu_list = proxmox_api.node_qemu(node) + qemu = qemu_list.get_by_name(options.host) + if qemu: + return qemu.get_variables() + + return {} + +def main(): + parser = OptionParser(usage='%prog [options] --list | --host 
HOSTNAME') + parser.add_option('--list', action="store_true", default=False, dest="list") + parser.add_option('--host', dest="host") + parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url') + parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username') + parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password') + parser.add_option('--pretty', action="store_true", default=False, dest='pretty') + (options, args) = parser.parse_args() + + if options.list: + data = main_list(options) + elif options.host: + data = main_host(options) + else: + parser.print_help() + sys.exit(1) + + indent = None + if options.pretty: + indent = 2 + + print json.dumps(data, indent=indent) + +if __name__ == '__main__': + main() diff --git a/plugins/inventory/rax.ini b/contrib/inventory/rax.ini similarity index 100% rename from plugins/inventory/rax.ini rename to contrib/inventory/rax.ini diff --git a/plugins/inventory/rax.py b/contrib/inventory/rax.py old mode 100644 new mode 100755 similarity index 100% rename from plugins/inventory/rax.py rename to contrib/inventory/rax.py diff --git a/contrib/inventory/serf.py b/contrib/inventory/serf.py new file mode 100755 index 00000000000..e1340da92df --- /dev/null +++ b/contrib/inventory/serf.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python + +# (c) 2015, Marc Abramowitz +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Dynamic inventory script which lets you use nodes discovered by Serf +# (https://serfdom.io/). 
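
The serf script introduced next follows the same --list/--host contract; its core is grouping members by their tag values. A reduced sketch with hypothetical member data:

    # Tag values become group names, as get_groups below does.
    import collections

    members = [{'Name': 'web1', 'Tags': {'role': 'web'}},
               {'Name': 'db1', 'Tags': {'role': 'db'}}]

    groups = collections.defaultdict(list)
    for node in members:
        for value in node['Tags'].values():
            groups[value].append(node['Name'])
    print(dict(groups))   # {'web': ['web1'], 'db': ['db1']}
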
+# +# Requires the `serfclient` Python module from +# https://pypi.python.org/pypi/serfclient +# +# Environment variables +# --------------------- +# - `SERF_RPC_ADDR` +# - `SERF_RPC_AUTH` +# +# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr + +import argparse +import collections +import os +import sys + +# https://pypi.python.org/pypi/serfclient +from serfclient import SerfClient, EnvironmentConfig + +try: + import json +except ImportError: + import simplejson as json + +_key = 'serf' + + +def _serf_client(): + env = EnvironmentConfig() + return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key) + + +def get_serf_members_data(): + return _serf_client().members().body['Members'] + + +def get_nodes(data): + return [node['Name'] for node in data] + + +def get_groups(data): + groups = collections.defaultdict(list) + + for node in data: + for key, value in node['Tags'].items(): + groups[value].append(node['Name']) + + return groups + + +def get_meta(data): + meta = {'hostvars': {}} + for node in data: + meta['hostvars'][node['Name']] = node['Tags'] + return meta + + +def print_list(): + data = get_serf_members_data() + nodes = get_nodes(data) + groups = get_groups(data) + meta = get_meta(data) + inventory_data = {_key: nodes, '_meta': meta} + inventory_data.update(groups) + print(json.dumps(inventory_data)) + + +def print_host(host): + data = get_serf_members_data() + meta = get_meta(data) + print(json.dumps(meta['hostvars'][host])) + + +def get_args(args_list): + parser = argparse.ArgumentParser( + description='ansible inventory script reading from serf cluster') + mutex_group = parser.add_mutually_exclusive_group(required=True) + help_list = 'list all hosts from serf cluster' + mutex_group.add_argument('--list', action='store_true', help=help_list) + help_host = 'display variables for a host' + mutex_group.add_argument('--host', help=help_host) + return parser.parse_args(args_list) + + +def main(args_list): + args = get_args(args_list) + if args.list: + print_list() + if args.host: + print_host(args.host) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/plugins/inventory/softlayer.py b/contrib/inventory/softlayer.py similarity index 100% rename from plugins/inventory/softlayer.py rename to contrib/inventory/softlayer.py diff --git a/plugins/inventory/spacewalk.py b/contrib/inventory/spacewalk.py similarity index 100% rename from plugins/inventory/spacewalk.py rename to contrib/inventory/spacewalk.py diff --git a/plugins/inventory/ssh_config.py b/contrib/inventory/ssh_config.py similarity index 75% rename from plugins/inventory/ssh_config.py rename to contrib/inventory/ssh_config.py index 7c04c8cc6da..55401a664d3 100755 --- a/plugins/inventory/ssh_config.py +++ b/contrib/inventory/ssh_config.py @@ -19,6 +19,10 @@ # Dynamic inventory script which lets you use aliases from ~/.ssh/config. # +# There were some issues with various Paramiko versions. I took a deeper look +# and tested heavily. Now, ansible parses this alright with Paramiko versions +# 1.7.2 to 1.15.2. +# # It prints inventory based on parsed ~/.ssh/config. You can refer to hosts # with their alias, rather than with the IP or hostname. It takes advantage # of the ansible_ssh_{host,port,user,private_key_file}. 
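
The hunks that follow harden this alias parsing. For orientation, the underlying paramiko API can be exercised on its own; a standalone sketch of the same mapping (the alias 'myhost' is hypothetical; SSHConfig.parse and lookup are paramiko's public API):

    # Resolve one alias via paramiko; the script below walks cfg._config instead.
    import os.path
    import paramiko

    cfg = paramiko.SSHConfig()
    with open(os.path.expanduser('~/.ssh/config')) as f:
        cfg.parse(f)

    opts = cfg.lookup('myhost')   # hypothetical Host alias
    print({'ansible_ssh_host': opts.get('hostname'),
           'ansible_ssh_user': opts.get('user'),
           'ansible_ssh_port': opts.get('port')})
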
@@ -39,7 +43,6 @@ import argparse import os.path import sys - import paramiko try: @@ -47,6 +50,8 @@ try: except ImportError: import simplejson as json +SSH_CONF = '~/.ssh/config' + _key = 'ssh_config' _ssh_to_ansible = [('user', 'ansible_ssh_user'), @@ -56,15 +61,25 @@ _ssh_to_ansible = [('user', 'ansible_ssh_user'), def get_config(): - with open(os.path.expanduser('~/.ssh/config')) as f: + if not os.path.isfile(os.path.expanduser(SSH_CONF)): + return {} + with open(os.path.expanduser(SSH_CONF)) as f: cfg = paramiko.SSHConfig() cfg.parse(f) ret_dict = {} for d in cfg._config: + if type(d['host']) is list: + alias = d['host'][0] + else: + alias = d['host'] + if ('?' in alias) or ('*' in alias): + continue _copy = dict(d) del _copy['host'] - for host in d['host']: - ret_dict[host] = _copy['config'] + if 'config' in _copy: + ret_dict[alias] = _copy['config'] + else: + ret_dict[alias] = _copy return ret_dict @@ -75,7 +90,12 @@ def print_list(): tmp_dict = {} for ssh_opt, ans_opt in _ssh_to_ansible: if ssh_opt in attributes: - tmp_dict[ans_opt] = attributes[ssh_opt] + # If the attribute is a list, just take the first element. + # Private key is returned in a list for some reason. + attr = attributes[ssh_opt] + if type(attr) is list: + attr = attr[0] + tmp_dict[ans_opt] = attr if tmp_dict: meta['hostvars'][alias] = tmp_dict diff --git a/plugins/inventory/vagrant.py b/contrib/inventory/vagrant.py similarity index 65% rename from plugins/inventory/vagrant.py rename to contrib/inventory/vagrant.py index 7f6dc925e83..10dc61cdb24 100755 --- a/plugins/inventory/vagrant.py +++ b/contrib/inventory/vagrant.py @@ -13,6 +13,7 @@ Example Vagrant configuration using this script: """ # Copyright (C) 2013 Mark Mandel +# 2015 Igor Khomyakov # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -33,17 +34,26 @@ Example Vagrant configuration using this script: # import sys +import os.path import subprocess import re -import string +from paramiko import SSHConfig +from cStringIO import StringIO from optparse import OptionParser +from collections import defaultdict try: import json except: import simplejson as json +_group = 'vagrant' # a default group +_ssh_to_ansible = [('user', 'ansible_ssh_user'), + ('hostname', 'ansible_ssh_host'), + ('identityfile', 'ansible_ssh_private_key_file'), + ('port', 'ansible_ssh_port')] + # Options -#------------------------------ +# ------------------------------ parser = OptionParser(usage="%prog [options] --list | --host ") parser.add_option('--list', default=False, dest="list", action="store_true", @@ -56,19 +66,13 @@ parser.add_option('--host', default=None, dest="host", # helper functions # + # get all the ssh configs for all boxes in an array of dictionaries. 
def get_ssh_config():
-    configs = []
-
-    boxes = list_running_boxes()
+    return {k: get_a_ssh_config(k) for k in list_running_boxes()}

-    for box in boxes:
-        config = get_a_ssh_config(box)
-        configs.append(config)
-
-    return configs
-
-#list all the running boxes
+# list all the running boxes
def list_running_boxes():
    output = subprocess.check_output(["vagrant", "status"]).split('\n')

@@ -79,54 +83,47 @@
    if matcher:
        boxes.append(matcher.group(1))
-
    return boxes

-#get the ssh config for a single box
+
+# get the ssh config for a single box
def get_a_ssh_config(box_name):
    """Gives back a map of all the machine's ssh configurations"""

-    output = subprocess.check_output(["vagrant", "ssh-config", box_name]).split('\n')
+    output = subprocess.check_output(["vagrant", "ssh-config", box_name])
+    config = SSHConfig()
+    config.parse(StringIO(output))
+    host_config = config.lookup(box_name)

-    config = {}
-    for line in output:
-        if line.strip() != '':
-            matcher = re.search("( )?([a-zA-Z]+) (.*)", line)
-            config[matcher.group(2)] = matcher.group(3)
-
-    return config
+    # man 5 ssh_config says multiple identity files may be listed and
+    # "all these identities will be tried in sequence".
+    for id_path in host_config['identityfile']:
+        if os.path.isfile(id_path):
+            host_config['identityfile'] = id_path
+            break  # use the first identity file that actually exists
+    return {v: host_config[k] for k, v in _ssh_to_ansible}

# List out servers that vagrant has running
-#------------------------------
+# ------------------------------
if options.list:
    ssh_config = get_ssh_config()
-    hosts = { 'vagrant': []}
+    meta = defaultdict(dict)

-    for data in ssh_config:
-        hosts['vagrant'].append(data['HostName'])
+    for host in ssh_config:
+        meta['hostvars'][host] = ssh_config[host]

-    print json.dumps(hosts)
+    print json.dumps({_group: list(ssh_config.keys()), '_meta': meta})
    sys.exit(0)

# Get out the host details
-#------------------------------
+# ------------------------------
elif options.host:
-    result = {}
-    ssh_config = get_ssh_config()
-
-    details = filter(lambda x: (x['HostName'] == options.host), ssh_config)
-    if len(details) > 0:
-        #pass through the port, in case it's non standard.
-        result = details[0]
-        result['ansible_ssh_port'] = result['Port']
-
-    print json.dumps(result)
+    print json.dumps(get_a_ssh_config(options.host))
    sys.exit(0)

-
# Print out help
-#------------------------------
+# ------------------------------
else:
    parser.print_help()
    sys.exit(0)
diff --git a/plugins/inventory/vbox.py b/contrib/inventory/vbox.py
similarity index 100%
rename from plugins/inventory/vbox.py
rename to contrib/inventory/vbox.py
diff --git a/plugins/inventory/vmware.ini b/contrib/inventory/vmware.ini
similarity index 91%
rename from plugins/inventory/vmware.ini
rename to contrib/inventory/vmware.ini
index 964be18c14e..5097735fd0e 100644
--- a/plugins/inventory/vmware.ini
+++ b/contrib/inventory/vmware.ini
@@ -23,6 +23,10 @@ guests_only = True
# caching will be disabled.
#cache_dir = ~/.cache/ansible

+# Specify a prefix filter. Any VMs with names beginning with this string will
+# not be returned.
+# prefix_filter = test_
+
[auth]
# Specify hostname or IP address of vCenter/ESXi server.
A port may be
diff --git a/plugins/inventory/vmware.py b/contrib/inventory/vmware.py
similarity index 97%
rename from plugins/inventory/vmware.py
rename to contrib/inventory/vmware.py
index 92030d66e56..b708d599946 100755
--- a/plugins/inventory/vmware.py
+++ b/contrib/inventory/vmware.py
@@ -55,7 +55,7 @@ from suds.sudsobject import Object as SudsObject

class VMwareInventory(object):
-
+
    def __init__(self, guests_only=None):
        self.config = ConfigParser.SafeConfigParser()
        if os.environ.get('VMWARE_INI', ''):
@@ -95,7 +95,7 @@ class VMwareInventory(object):
        Saves the value to cache with the name given.
        '''
        if self.config.has_option('defaults', 'cache_dir'):
-            cache_dir = self.config.get('defaults', 'cache_dir')
+            cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir'))
            if not os.path.exists(cache_dir):
                os.makedirs(cache_dir)
            cache_file = os.path.join(cache_dir, name)
@@ -115,7 +115,7 @@ class VMwareInventory(object):
        else:
            cache_max_age = 0
        cache_stat = os.stat(cache_file)
-        if (cache_stat.st_mtime + cache_max_age) < time.time():
+        if (cache_stat.st_mtime + cache_max_age) >= time.time():
            with open(cache_file) as cache:
                return json.load(cache)
        return default
@@ -305,6 +305,11 @@ class VMwareInventory(object):
        else:
            vm_group = default_group + '_vm'

+        if self.config.has_option('defaults', 'prefix_filter'):
+            prefix_filter = self.config.get('defaults', 'prefix_filter')
+        else:
+            prefix_filter = None
+
        # Loop through physical hosts:
        for host in HostSystem.all(self.client):

@@ -318,6 +323,9 @@ class VMwareInventory(object):

            # Loop through all VMs on physical host.
            for vm in host.vm:
+                if prefix_filter:
+                    if vm.name.startswith(prefix_filter):
+                        continue
                self._add_host(inv, 'all', vm.name)
                self._add_host(inv, vm_group, vm.name)
                vm_info = self._get_vm_info(vm)
diff --git a/plugins/inventory/windows_azure.ini b/contrib/inventory/windows_azure.ini
similarity index 100%
rename from plugins/inventory/windows_azure.ini
rename to contrib/inventory/windows_azure.ini
diff --git a/plugins/inventory/windows_azure.py b/contrib/inventory/windows_azure.py
similarity index 100%
rename from plugins/inventory/windows_azure.py
rename to contrib/inventory/windows_azure.py
diff --git a/plugins/inventory/zabbix.ini b/contrib/inventory/zabbix.ini
similarity index 100%
rename from plugins/inventory/zabbix.ini
rename to contrib/inventory/zabbix.ini
diff --git a/plugins/inventory/zabbix.py b/contrib/inventory/zabbix.py
similarity index 100%
rename from plugins/inventory/zabbix.py
rename to contrib/inventory/zabbix.py
diff --git a/plugins/inventory/zone.py b/contrib/inventory/zone.py
similarity index 100%
rename from plugins/inventory/zone.py
rename to contrib/inventory/zone.py
diff --git a/docs/man/man1/ansible-playbook.1 b/docs/man/man1/ansible-playbook.1
index f1a1babc763..0c820b72e37 100644
--- a/docs/man/man1/ansible-playbook.1
+++ b/docs/man/man1/ansible-playbook.1
@@ -2,12 +2,12 @@
.\" Title: ansible-playbook
.\" Author: :doctype:manpage
.\" Generator: DocBook XSL Stylesheets v1.78.1
-.\" Date: 05/05/2015
+.\" Date: 07/23/2015
.\" Manual: System administration commands
-.\" Source: Ansible 2.0.0
+.\" Source: Ansible %VERSION%
.\" Language: English
.\"
-.TH "ANSIBLE\-PLAYBOOK" "1" "05/05/2015" "Ansible 2\&.0\&.0" "System administration commands"
+.TH "ANSIBLE\-PLAYBOOK" "1" "07/23/2015" "Ansible %VERSION%" "System administration commands"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\"
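An aside on the vmware.py cache test corrected above: a cache entry is valid only while its mtime plus the configured maximum age is still in the future; the old `<` comparison served stale entries and skipped fresh ones. A sketch of the intended invariant (the path below is invented for illustration):

    import os
    import time

    def cache_is_fresh(cache_file, cache_max_age):
        # Fresh while mtime + allowed age has not yet passed "now".
        if not os.path.exists(cache_file):
            return False
        return (os.stat(cache_file).st_mtime + cache_max_age) >= time.time()

    # e.g. cache_is_fresh('/tmp/vmware_inventory.cache', 300)  # hypothetical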
----------------------------------------------------------------- @@ -43,9 +43,77 @@ The names of one or more YAML format files to run as ansible playbooks\&. .RE .SH "OPTIONS" .PP -\fB\-v\fR, \fB\-\-verbose\fR +\fB\-k\fR, \fB\-\-ask\-pass\fR .RS 4 -Verbose mode, more output from successful actions will be shown\&. Give up to three times for more output\&. +Prompt for the SSH password instead of assuming key\-based authentication with ssh\-agent\&. +.RE +.PP +\fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR +.RS 4 +Prompt for the password to use for playbook plays that request sudo access, if any\&. +.RE +.PP +\fB\-b\fR, \fB\-\-become\fR +.RS 4 +Run operations with become (nopasswd implied) +.RE +.PP +\fB\-\-become\-method=BECOME_METHOD\fR +.RS 4 +Privilege escalation method to use (default=sudo), valid choices: [ sudo | su | pbrun | pfexec | runas ] +.RE +.PP +\fB\-\-become\-user=BECOME_USER\fR +.RS 4 +Run operations as this user (default=None)\&. +.RE +.PP +\fB\-C\fR, \fB\-\-check\fR +.RS 4 +Do not make any changes on the remote system, but test resources to see what might have changed\&. Note this can not scan all possible resource types and is only a simulation\&. +.RE +.PP +\fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR +.RS 4 +Connection type to use\&. Possible options are +\fIparamiko\fR +(SSH), +\fIssh\fR, and +\fIlocal\fR\&. +\fIlocal\fR +is mostly useful for crontab or kickstarts\&. +.RE +.PP +\fB\-D\fR, \fB\-\-diff\fR +.RS 4 +When changing any templated files, show the unified diffs of how they changed\&. When used with \-\-check, shows how the files would have changed if \-\-check were not used\&. +.RE +.PP +\fB\-e\fR \fIVARS\fR, \fB\-\-extra\-vars=\fR\fIVARS\fR +.RS 4 +Extra variables to inject into a playbook, in key=value key=value format or as quoted JSON (hashes and arrays)\&. To load variables from a file, specify the file preceded by @ (e\&.g\&. @vars\&.yml)\&. +.RE +.PP +\fB\-\-flush\-cache\fR +.RS 4 +Clear the fact cache\&. +.RE +.PP +\fB\-\-force\-handlers\fR +.RS 4 +Run handlers even if a task fails\&. +.RE +.PP +\fB\-f\fR \fINUM\fR, \fB\-\-forks=\fR\fINUM\fR +.RS 4 +Level of parallelism\&. +\fINUM\fR +is specified as an integer, the default is 5\&. +.RE +.PP +\fB\-h\fR, \fB\-\-help\fR +.RS 4 +Show help page and exit .RE .PP \fB\-i\fR \fIPATH\fR, \fB\-\-inventory=\fR\fIPATH\fR @@ -56,6 +124,26 @@ to the inventory hosts file, which defaults to \fI/etc/ansible/hosts\fR\&. .RE .PP +\fB\-l\fR \fISUBSET\fR, \fB\-\-limit=\fR\fISUBSET\fR +.RS 4 +Further limits the selected host/group patterns\&. +.RE +.PP +\fB\-\-list\-hosts\fR +.RS 4 +Outputs a list of matching hosts; does not execute anything else\&. +.RE +.PP +\fB\-\-list\-tags\fR +.RS 4 +List all available tags\&. +.RE +.PP +\fB\-\-list\-tasks\fR +.RS 4 +List all tasks that would be executed +.RE +.PP \fB\-M\fR \fIDIRECTORY\fR, \fB\-\-module\-path=\fR\fIDIRECTORY\fR .RS 4 The @@ -64,36 +152,44 @@ search path to load modules from\&. The default is \fI/usr/share/ansible\fR\&. This can also be set with the ANSIBLE_LIBRARY environment variable\&. .RE .PP -\fB\-e\fR \fIVARS\fR, \fB\-\-extra\-vars=\fR\fIVARS\fR +\fB\-\-private\-key=\fR\fIPRIVATE_KEY_FILE\fR .RS 4 -Extra variables to inject into a playbook, in key=value key=value format or as quoted JSON (hashes and arrays)\&. To load variables from a file, specify the file preceded by @ (e\&.g\&. @vars\&.yml)\&. 
+Use this file to authenticate the connection
.RE
.PP
-\fB\-f\fR \fINUM\fR, \fB\-\-forks=\fR\fINUM\fR
+\fB\-\-skip\-tags=\fR\fISKIP_TAGS\fR
.RS 4
-Level of parallelism\&.
-\fINUM\fR
-is specified as an integer, the default is 5\&.
+Only run plays and tasks whose tags do not match these values\&.
.RE
.PP
-\fB\-k\fR, \fB\-\-ask\-pass\fR
+\fB\-\-start\-at\-task=\fR\fISTART_AT\fR
.RS 4
-Prompt for the SSH password instead of assuming key\-based authentication with ssh\-agent\&.
+Start the playbook at the task matching this name\&.
.RE
.PP
-\fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR
+\fB\-\-step\fR
.RS 4
-Prompt for the password to use for playbook plays that request sudo access, if any\&.
+One\-step\-at\-a\-time: confirm each task before running\&.
.RE
.PP
-\fB\-U\fR, \fISUDO_USER\fR, \fB\-\-sudo\-user=\fR\fISUDO_USER\fR
+\fB\-S\fR, \fB\-\-su\fR
.RS 4
-Desired sudo user (default=root)\&.
+Run operations with su (deprecated, use become)
.RE
.PP
-\fB\-t\fR, \fITAGS\fR, \fB\-\-tags=\fR\fITAGS\fR
+\fB\-R\fR \fISU_USER\fR, \fB\-\-su\-user=\fR\fISU_USER\fR
.RS 4
-Only run plays and tasks tagged with these values\&.
+Run operations with su as this user (default=root) (deprecated, use become)
+.RE
+.PP
+\fB\-s\fR, \fB\-\-sudo\fR
+.RS 4
+Run operations with sudo (nopasswd) (deprecated, use become)
+.RE
+.PP
+\fB\-U\fR, \fISUDO_USER\fR, \fB\-\-sudo\-user=\fR\fISUDO_USER\fR
+.RS 4
+Desired sudo user (default=root) (deprecated, use become)\&.
.RE
.PP
\fB\-\-skip\-tags=\fR\fISKIP_TAGS\fR
@@ -106,14 +202,9 @@ Only run plays and tasks whose tags do not match these values\&.
Look for syntax errors in the playbook, but don\(cqt run anything
.RE
.PP
-\fB\-\-check\fR
-.RS 4
-Do not make any changes on the remote system, but test resources to see what might have changed\&. Note this can not scan all possible resource types and is only a simulation\&.
-.RE
-.PP
-\fB\-\-diff\fR
+\fB\-t\fR, \fITAGS\fR, \fB\-\-tags=\fR\fITAGS\fR
.RS 4
-When changing any templated files, show the unified diffs of how they changed\&. When used with \-\-check, shows how the files would have changed if \-\-check were not used\&.
+Only run plays and tasks tagged with these values\&.
.RE
.PP
\fB\-T\fR \fISECONDS\fR, \fB\-\-timeout=\fR\fISECONDS\fR
@@ -122,35 +213,24 @@ Connection timeout to use when trying to talk to hosts, in
\fISECONDS\fR\&.
.RE
.PP
-\fB\-s\fR, \fB\-\-sudo\fR
-.RS 4
-Force all plays to use sudo, even if not marked as such\&.
-.RE
-.PP
\fB\-u\fR \fIUSERNAME\fR, \fB\-\-user=\fR\fIUSERNAME\fR
.RS 4
Use this remote user name on playbook steps that do not indicate a user name to run as\&.
.RE
.PP
-\fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR
+\fB\-\-vault\-password\-file=\fR\fIVAULT_PASSWORD_FILE\fR
.RS 4
-Connection type to use\&. Possible options are
-\fIparamiko\fR
-(SSH),
-\fIssh\fR, and
-\fIlocal\fR\&.
-\fIlocal\fR
-is mostly useful for crontab or kickstarts\&.
+Vault password file\&.
.RE
.PP
-\fB\-l\fR \fISUBSET\fR, \fB\-\-limit=\fR\fISUBSET\fR
+\fB\-v\fR, \fB\-\-verbose\fR
.RS 4
-Further limits the selected host/group patterns\&.
+Verbose mode, more output from successful actions will be shown\&. Give up to three times for more output\&.
.RE
.PP
-\fB\-\-list\-hosts\fR
+\fB\-\-version\fR
.RS 4
-Outputs a list of matching hosts; does not execute anything else\&.
+Show program\(cqs version number and exit\&.
.RE .SH "ENVIRONMENT" .sp diff --git a/docs/man/man1/ansible-playbook.1.asciidoc.in b/docs/man/man1/ansible-playbook.1.asciidoc.in index 44513d11112..8b8ba9c4688 100644 --- a/docs/man/man1/ansible-playbook.1.asciidoc.in +++ b/docs/man/man1/ansible-playbook.1.asciidoc.in @@ -34,22 +34,44 @@ The names of one or more YAML format files to run as ansible playbooks. OPTIONS ------- -*-v*, *--verbose*:: +*-k*, *--ask-pass*:: -Verbose mode, more output from successful actions will be shown. Give -up to three times for more output. +Prompt for the SSH password instead of assuming key-based +authentication with ssh-agent. -*-i* 'PATH', *--inventory=*'PATH':: +*-K*, *--ask-sudo-pass*:: -The 'PATH' to the inventory hosts file, which defaults to -'/etc/ansible/hosts'. +Prompt for the password to use for playbook plays that request sudo +access, if any. +*-b*, *--become*:: -*-M* 'DIRECTORY', *--module-path=*'DIRECTORY':: +Run operations with become (nopasswd implied) -The 'DIRECTORY' search path to load modules from. The default is -'/usr/share/ansible'. This can also be set with the ANSIBLE_LIBRARY -environment variable. +*--become-method=BECOME_METHOD*:: + +Privilege escalation method to use (default=sudo), +valid choices: [ sudo | su | pbrun | pfexec | runas ] + +*--become-user=BECOME_USER*:: + +Run operations as this user (default=None). + +*-C*, *--check*:: + +Do not make any changes on the remote system, but test resources to see what might +have changed. Note this can not scan all possible resource types and is only +a simulation. + +*-c* 'CONNECTION', *--connection=*'CONNECTION':: + +Connection type to use. Possible options are 'paramiko' (SSH), 'ssh', +and 'local'. 'local' is mostly useful for crontab or kickstarts. + +*-D*, *--diff*:: + +When changing any templated files, show the unified diffs of how they changed. When +used with --check, shows how the files would have changed if --check were not used. *-e* 'VARS', *--extra-vars=*'VARS':: @@ -57,76 +79,115 @@ Extra variables to inject into a playbook, in key=value key=value format or as quoted JSON (hashes and arrays). To load variables from a file, specify the file preceded by @ (e.g. @vars.yml). +*--flush-cache*:: + +Clear the fact cache. + +*--force-handlers*:: + +Run handlers even if a task fails. + *-f* 'NUM', *--forks=*'NUM':: Level of parallelism. 'NUM' is specified as an integer, the default is 5. +*-h*, *--help*:: -*-k*, *--ask-pass*:: +Show help page and exit -Prompt for the SSH password instead of assuming key-based -authentication with ssh-agent. +*-i* 'PATH', *--inventory=*'PATH':: +The 'PATH' to the inventory hosts file, which defaults to +'/etc/ansible/hosts'. -*-K*, *--ask-sudo-pass*:: +*-l* 'SUBSET', *--limit=*'SUBSET':: -Prompt for the password to use for playbook plays that request sudo -access, if any. +Further limits the selected host/group patterns. -*-U*, 'SUDO_USER', *--sudo-user=*'SUDO_USER':: +*--list-hosts*:: + +Outputs a list of matching hosts; does not execute anything else. -Desired sudo user (default=root). +*--list-tags*:: -*-t*, 'TAGS', *--tags=*'TAGS':: +List all available tags. -Only run plays and tasks tagged with these values. +*--list-tasks*:: -*--skip-tags=*'SKIP_TAGS':: +List all tasks that would be executed -Only run plays and tasks whose tags do not match these values. +*-M* 'DIRECTORY', *--module-path=*'DIRECTORY':: -*--syntax-check*:: +The 'DIRECTORY' search path to load modules from. The default is +'/usr/share/ansible'. This can also be set with the ANSIBLE_LIBRARY +environment variable. 
-Look for syntax errors in the playbook, but don't run anything
+*--private-key=*'PRIVATE_KEY_FILE'::
-*--check*::
+Use this file to authenticate the connection
-Do not make any changes on the remote system, but test resources to see what might
-have changed. Note this can not scan all possible resource types and is only
-a simulation.
+*--skip-tags=*'SKIP_TAGS'::
-*--diff*::
+Only run plays and tasks whose tags do not match these values.
-When changing any templated files, show the unified diffs of how they changed. When
-used with --check, shows how the files would have changed if --check were not used.
+*--start-at-task=*'START_AT'::
-*-T* 'SECONDS', *--timeout=*'SECONDS'::
+Start the playbook at the task matching this name.
-Connection timeout to use when trying to talk to hosts, in 'SECONDS'.
+*--step*::
+
+One-step-at-a-time: confirm each task before running.
+*-S*, *--su*::
+
+Run operations with su (deprecated, use become)
+
+*-R* 'SU_USER', *--su-user=*'SU_USER'::
+
+Run operations with su as this user (default=root)
+(deprecated, use become)
*-s*, *--sudo*::
-Force all plays to use sudo, even if not marked as such.
+Run operations with sudo (nopasswd) (deprecated, use become)
+*-U*, 'SUDO_USER', *--sudo-user=*'SUDO_USER'::
+
+Desired sudo user (default=root) (deprecated, use become).
+
+*--skip-tags=*'SKIP_TAGS'::
+
+Only run plays and tasks whose tags do not match these values.
+
+*--syntax-check*::
+
+Look for syntax errors in the playbook, but don't run anything
+
+*-t*, 'TAGS', *--tags=*'TAGS'::
+
+Only run plays and tasks tagged with these values.
+
+*-T* 'SECONDS', *--timeout=*'SECONDS'::
+
+Connection timeout to use when trying to talk to hosts, in 'SECONDS'.
*-u* 'USERNAME', *--user=*'USERNAME'::
Use this remote user name on playbook steps that do not indicate
a user name to run as.
-*-c* 'CONNECTION', *--connection=*'CONNECTION'::
+*--vault-password-file=*'VAULT_PASSWORD_FILE'::
-Connection type to use. Possible options are 'paramiko' (SSH), 'ssh',
-and 'local'. 'local' is mostly useful for crontab or kickstarts.
+Vault password file.
-*-l* 'SUBSET', *--limit=*'SUBSET'::
+*-v*, *--verbose*::
-Further limits the selected host/group patterns.
+Verbose mode, more output from successful actions will be shown. Give
+up to three times for more output.
-*--list-hosts*::
+*--version*::
-Outputs a list of matching hosts; does not execute anything else.
+Show program's version number and exit.
ENVIRONMENT
-----------
diff --git a/docs/man/man1/ansible-pull.1 b/docs/man/man1/ansible-pull.1
index 029d1e45bbc..8e9bc6a8f5b 100644
--- a/docs/man/man1/ansible-pull.1
+++ b/docs/man/man1/ansible-pull.1
@@ -2,12 +2,12 @@
.\" Title: ansible
.\" Author: :doctype:manpage
.\" Generator: DocBook XSL Stylesheets v1.78.1
-.\" Date: 05/05/2015
+.\" Date: 07/22/2015
.\" Manual: System administration commands
-.\" Source: Ansible 2.0.0
+.\" Source: Ansible %VERSION%
.\" Language: English
.\"
-.TH "ANSIBLE" "1" "05/05/2015" "Ansible 2\&.0\&.0" "System administration commands"
+.TH "ANSIBLE" "1" "07/22/2015" "Ansible %VERSION%" "System administration commands"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -50,14 +50,14 @@ The name of one the YAML format files to run as an ansible playbook\&. This can
.RE
.SH "OPTIONS"
.PP
-\fB\-d\fR \fIDEST\fR, \fB\-\-directory=\fR\fIDEST\fR
+\fB\-\-accept\-host\-key\fR
.RS 4
-Directory to checkout repository into\&.
If not provided, a subdirectory of ~/\&.ansible/pull/ will be used\&.
+Adds the hostkey for the repo URL if not already added\&.
.RE
.PP
-\fB\-U\fR \fIURL\fR, \fB\-\-url=\fR\fIURL\fR
+\fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR
.RS 4
-URL of the playbook repository to checkout\&.
+Ask for sudo password\&.
.RE
.PP
\fB\-C\fR \fICHECKOUT\fR, \fB\-\-checkout=\fR\fICHECKOUT\fR
@@ -65,11 +65,26 @@ URL of the playbook repository to checkout\&.
Branch/Tag/Commit to checkout\&. If not provided, uses default behavior of module used to check out playbook repository\&.
.RE
.PP
+\fB\-d\fR \fIDEST\fR, \fB\-\-directory=\fR\fIDEST\fR
+.RS 4
+Directory to checkout repository into\&. If not provided, a subdirectory of ~/\&.ansible/pull/ will be used\&.
+.RE
+.PP
+\fB\-e\fR \fIEXTRA_VARS\fR, \fB\-\-extra\-vars=\fR\fIEXTRA_VARS\fR
+.RS 4
+Set additional variables as key=value or YAML/JSON
+.RE
+.PP
\fB\-f\fR, \fB\-\-force\fR
.RS 4
Force running of playbook even if unable to update playbook repository\&. This can be useful, for example, to enforce run\-time state when a network connection may not always be up or possible\&.
.RE
.PP
+\fB\-h\fR, \fB\-\-help\fR
+.RS 4
+Show the help message and exit\&.
+.RE
+.PP
\fB\-i\fR \fIPATH\fR, \fB\-\-inventory=\fR\fIPATH\fR
.RS 4
The
@@ -77,9 +92,11 @@ The
to the inventory hosts file\&. This can be a relative path within the checkout\&.
.RE
.PP
-\fB\-\-purge\fR
+\fB\-\-key\-file=\fR\fIKEYFILE\fR
.RS 4
-Purge the checkout after the playbook is run\&.
+Pass
+\fI\-i \fR
+to the SSH arguments used by git\&.
.RE
.PP
\fB\-m\fR \fINAME\fR, \fB\-\-module\-name=\fR\fINAME\fR
@@ -89,7 +106,37 @@ Module used to checkout playbook repository\&. Defaults to git\&.
.PP
\fB\-o\fR, \fB\-\-only\-if\-changed\fR
.RS 4
-Run the playbook only if the repository has changed
+Only run the playbook if the repository has been updated\&.
+.RE
+.PP
+\fB\-\-purge\fR
+.RS 4
+Purge the checkout after the playbook is run\&.
+.RE
+.PP
+\fB\-s\fR \fISLEEP\fR, \fB\-\-sleep=\fR\fISLEEP\fR
+.RS 4
+Sleep for random interval (between 0 and SLEEP number of seconds) before starting\&. This is a useful way to disperse git requests\&.
+.RE
+.PP
+\fB\-t\fR \fITAGS\fR, \fB\-\-tags=\fR\fITAGS\fR
+.RS 4
+Only run plays and tasks tagged with these values\&.
+.RE
+.PP
+\fB\-U\fR \fIURL\fR, \fB\-\-url=\fR\fIURL\fR
+.RS 4
+URL of the playbook repository to checkout\&.
+.RE
+.PP
+\fB\-\-vault\-password\-file=\fR\fIVAULT_PASSWORD_FILE\fR
+.RS 4
+Vault password file\&.
+.RE
+.PP
+\fB\-v\fR, \fB\-\-verbose\fR
+.RS 4
+Pass \-vvv to ansible\-playbook\&.
.RE
.SH "AUTHOR"
.sp
diff --git a/docs/man/man1/ansible-pull.1.asciidoc.in b/docs/man/man1/ansible-pull.1.asciidoc.in
index d75fc637946..b78b7e67a2b 100644
--- a/docs/man/man1/ansible-pull.1.asciidoc.in
+++ b/docs/man/man1/ansible-pull.1.asciidoc.in
@@ -50,19 +50,26 @@ host hostname and finally a playbook named *local.yml*.
OPTIONS
-------
+*--accept-host-key*::
+
+Adds the hostkey for the repo URL if not already added.
+
+*-K*, *--ask-sudo-pass*::
+
+Ask for sudo password.
+
+*-C* 'CHECKOUT', *--checkout=*'CHECKOUT'::
+
+Branch/Tag/Commit to checkout. If not provided, uses default behavior of module used to check out playbook repository.
+
*-d* 'DEST', *--directory=*'DEST'::
Directory to checkout repository into. If not provided, a subdirectory of
~/.ansible/pull/ will be used.
-*-U* 'URL', *--url=*'URL'::
-
-URL of the playbook repository to checkout.
-
-*-C* 'CHECKOUT', *--checkout=*'CHECKOUT'::
+*-e* 'EXTRA_VARS', *--extra-vars=*'EXTRA_VARS'::
-Branch/Tag/Commit to checkout.
If not provided, uses default behavior
-of module used to check out playbook repository.
+Set additional variables as key=value or YAML/JSON

*-f*, *--force*::

@@ -70,14 +77,17 @@ Force running of playbook even if unable to update playbook repository.
This can be useful, for example, to enforce run-time state when a network
connection may not always be up or possible.

+*-h*, *--help*::
+
+Show the help message and exit.
+
*-i* 'PATH', *--inventory=*'PATH'::

-The 'PATH' to the inventory hosts file. This can be a relative path within
-the checkout.
+The 'PATH' to the inventory hosts file. This can be a relative path within the checkout.

-*--purge*::
+*--key-file=*'KEYFILE'::

-Purge the checkout after the playbook is run.
+Pass '-i ' to the SSH arguments used by git.

*-m* 'NAME', *--module-name=*'NAME'::

@@ -85,7 +95,32 @@ Module used to checkout playbook repository. Defaults to git.

*-o*, *--only-if-changed*::

-Run the playbook only if the repository has changed
+Only run the playbook if the repository has been updated.
+
+*--purge*::
+
+Purge the checkout after the playbook is run.
+
+*-s* 'SLEEP', *--sleep=*'SLEEP'::
+
+Sleep for random interval (between 0 and SLEEP number of seconds) before starting. This is a useful way to disperse git requests.
+
+*-t* 'TAGS', *--tags=*'TAGS'::
+
+Only run plays and tasks tagged with these values.
+
+*-U* 'URL', *--url=*'URL'::
+
+URL of the playbook repository to checkout.
+
+*--vault-password-file=*'VAULT_PASSWORD_FILE'::
+
+Vault password file.
+
+*-v*, *--verbose*::
+
+Pass -vvv to ansible-playbook.
+
AUTHOR
------
diff --git a/docs/man/man1/ansible.1 b/docs/man/man1/ansible.1
index 102ba7e5b0e..83bfc0500dd 100644
--- a/docs/man/man1/ansible.1
+++ b/docs/man/man1/ansible.1
@@ -2,12 +2,12 @@
.\" Title: ansible
.\" Author: :doctype:manpage
.\" Generator: DocBook XSL Stylesheets v1.78.1
-.\" Date: 05/05/2015
+.\" Date: 07/15/2015
.\" Manual: System administration commands
-.\" Source: Ansible 2.0.0
+.\" Source: Ansible %VERSION%
.\" Language: English
.\"
-.TH "ANSIBLE" "1" "05/05/2015" "Ansible 2\&.0\&.0" "System administration commands"
+.TH "ANSIBLE" "1" "07/15/2015" "Ansible %VERSION%" "System administration commands"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -43,9 +43,86 @@ A name of a group in the inventory file, a shell\-like glob selecting hosts in i
.RE
.SH "OPTIONS"
.PP
-\fB\-v\fR, \fB\-\-verbose\fR
+\fB\-a\fR \*(Aq\fIARGUMENTS\fR\*(Aq, \fB\-\-args=\fR\*(Aq\fIARGUMENTS\fR\*(Aq
.RS 4
-Verbose mode, more output from successful actions will be shown\&. Give up to three times for more output\&.
+The
+\fIARGUMENTS\fR
+to pass to the module\&.
+.RE
+.PP
+\fB\-\-ask\-become\-pass\fR
+.RS 4
+Ask for privilege escalation password\&.
+.RE
+.PP
+\fB\-k\fR, \fB\-\-ask\-pass\fR
+.RS 4
+Prompt for the SSH password instead of assuming key\-based authentication with ssh\-agent\&.
+.RE
+.PP
+\fB\-\-ask\-su\-pass\fR
+.RS 4
+Prompt for su password (deprecated, use become)\&.
+.RE
+.PP
+\fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR
+.RS 4
+Prompt for the password to use with \-\-sudo, if any\&.
+.RE
+.PP
+\fB\-\-ask\-vault\-pass\fR
+.RS 4
+Prompt for vault password\&.
+.RE
+.PP
+\fB\-B\fR \fINUM\fR, \fB\-\-background=\fR\fINUM\fR
+.RS 4
+Run commands in the background, killing the task after
+\fINUM\fR
+seconds\&.
+.RE
+.PP
+\fB\-\-become\-method=\fR\fIBECOME_METHOD\fR
+.RS 4
+Privilege escalation method to use (default=sudo), valid choices: [ sudo | su | pbrun | pfexec | runas ]
+.RE
+.PP
+\fB\-\-become\-user=\fR\fIBECOME_USER\fR
+.RS 4
+Run operations as this user (default=None)\&.
+.RE
+.PP
+\fB\-C\fR, \fB\-\-check\fR
+.RS 4
+Don\(cqt make any changes; instead try to predict some of the changes that may occur\&.
+.RE
+.PP
+\fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR
+.RS 4
+Connection type to use\&. Possible options are
+\fIparamiko\fR
+(SSH),
+\fIssh\fR, and
+\fIlocal\fR\&.
+\fIlocal\fR
+is mostly useful for crontab or kickstarts\&.
+.RE
+.PP
+\fB\-e\fR \fIEXTRA_VARS\fR, \fB\-\-extra\-vars=\fR\fIEXTRA_VARS\fR
+.RS 4
+Set additional variables as key=value or YAML/JSON\&.
+.RE
+.PP
+\fB\-f\fR \fINUM\fR, \fB\-\-forks=\fR\fINUM\fR
+.RS 4
+Level of parallelism\&.
+\fINUM\fR
+is specified as an integer, the default is 5\&.
+.RE
+.PP
+\fB\-h\fR, \fB\-\-help\fR
+.RS 4
+Show help message and exit\&.
.RE
.PP
\fB\-i\fR \fIPATH\fR, \fB\-\-inventory=\fR\fIPATH\fR
@@ -56,16 +133,19 @@ to the inventory hosts file, which defaults to
\fI/etc/ansible/hosts\fR\&.
.RE
.PP
-\fB\-f\fR \fINUM\fR, \fB\-\-forks=\fR\fINUM\fR
+\fB\-l\fR \fISUBSET\fR, \fB\-\-limit=\fR\fISUBSET\fR
.RS 4
-Level of parallelism\&.
-\fINUM\fR
-is specified as an integer, the default is 5\&.
+Further limits the selected host/group patterns\&.
.RE
.PP
-\fB\-\-private\-key=\fR\fIPRIVATE_KEY_FILE\fR
+\fB\-l\fR \fI~REGEX\fR, \fB\-\-limit=\fR\fI~REGEX\fR
.RS 4
-Use this file to authenticate the connection\&.
+Further limits hosts with a regex pattern\&.
+.RE
+.PP
+\fB\-\-list\-hosts\fR
+.RS 4
+Outputs a list of matching hosts; does not execute anything else\&.
.RE
.PP
\fB\-m\fR \fINAME\fR, \fB\-\-module\-name=\fR\fINAME\fR
@@ -82,26 +162,32 @@ search path to load modules from\&. The default is
\fI/usr/share/ansible\fR\&. This can also be set with the ANSIBLE_LIBRARY environment variable\&.
.RE
.PP
-\fB\-a\fR \*(Aq\fIARGUMENTS\fR\*(Aq, \fB\-\-args=\fR\*(Aq\fIARGUMENTS\fR\*(Aq
+\fB\-o\fR, \fB\-\-one\-line\fR
.RS 4
-The
-\fIARGUMENTS\fR
-to pass to the module\&.
+Try to output everything on one line\&.
.RE
.PP
-\fB\-k\fR, \fB\-\-ask\-pass\fR
+\fB\-P\fR \fINUM\fR, \fB\-\-poll=\fR\fINUM\fR
.RS 4
-Prompt for the SSH password instead of assuming key\-based authentication with ssh\-agent\&.
+Poll a background job every
+\fINUM\fR
+seconds\&. Requires
+\fB\-B\fR\&.
.RE
.PP
-\fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR
+\fB\-\-private\-key=\fR\fIPRIVATE_KEY_FILE\fR
.RS 4
-Prompt for the password to use with \-\-sudo, if any
+Use this file to authenticate the connection\&.
.RE
.PP
-\fB\-o\fR, \fB\-\-one\-line\fR
+\fB\-S\fR, \fB\-\-su\fR
.RS 4
-Try to output everything on one line\&.
+Run operations with su (deprecated, use become)\&.
+.RE
+.PP
+\fB\-R\fR \fISU_USER\fR, \fB\-\-su\-user=\fR\fISU_USER\fR
+.RS 4
+Run operations with su as this user (default=root) (deprecated, use become)
.RE
.PP
\fB\-s\fR, \fB\-\-sudo\fR
@@ -109,6 +195,13 @@ Try to output everything on one line\&.
Run the command as the user given by \-u and sudo to root\&.
.RE
.PP
+\fB\-U\fR \fISUDO_USERNAME\fR, \fB\-\-sudo\-user=\fR\fISUDO_USERNAME\fR
+.RS 4
+Sudo to
+\fISUDO_USERNAME\fR
+instead of root\&. Implies \-\-sudo\&.
+.RE
+.PP
\fB\-t\fR \fIDIRECTORY\fR, \fB\-\-tree=\fR\fIDIRECTORY\fR
.RS 4
Save contents in this output
@@ -121,21 +214,6 @@ Connection timeout to use when trying to talk to hosts, in
\fISECONDS\fR\&.
.RE
.PP
-\fB\-B\fR \fINUM\fR, \fB\-\-background=\fR\fINUM\fR
-.RS 4
-Run commands in the background, killing the task after
-\fINUM\fR
-seconds\&.
-.RE
-.PP
-\fB\-P\fR \fINUM\fR, \fB\-\-poll=\fR\fINUM\fR
-.RS 4
-Poll a background job every
-\fINUM\fR
-seconds\&. Requires
-\fB\-B\fR\&.
-.RE
-.PP
\fB\-u\fR \fIUSERNAME\fR, \fB\-\-user=\fR\fIUSERNAME\fR
.RS 4
Use this remote
@@ -143,37 +221,19 @@ Use this remote
instead of the current user\&.
.RE
.PP
-\fB\-U\fR \fISUDO_USERNAME\fR, \fB\-\-sudo\-user=\fR\fISUDO_USERNAME\fR
+\fB\-\-vault\-password\-file=\fR\fIVAULT_PASSWORD_FILE\fR
.RS 4
-Sudo to
-\fISUDO_USERNAME\fR
-instead of root\&. Implies \-\-sudo\&.
+Vault password file\&.
.RE
.PP
-\fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR
-.RS 4
-Connection type to use\&. Possible options are
-\fIparamiko\fR
-(SSH),
-\fIssh\fR, and
-\fIlocal\fR\&.
-\fIlocal\fR
-is mostly useful for crontab or kickstarts\&.
-.RE
-.PP
-\fB\-l\fR \fISUBSET\fR, \fB\-\-limit=\fR\fISUBSET\fR
-.RS 4
-Further limits the selected host/group patterns\&.
-.RE
-.PP
-\fB\-l\fR \fI~REGEX\fR, \fB\-\-limit=\fR\fI~REGEX\fR
+\fB\-v\fR, \fB\-\-verbose\fR
.RS 4
-Further limits hosts with a regex pattern\&.
+Verbose mode, more output from successful actions will be shown\&. Give up to three times for more output\&.
.RE
.PP
-\fB\-\-list\-hosts\fR
+\fB\-\-version\fR
.RS 4
-Outputs a list of matching hosts; does not execute anything else\&.
+Show program version number and exit\&.
.RE
.SH "INVENTORY"
.sp
diff --git a/docs/man/man1/ansible.1.asciidoc.in b/docs/man/man1/ansible.1.asciidoc.in
index f0f81b7d9bd..26bd0144d4e 100644
--- a/docs/man/man1/ansible.1.asciidoc.in
+++ b/docs/man/man1/ansible.1.asciidoc.in
@@ -34,56 +34,119 @@ semicolons.
OPTIONS
-------
-*-v*, *--verbose*::
+*-a* \'_ARGUMENTS_', *--args=*\'_ARGUMENTS_'::
-Verbose mode, more output from successful actions will be shown. Give
-up to three times for more output.
+The 'ARGUMENTS' to pass to the module.
-*-i* 'PATH', *--inventory=*'PATH'::
+*--ask-become-pass*::
-The 'PATH' to the inventory hosts file, which defaults to '/etc/ansible/hosts'.
+Ask for privilege escalation password.
+
+*-k*, *--ask-pass*::
+
+Prompt for the SSH password instead of assuming key-based authentication with ssh-agent.
+
+*--ask-su-pass*::
+
+Prompt for su password (deprecated, use become).
+
+*-K*, *--ask-sudo-pass*::
+
+Prompt for the password to use with --sudo, if any.
+
+*--ask-vault-pass*::
+
+Prompt for vault password.
+
+*-B* 'NUM', *--background=*'NUM'::
+
+Run commands in the background, killing the task after 'NUM' seconds.
+
+*--become-method=*'BECOME_METHOD'::
+
+Privilege escalation method to use (default=sudo),
+valid choices: [ sudo | su | pbrun | pfexec | runas ]
+
+*--become-user=*'BECOME_USER'::
+Run operations as this user (default=None).
+
+*-C*, *--check*::
+
+Don't make any changes; instead try to predict some of the changes that may occur.
+
+*-c* 'CONNECTION', *--connection=*'CONNECTION'::
+
+Connection type to use. Possible options are 'paramiko' (SSH), 'ssh',
+and 'local'. 'local' is mostly useful for crontab or kickstarts.
+
+*-e* 'EXTRA_VARS', *--extra-vars=*'EXTRA_VARS'::
+
+Set additional variables as key=value or YAML/JSON.

*-f* 'NUM', *--forks=*'NUM'::

Level of parallelism. 'NUM' is specified as an integer, the default is 5.

-*--private-key=*'PRIVATE_KEY_FILE'::
+*-h*, *--help*::
-Use this file to authenticate the connection.
+Show help message and exit.
+
+*-i* 'PATH', *--inventory=*'PATH'::
+
+The 'PATH' to the inventory hosts file, which defaults to '/etc/ansible/hosts'.
+
+*-l* 'SUBSET', *--limit=*'SUBSET'::
+
+Further limits the selected host/group patterns.
+
+*-l* '\~REGEX', *--limit=*'~REGEX'::
+Further limits hosts with a regex pattern.
+
+*--list-hosts*::
+
+Outputs a list of matching hosts; does not execute anything else.

*-m* 'NAME', *--module-name=*'NAME'::

Execute the module called 'NAME'.
-
*-M* 'DIRECTORY', *--module-path=*'DIRECTORY'::

The 'DIRECTORY' search path to load modules from. The default is
'/usr/share/ansible'. This can also be set with the ANSIBLE_LIBRARY
environment variable.

-*-a* \'_ARGUMENTS_', *--args=*\'_ARGUMENTS_'::
+*-o*, *--one-line*::

-The 'ARGUMENTS' to pass to the module.
+Try to output everything on one line.

-*-k*, *--ask-pass*::
+*-P* 'NUM', *--poll=*'NUM'::

-Prompt for the SSH password instead of assuming key-based authentication with ssh-agent.
+Poll a background job every 'NUM' seconds. Requires *-B*.

-*-K*, *--ask-sudo-pass*::
+*--private-key=*'PRIVATE_KEY_FILE'::

-Prompt for the password to use with --sudo, if any
+Use this file to authenticate the connection.

-*-o*, *--one-line*::
+*-S*, *--su*::

-Try to output everything on one line.
+Run operations with su (deprecated, use become).
+
+*-R* 'SU_USER', *--su-user=*'SU_USER'::
+
+Run operations with su as this user (default=root)
+(deprecated, use become)

*-s*, *--sudo*::

Run the command as the user given by -u and sudo to root.

+*-U* 'SUDO_USERNAME', *--sudo-user=*'SUDO_USERNAME'::
+
+Sudo to 'SUDO_USERNAME' instead of root. Implies --sudo.
+
*-t* 'DIRECTORY', *--tree=*'DIRECTORY'::

Save contents in this output 'DIRECTORY', with the results saved in a
@@ -93,38 +156,22 @@ file named after each host.

Connection timeout to use when trying to talk to hosts, in 'SECONDS'.

-*-B* 'NUM', *--background=*'NUM'::
-
-Run commands in the background, killing the task after 'NUM' seconds.
-
-*-P* 'NUM', *--poll=*'NUM'::
-
-Poll a background job every 'NUM' seconds. Requires *-B*.
-
*-u* 'USERNAME', *--user=*'USERNAME'::

Use this remote 'USERNAME' instead of the current user.

-*-U* 'SUDO_USERNAME', *--sudo-user=*'SUDO_USERNAME'::
-
-Sudo to 'SUDO_USERNAME' instead of root. Implies --sudo.
-
-*-c* 'CONNECTION', *--connection=*'CONNECTION'::
-
-Connection type to use. Possible options are 'paramiko' (SSH), 'ssh',
-and 'local'. 'local' is mostly useful for crontab or kickstarts.
-
-*-l* 'SUBSET', *--limit=*'SUBSET'::
+*--vault-password-file=*'VAULT_PASSWORD_FILE'::

-Further limits the selected host/group patterns.
+Vault password file.

-*-l* '\~REGEX', *--limit=*'~REGEX'::
+*-v*, *--verbose*::

-Further limits hosts with a regex pattern.
+Verbose mode, more output from successful actions will be shown. Give
+up to three times for more output.

-*--list-hosts*::
+*--version*::

-Outputs a list of matching hosts; does not execute anything else.
+Show program version number and exit.

INVENTORY
---------
diff --git a/docsite/_themes/srtd/footer.html b/docsite/_themes/srtd/footer.html
index b6422f9a2dd..b70cfde7ad8 100644
--- a/docsite/_themes/srtd/footer.html
+++ b/docsite/_themes/srtd/footer.html
@@ -20,6 +20,6 @@
{%- endif %}

-Ansible docs are generated from GitHub sources using Sphinx using a theme provided by Read the Docs. {% if pagename.endswith("_module") %}. Module documentation is not edited directly, but is generated from the source code for the modules. To submit an update to module docs, edit the 'DOCUMENTATION' metadata in the core and extras modules source repositories. {% endif %}
+Ansible docs are generated from GitHub sources using Sphinx, with a theme provided by Read the Docs. {% if pagename.endswith("_module") %} Module documentation is not edited directly, but is generated from the source code for the modules. To submit an update to module docs, edit the 'DOCUMENTATION' metadata in the core and extras modules source repositories. {% endif %}
diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html
index b9d9d065c7b..74fb6008ba0 100644
--- a/docsite/_themes/srtd/layout.html
+++ b/docsite/_themes/srtd/layout.html
@@ -113,23 +113,8 @@
}
-
-
-
-
-
+
@@ -140,16 +125,32 @@
+
+
+
+ Documentation +
+
+
{# SIDE NAV, TOGGLES ON MOBILE #}