diff --git a/.gitmodules b/.gitmodules
index 3f14953ec8f..793522a29c6 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,16 +1,12 @@
[submodule "lib/ansible/modules/core"]
path = lib/ansible/modules/core
- url = https://github.com/ansible/ansible-modules-core.git
- branch = devel
+ url = https://github.com/ansible/ansible-modules-core
[submodule "lib/ansible/modules/extras"]
path = lib/ansible/modules/extras
- url = https://github.com/ansible/ansible-modules-extras.git
- branch = devel
-[submodule "v2/ansible/modules/core"]
- path = v2/ansible/modules/core
- url = https://github.com/ansible/ansible-modules-core.git
- branch = devel
-[submodule "v2/ansible/modules/extras"]
- path = v2/ansible/modules/extras
- url = https://github.com/ansible/ansible-modules-extras.git
- branch = devel
+ url = https://github.com/ansible/ansible-modules-extras
+[submodule "v1/ansible/modules/core"]
+ path = v1/ansible/modules/core
+ url = https://github.com/ansible/ansible-modules-core
+[submodule "v1/ansible/modules/extras"]
+ path = v1/ansible/modules/extras
+ url = https://github.com/ansible/ansible-modules-extras
diff --git a/.travis.yml b/.travis.yml
index 6e18e06050c..335a8e58e31 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,11 +1,20 @@
sudo: false
language: python
env:
+ - TOXENV=py24
- TOXENV=py26
- TOXENV=py27
+addons:
+ apt:
+ sources:
+ - deadsnakes
+ packages:
+ - python2.4
install:
- - pip install tox
+ - pip install tox PyYAML Jinja2 sphinx
script:
- - tox
+- if test x"$TOKENV" != x'py24' ; then tox ; fi
+- if test x"$TOKENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi
+ #- make -C docsite all
after_success:
- coveralls
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 98006503692..fdf0e4aa846 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,19 +4,47 @@ Ansible Changes By Release
## 2.0 "TBD" - ACTIVE DEVELOPMENT
Major Changes:
+ * Introducing the new block/rescue/always directives, which allow grouping tasks into blocks with exception-like semantics (see the sketch after this list)
+ * New strategy plugins allow controlling the flow of task execution per play; the default behaves the same as before
+ * Improved error handling: parser messages are now much more detailed, and general exception handling and display have been revamped.
+ * Task includes are now evaluated during execution; the end behaviour is the same, but this allows for more dynamic includes and options.
+ * The first benefit of dynamic includes is that with_ loops are now usable with them.
+ * callback, connection and lookup plugin APIs have changed; some plugins will require modification to work with the new version
+ * callbacks are now shipped in the active directory and don't need to be copied, just whitelisted in ansible.cfg
+ * Many API changes; this will break code that uses the API directly, but the new API is much easier to use and test
+ * Settings are now more inheritable: what you set at the play, block or role level is automatically inherited by the tasks they contain.
+ This allows new features to be settable at all levels automatically; previously this had to be coded manually
+ * Many more tests; the new API makes things more testable and we took advantage of it
* big_ip modules now support turning off ssl certificate validation (use only for self signed)
- * template code now retains types for bools and Numbers instead of turning them into strings
+ * template code now retains types for bools and numbers instead of turning them into strings.
If you need the old behaviour, quote the value and it will get passed around as a string
+ * Consolidated code from modules using urllib2 to normalize features, including TLS and SNI support
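A minimal sketch of the new block/rescue/always syntax referenced in the first entry above; the hosts pattern, task names and modules are illustrative only and are not taken from this changeset:

    - hosts: all
      tasks:
        - block:
            - name: task that may fail
              command: /bin/false
          rescue:
            - name: runs only if a task in the block failed
              debug: msg="recovering from the failure"
          always:
            - name: runs regardless of the block's outcome
              debug: msg="cleaning up"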
Deprecated Modules (new ones in parens):
* ec2_ami_search (ec2_ami_find)
* quantum_network (os_network)
+ * glance_image (os_image)
* nova_compute (os_server)
+ * quantum_floating_ip (os_floating_ip)
New Modules:
- * find
- * ec2_ami_find
- * ec2_win_password
+ * amazon: ec2_ami_copy
+ * amazon: ec2_ami_find
+ * amazon: ec2_eni
+ * amazon: ec2_eni_facts
+ * amazon: ec2_vpc_net
+ * amazon: ec2_vpc_route_table_facts
+ * amazon: ec2_vpc_subnet
+ * amazon: ec2_win_password
+ * amazon: elasticache_subnet_group
+ * amazon: iam
+ * amazon: iam_policy
+ * amazon: route53_zone
+ * amazon: sts_assume_role
+ * amazon: s3_logging
+ * apk
+ * bundler
+ * centurylink: clc_publicip
* circonus_annotation
* consul
* consul_acl
@@ -25,46 +53,131 @@ New Modules:
* cloudtrail
* cloudstack: cs_account
* cloudstack: cs_affinitygroup
+ * cloudstack: cs_facts
* cloudstack: cs_firewall
* cloudstack: cs_iso
* cloudstack: cs_instance
* cloudstack: cs_instancegroup
+ * cloudstack: cs_network
* cloudstack: cs_portforward
+ * cloudstack: cs_project
* cloudstack: cs_sshkeypair
* cloudstack: cs_securitygroup
* cloudstack: cs_securitygroup_rule
+ * cloudstack: cs_staticnat
+ * cloudstack: cs_template
* cloudstack: cs_vmsnapshot
+ * datadog_monitor
+ * dpkg_selections
+ * elasticsearch_plugin
+ * expect
+ * find
+ * hall
+ * libvirt: virt_net
+ * libvirt: virt_pool
* maven_artifact
+ * openstack: os_ironic
+ * openstack: os_ironic_node
+ * openstack: os_client_config
+ * openstack: os_floating_ip
+ * openstack: os_image
* openstack: os_network
+ * openstack: os_nova_flavor
+ * openstack: os_object
+ * openstack: os_security_group
+ * openstack: os_security_group_rule
* openstack: os_server
* openstack: os_server_actions
* openstack: os_server_facts
* openstack: os_server_volume
* openstack: os_subnet
* openstack: os_volume
+ * osx_defaults
+ * pam_limits
+ * pear
+ * profitbricks: profitbricks
* proxmox
+ * proxmox_template
+ * puppet
* pushover
* pushbullet
+ * rax: rax_mon_alarm
+ * rax: rax_mon_check
+ * rax: rax_mon_entity
+ * rax: rax_mon_notification
+ * rax: rax_mon_notification_plan
* rabbitmq_binding
* rabbitmq_exchange
* rabbitmq_queue
- * zabbix_host
- * zabbix_hostmacro
- * zabbix_screen
+ * selinux_permissive
+ * sensu_check
+ * sensu_subscription
+ * slackpkg
* vertica_configuration
* vertica_facts
* vertica_role
* vertica_schema
* vertica_user
- * vmware_datacenter
+ * vmware: vmware_datacenter
+ * vmware: vca_fw
+ * vmware: vca_nat
+ * vmware: vsphere_copy
+ * webfaction_app
+ * webfaction_db
+ * webfaction_domain
+ * webfaction_mailbox
+ * webfaction_site
* win_environment
+ * win_scheduled_task
+ * win_iis_virtualdirectory
+ * win_iis_webapplication
+ * win_iis_webapppool
+ * win_iis_webbinding
+ * win_iis_website
+ * win_regedit
+ * win_unzip
+ * xenserver_facts
+ * zabbix_host
+ * zabbix_hostmacro
+ * zabbix_screen
New Inventory scripts:
* cloudstack
* fleetctl
+ * openvz
+ * proxmox
+ * serf
Other Notable Changes:
+## 1.9.2 "Dancing In the Street" - Jun 26, 2015
+
+* Security fixes to check that hostnames match certificates with https urls (CVE-2015-3908)
+ - get_url and uri modules
+ - url and etcd lookup plugins
+* Security fixes to the zone (Solaris containers), jail (BSD containers),
+ and chroot connection plugins. These plugins can be used to connect to
+ their respective container types in lieu of the standard ssh connection.
+ Prior to this fix being applied these connection plugins didn't properly
+ handle symlinks within the containers which could lead to files intended to
+ be written to or read from the container being written to or read from the
+ host system instead. (CVE pending)
+* Fixed a bug in the service module where init scripts were being incorrectly used instead of upstart/systemd.
+* Fixed a bug where sudo/su settings were not inherited from ansible.cfg correctly.
+* Fixed a bug in the rds module where a traceback may occur due to an unbound variable.
+* Fixed a bug on certain remote file systems where the SELinux context was not being properly set.
+* Re-enabled several Windows modules which had been partially merged (via action plugins):
+ - win_copy.ps1
+ - win_copy.py
+ - win_file.ps1
+ - win_file.py
+ - win_template.py
+* Fix bug using with_sequence and a count that is zero. Also allows counting backwards instead of forwards
+* Fix get_url module bug preventing use of custom ports with https urls
+* Fix bug disabling repositories in the yum module.
+* Fix giving the yum module a URL to install a package from on RHEL/CentOS 5
+* Fix bug in dnf module preventing it from working when yum-utils was not already installed
+
## 1.9.1 "Dancing In the Street" - Apr 27, 2015
* Fixed a bug related to Kerberos auth when using winrm with a domain account.
@@ -99,7 +212,7 @@ Major changes:
* Added travis integration to github for basic tests, this should speed up ticket triage and merging.
* environment: directive now can also be applied to play and is inhertited by tasks, which can still override it.
* expanded facts and OS/distribution support for existing facts and improved performance with pypy.
-* new 'wantlist' option to lookups allows for selecting a list typed variable vs a command delimited string as the return.
+* new 'wantlist' option to lookups allows for selecting a list-typed variable vs a comma delimited string as the return (see the sketch below)
* the shared module code for file backups now uses a timestamp resolution of seconds (previouslly minutes).
* allow for empty inventories, this is now a warning and not an error (for those using localhost and cloud modules).
* sped up YAML parsing in ansible by up to 25% by switching to CParser loader.
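An illustrative sketch of the 'wantlist' lookup option mentioned above; the variable name and file names are placeholders, not taken from this changeset:

    - hosts: localhost
      vars:
        # with wantlist=True the lookup returns a real list
        # instead of a comma delimited string
        file_contents: "{{ lookup('file', 'one.txt', 'two.txt', wantlist=True) }}"
      tasks:
        - debug: var=file_contents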
@@ -313,7 +426,7 @@ And various other bug fixes and improvements ...
- Fixes a bug in vault where the password file option was not being used correctly internally.
- Improved multi-line parsing when using YAML literal blocks (using > or |).
- Fixed a bug with the file module and the creation of relative symlinks.
-- Fixed a bug where checkmode was not being honored during the templating of files.
+- Fixed a bug where checkmode was not being honoured during the templating of files.
- Other various bug fixes.
## 1.7.1 "Summer Nights" - Aug 14, 2014
@@ -356,7 +469,7 @@ New Modules:
Other notable changes:
* Security fixes
- - Prevent the use of lookups when using legaxy "{{ }}" syntax around variables and with_* loops.
+ - Prevent the use of lookups when using legacy "{{ }}" syntax around variables and with_* loops.
- Remove relative paths in TAR-archived file names used by ansible-galaxy.
* Inventory speed improvements for very large inventories.
* Vault password files can now be executable, to support scripts that fetch the vault password.
@@ -1033,7 +1146,7 @@ the variable is still registered for the host, with the attribute skipped: True.
* service pattern argument now correctly read for BSD services
* fetch location can now be controlled more directly via the 'flat' parameter.
* added basename and dirname as Jinja2 filters available to all templates
-* pip works better when sudoing from unpriveledged users
+* pip works better when sudoing from unprivileged users
* fix for user creation with groups specification reporting 'changed' incorrectly in some cases
* fix for some unicode encoding errors in outputing some data in verbose mode
* improved FreeBSD, NetBSD and Solaris facts
diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md
index ac252d54146..094501db906 100644
--- a/ISSUE_TEMPLATE.md
+++ b/ISSUE_TEMPLATE.md
@@ -1,6 +1,13 @@
##### Issue Type:
-Can you help us out in labelling this by telling us what kind of ticket this this? You can say “Bug Report”, “Feature Idea”, “Feature Pull Request”, “New Module Pull Request”, “Bugfix Pull Request”, “Documentation Report”, or “Docs Pull Request”.
+Can you help us out in labelling this by telling us what kind of ticket this is? You can say:
+ - Bug Report
+ - Feature Idea
+ - Feature Pull Request
+ - New Module Pull Request
+ - Bugfix Pull Request
+ - Documentation Report
+ - Docs Pull Request
##### Ansible Version:
diff --git a/MANIFEST.in b/MANIFEST.in
index f4e727d8c4d..b9bf5f42764 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -10,9 +10,12 @@ include examples/ansible.cfg
include lib/ansible/module_utils/powershell.ps1
recursive-include lib/ansible/modules *
recursive-include docs *
-recursive-include plugins *
include Makefile
include VERSION
include MANIFEST.in
+include contrib/README.md
+recursive-include contrib/inventory *
+exclude lib/ansible/modules/core/.git*
+exclude lib/ansible/modules/extras/.git*
prune lib/ansible/modules/core/.git
prune lib/ansible/modules/extras/.git
diff --git a/Makefile b/Makefile
index e01e1a9713c..69d749b7194 100644
--- a/Makefile
+++ b/Makefile
@@ -40,6 +40,11 @@ RELEASE := $(shell cat VERSION | cut -f2 -d' ')
# Get the branch information from git
ifneq ($(shell which git),)
GIT_DATE := $(shell git log -n 1 --format="%ai")
+GIT_HASH := $(shell git log -n 1 --format="%h")
+GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD | sed 's/[-_.]//g')
+GITINFO = .$(GIT_HASH).$(GIT_BRANCH)
+else
+GITINFO = ''
endif
ifeq ($(shell echo $(OS) | egrep -c 'Darwin|FreeBSD|OpenBSD'),1)
@@ -62,7 +67,7 @@ ifeq ($(OFFICIAL),yes)
DEBUILD_OPTS += -k$(DEBSIGN_KEYID)
endif
else
- DEB_RELEASE = 0.git$(DATE)
+ DEB_RELEASE = 0.git$(DATE)$(GITINFO)
# Do not sign unofficial builds
DEBUILD_OPTS += -uc -us
DPUT_OPTS += -u
@@ -78,7 +83,7 @@ RPMSPEC = $(RPMSPECDIR)/ansible.spec
RPMDIST = $(shell rpm --eval '%{?dist}')
RPMRELEASE = $(RELEASE)
ifneq ($(OFFICIAL),yes)
- RPMRELEASE = 0.git$(DATE)
+ RPMRELEASE = 0.git$(DATE)$(GITINFO)
endif
RPMNVR = "$(NAME)-$(VERSION)-$(RPMRELEASE)$(RPMDIST)"
@@ -136,7 +141,7 @@ clean:
@echo "Cleaning up byte compiled python stuff"
find . -type f -regex ".*\.py[co]$$" -delete
@echo "Cleaning up editor backup files"
- find . -type f \( -name "*~" -or -name "#*" \) -delete
+ find . -type f \( -name "*~" -or -name "#*" \) |grep -v test/units/inventory_test_data/group_vars/noparse/all.yml~ |xargs -n 1024 -r rm
find . -type f \( -name "*.swp" \) -delete
@echo "Cleaning up manpage stuff"
find ./docs/man -type f -name "*.xml" -delete
diff --git a/README.md b/README.md
index 2a7d8e03af7..cea24c84772 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-[![PyPI version](https://badge.fury.io/py/ansible.png)](http://badge.fury.io/py/ansible)
-[![PyPI downloads](https://pypip.in/d/ansible/badge.png)](https://pypi.python.org/pypi/ansible)
+[![PyPI version](https://badge.fury.io/py/ansible.svg)](http://badge.fury.io/py/ansible)
+[![PyPI downloads](https://pypip.in/d/ansible/badge.svg)](https://pypi.python.org/pypi/ansible)
[![Build Status](https://travis-ci.org/ansible/ansible.svg?branch=devel)](https://travis-ci.org/ansible/ansible)
@@ -51,7 +51,7 @@ Branch Info
Authors
=======
-Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.dehaan/gmail/com) and has contributions from over 900 users (and growing). Thanks everyone!
+Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.dehaan/gmail/com) and has contributions from over 1000 users (and growing). Thanks everyone!
Ansible is sponsored by [Ansible, Inc](http://ansible.com)
diff --git a/bin/ansible b/bin/ansible
index 7fec34ec81e..209b235c88d 100755
--- a/bin/ansible
+++ b/bin/ansible
@@ -18,6 +18,8 @@
# along with Ansible. If not, see .
########################################################
+from __future__ import (absolute_import, print_function)
+__metaclass__ = type
__requires__ = ['ansible']
try:
@@ -32,176 +34,77 @@ except Exception:
import os
import sys
+import traceback
-from ansible.runner import Runner
-import ansible.constants as C
-from ansible import utils
-from ansible import errors
-from ansible import callbacks
-from ansible import inventory
-########################################################
-
-class Cli(object):
- ''' code behind bin/ansible '''
-
- # ----------------------------------------------
-
- def __init__(self):
- self.stats = callbacks.AggregateStats()
- self.callbacks = callbacks.CliRunnerCallbacks()
- if C.DEFAULT_LOAD_CALLBACK_PLUGINS:
- callbacks.load_callback_plugins()
-
- # ----------------------------------------------
-
- def parse(self):
- ''' create an options parser for bin/ansible '''
-
- parser = utils.base_parser(
- constants=C,
- runas_opts=True,
- subset_opts=True,
- async_opts=True,
- output_opts=True,
- connect_opts=True,
- check_opts=True,
- diff_opts=False,
- usage='%prog [options]'
- )
-
- parser.add_option('-a', '--args', dest='module_args',
- help="module arguments", default=C.DEFAULT_MODULE_ARGS)
- parser.add_option('-m', '--module-name', dest='module_name',
- help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
- default=C.DEFAULT_MODULE_NAME)
-
- options, args = parser.parse_args()
- self.callbacks.options = options
-
- if len(args) == 0 or len(args) > 1:
- parser.print_help()
- sys.exit(1)
-
- # privlege escalation command line arguments need to be mutually exclusive
- utils.check_mutually_exclusive_privilege(options, parser)
-
- if (options.ask_vault_pass and options.vault_password_file):
- parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
-
- return (options, args)
-
- # ----------------------------------------------
-
- def run(self, options, args):
- ''' use Runner lib to do SSH things '''
-
- pattern = args[0]
-
- sshpass = becomepass = vault_pass = become_method = None
-
- # Never ask for an SSH password when we run with local connection
- if options.connection == "local":
- options.ask_pass = False
- else:
- options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
-
- options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
+from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
+from ansible.utils.display import Display
- # become
- utils.normalize_become_options(options)
- prompt_method = utils.choose_pass_prompt(options)
- (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, become_ask_pass=options.become_ask_pass, ask_vault_pass=options.ask_vault_pass, become_method=prompt_method)
+########################################
+### OUTPUT OF LAST RESORT ###
+class LastResort(object):
+ def display(self, msg):
+ print(msg, file=sys.stderr)
- # read vault_pass from a file
- if not options.ask_vault_pass and options.vault_password_file:
- vault_pass = utils.read_vault_file(options.vault_password_file)
+ def error(self, msg, wrap_text=None):
+ print(msg, file=sys.stderr)
- extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)
- inventory_manager = inventory.Inventory(options.inventory, vault_password=vault_pass)
- if options.subset:
- inventory_manager.subset(options.subset)
- hosts = inventory_manager.list_hosts(pattern)
-
- if len(hosts) == 0:
- callbacks.display("No hosts matched", stderr=True)
- sys.exit(0)
-
- if options.listhosts:
- for host in hosts:
- callbacks.display(' %s' % host)
- sys.exit(0)
-
- if options.module_name in ['command','shell'] and not options.module_args:
- callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True)
- sys.exit(1)
-
- if options.tree:
- utils.prepare_writeable_dir(options.tree)
-
- runner = Runner(
- module_name=options.module_name,
- module_path=options.module_path,
- module_args=options.module_args,
- remote_user=options.remote_user,
- remote_pass=sshpass,
- inventory=inventory_manager,
- timeout=options.timeout,
- private_key_file=options.private_key_file,
- forks=options.forks,
- pattern=pattern,
- callbacks=self.callbacks,
- transport=options.connection,
- subset=options.subset,
- check=options.check,
- diff=options.check,
- vault_pass=vault_pass,
- become=options.become,
- become_method=options.become_method,
- become_pass=becomepass,
- become_user=options.become_user,
- extra_vars=extra_vars,
- )
-
- if options.seconds:
- callbacks.display("background launch...\n\n", color='cyan')
- results, poller = runner.run_async(options.seconds)
- results = self.poll_while_needed(poller, options)
- else:
- results = runner.run()
-
- return (runner, results)
-
- # ----------------------------------------------
-
- def poll_while_needed(self, poller, options):
- ''' summarize results from Runner '''
-
- # BACKGROUND POLL LOGIC when -B and -P are specified
- if options.seconds and options.poll_interval > 0:
- poller.wait(options.seconds, options.poll_interval)
-
- return poller.results
-
-
-########################################################
+########################################
if __name__ == '__main__':
- callbacks.display("", log_only=True)
- callbacks.display(" ".join(sys.argv), log_only=True)
- callbacks.display("", log_only=True)
- cli = Cli()
- (options, args) = cli.parse()
+ display = LastResort()
+ cli = None
+ me = os.path.basename(sys.argv[0])
+
try:
- (runner, results) = cli.run(options, args)
- for result in results['contacted'].values():
- if 'failed' in result or result.get('rc', 0) != 0:
- sys.exit(2)
- if results['dark']:
- sys.exit(3)
- except errors.AnsibleError, e:
- # Generic handler for ansible specific errors
- callbacks.display("ERROR: %s" % str(e), stderr=True, color='red')
+ display = Display()
+
+ if me == 'ansible-playbook':
+ from ansible.cli.playbook import PlaybookCLI as mycli
+ elif me == 'ansible':
+ from ansible.cli.adhoc import AdHocCLI as mycli
+ elif me == 'ansible-pull':
+ from ansible.cli.pull import PullCLI as mycli
+ elif me == 'ansible-doc':
+ from ansible.cli.doc import DocCLI as mycli
+ elif me == 'ansible-vault':
+ from ansible.cli.vault import VaultCLI as mycli
+ elif me == 'ansible-galaxy':
+ from ansible.cli.galaxy import GalaxyCLI as mycli
+ else:
+ # unknown invocation name: fail before trying to construct a CLI object
+ raise AnsibleError("Program not implemented: %s" % me)
+
+ cli = mycli(sys.argv, display=display)
+ cli.parse()
+ sys.exit(cli.run())
+
+ except AnsibleOptionsError as e:
+ cli.parser.print_help()
+ display.error(str(e), wrap_text=False)
+ sys.exit(5)
+ except AnsibleParserError as e:
+ display.error(str(e), wrap_text=False)
+ sys.exit(4)
+# TQM takes care of these, but leaving comment to reserve the exit codes
+# except AnsibleHostUnreachable as e:
+# display.error(str(e))
+# sys.exit(3)
+# except AnsibleHostFailed as e:
+# display.error(str(e))
+# sys.exit(2)
+ except AnsibleError as e:
+ display.error(str(e), wrap_text=False)
sys.exit(1)
-
+ except KeyboardInterrupt:
+ display.error("User interrupted execution")
+ sys.exit(99)
+ except Exception as e:
+ have_cli_options = cli is not None and cli.options is not None
+ display.error("Unexpected Exception: %s" % str(e), wrap_text=False)
+ if not have_cli_options or have_cli_options and cli.options.verbosity > 2:
+ display.display("the full traceback was:\n\n%s" % traceback.format_exc())
+ else:
+ display.display("to see the full traceback, use -vvv")
+ sys.exit(250)
diff --git a/bin/ansible-doc b/bin/ansible-doc
deleted file mode 100755
index dff7cecce79..00000000000
--- a/bin/ansible-doc
+++ /dev/null
@@ -1,337 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Jan-Piet Mens
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-#
-
-import os
-import sys
-import textwrap
-import re
-import optparse
-import datetime
-import subprocess
-import fcntl
-import termios
-import struct
-
-from ansible import utils
-from ansible.utils import module_docs
-import ansible.constants as C
-from ansible.utils import version
-import traceback
-
-MODULEDIR = C.DEFAULT_MODULE_PATH
-
-BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm')
-IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"]
-
-_ITALIC = re.compile(r"I\(([^)]+)\)")
-_BOLD = re.compile(r"B\(([^)]+)\)")
-_MODULE = re.compile(r"M\(([^)]+)\)")
-_URL = re.compile(r"U\(([^)]+)\)")
-_CONST = re.compile(r"C\(([^)]+)\)")
-PAGER = 'less'
-LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars)
- # -S (chop long lines) -X (disable termcap init and de-init)
-
-def pager_print(text):
- ''' just print text '''
- print text
-
-def pager_pipe(text, cmd):
- ''' pipe text through a pager '''
- if 'LESS' not in os.environ:
- os.environ['LESS'] = LESS_OPTS
- try:
- cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
- cmd.communicate(input=text)
- except IOError:
- pass
- except KeyboardInterrupt:
- pass
-
-def pager(text):
- ''' find reasonable way to display text '''
- # this is a much simpler form of what is in pydoc.py
- if not sys.stdout.isatty():
- pager_print(text)
- elif 'PAGER' in os.environ:
- if sys.platform == 'win32':
- pager_print(text)
- else:
- pager_pipe(text, os.environ['PAGER'])
- elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0:
- pager_pipe(text, 'less')
- else:
- pager_print(text)
-
-def tty_ify(text):
-
- t = _ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
- t = _BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
- t = _MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
- t = _URL.sub(r"\1", t) # U(word) => word
- t = _CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
-
- return t
-
-def get_man_text(doc):
-
- opt_indent=" "
- text = []
- text.append("> %s\n" % doc['module'].upper())
-
- desc = " ".join(doc['description'])
-
- text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=" ", subsequent_indent=" "))
-
- if 'option_keys' in doc and len(doc['option_keys']) > 0:
- text.append("Options (= is mandatory):\n")
-
- for o in sorted(doc['option_keys']):
- opt = doc['options'][o]
-
- if opt.get('required', False):
- opt_leadin = "="
- else:
- opt_leadin = "-"
-
- text.append("%s %s" % (opt_leadin, o))
-
- desc = " ".join(opt['description'])
-
- if 'choices' in opt:
- choices = ", ".join(str(i) for i in opt['choices'])
- desc = desc + " (Choices: " + choices + ")"
- if 'default' in opt:
- default = str(opt['default'])
- desc = desc + " [Default: " + default + "]"
- text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=opt_indent,
- subsequent_indent=opt_indent))
-
- if 'notes' in doc and len(doc['notes']) > 0:
- notes = " ".join(doc['notes'])
- text.append("Notes:%s\n" % textwrap.fill(tty_ify(notes), initial_indent=" ",
- subsequent_indent=opt_indent))
-
-
- if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
- req = ", ".join(doc['requirements'])
- text.append("Requirements:%s\n" % textwrap.fill(tty_ify(req), initial_indent=" ",
- subsequent_indent=opt_indent))
-
- if 'examples' in doc and len(doc['examples']) > 0:
- text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
- for ex in doc['examples']:
- text.append("%s\n" % (ex['code']))
-
- if 'plainexamples' in doc and doc['plainexamples'] is not None:
- text.append("EXAMPLES:")
- text.append(doc['plainexamples'])
- if 'returndocs' in doc and doc['returndocs'] is not None:
- text.append("RETURN VALUES:")
- text.append(doc['returndocs'])
- text.append('')
-
- return "\n".join(text)
-
-
-def get_snippet_text(doc):
-
- text = []
- desc = tty_ify(" ".join(doc['short_description']))
- text.append("- name: %s" % (desc))
- text.append(" action: %s" % (doc['module']))
-
- for o in sorted(doc['options'].keys()):
- opt = doc['options'][o]
- desc = tty_ify(" ".join(opt['description']))
-
- if opt.get('required', False):
- s = o + "="
- else:
- s = o
-
- text.append(" %-20s # %s" % (s, desc))
- text.append('')
-
- return "\n".join(text)
-
-def get_module_list_text(module_list):
- tty_size = 0
- if os.isatty(0):
- tty_size = struct.unpack('HHHH',
- fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1]
- columns = max(60, tty_size)
- displace = max(len(x) for x in module_list)
- linelimit = columns - displace - 5
- text = []
- deprecated = []
- for module in sorted(set(module_list)):
-
- if module in module_docs.BLACKLIST_MODULES:
- continue
-
- filename = utils.plugins.module_finder.find_plugin(module)
-
- if filename is None:
- continue
- if filename.endswith(".ps1"):
- continue
- if os.path.isdir(filename):
- continue
-
- try:
- doc, plainexamples, returndocs = module_docs.get_docstring(filename)
- desc = tty_ify(doc.get('short_description', '?')).strip()
- if len(desc) > linelimit:
- desc = desc[:linelimit] + '...'
-
- if module.startswith('_'): # Handle deprecated
- deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
- else:
- text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
- except:
- traceback.print_exc()
- sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module)
-
- if len(deprecated) > 0:
- text.append("\nDEPRECATED:")
- text.extend(deprecated)
- return "\n".join(text)
-
-def find_modules(path, module_list):
-
- if os.path.isdir(path):
- for module in os.listdir(path):
- if module.startswith('.'):
- continue
- elif os.path.isdir(module):
- find_modules(module, module_list)
- elif any(module.endswith(x) for x in BLACKLIST_EXTS):
- continue
- elif module.startswith('__'):
- continue
- elif module in IGNORE_FILES:
- continue
- elif module.startswith('_'):
- fullpath = '/'.join([path,module])
- if os.path.islink(fullpath): # avoids aliases
- continue
-
- module = os.path.splitext(module)[0] # removes the extension
- module_list.append(module)
-
-def main():
-
- p = optparse.OptionParser(
- version=version("%prog"),
- usage='usage: %prog [options] [module...]',
- description='Show Ansible module documentation',
- )
-
- p.add_option("-M", "--module-path",
- action="store",
- dest="module_path",
- default=MODULEDIR,
- help="Ansible modules/ directory")
- p.add_option("-l", "--list",
- action="store_true",
- default=False,
- dest='list_dir',
- help='List available modules')
- p.add_option("-s", "--snippet",
- action="store_true",
- default=False,
- dest='show_snippet',
- help='Show playbook snippet for specified module(s)')
- p.add_option('-v', action='version', help='Show version number and exit')
-
- (options, args) = p.parse_args()
-
- if options.module_path is not None:
- for i in options.module_path.split(os.pathsep):
- utils.plugins.module_finder.add_directory(i)
-
- if options.list_dir:
- # list modules
- paths = utils.plugins.module_finder._get_paths()
- module_list = []
- for path in paths:
- find_modules(path, module_list)
-
- pager(get_module_list_text(module_list))
- sys.exit()
-
- if len(args) == 0:
- p.print_help()
-
- def print_paths(finder):
- ''' Returns a string suitable for printing of the search path '''
-
- # Uses a list to get the order right
- ret = []
- for i in finder._get_paths():
- if i not in ret:
- ret.append(i)
- return os.pathsep.join(ret)
-
- text = ''
- for module in args:
-
- filename = utils.plugins.module_finder.find_plugin(module)
- if filename is None:
- sys.stderr.write("module %s not found in %s\n" % (module, print_paths(utils.plugins.module_finder)))
- continue
-
- if any(filename.endswith(x) for x in BLACKLIST_EXTS):
- continue
-
- try:
- doc, plainexamples, returndocs = module_docs.get_docstring(filename)
- except:
- traceback.print_exc()
- sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module)
- continue
-
- if doc is not None:
-
- all_keys = []
- for (k,v) in doc['options'].iteritems():
- all_keys.append(k)
- all_keys = sorted(all_keys)
- doc['option_keys'] = all_keys
-
- doc['filename'] = filename
- doc['docuri'] = doc['module'].replace('_', '-')
- doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
- doc['plainexamples'] = plainexamples
- doc['returndocs'] = returndocs
-
- if options.show_snippet:
- text += get_snippet_text(doc)
- else:
- text += get_man_text(doc)
- else:
- # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
- # probably a quoting issue.
- sys.stderr.write("ERROR: module %s missing documentation (or could not parse documentation)\n" % module)
- pager(text)
-
-if __name__ == '__main__':
- main()
diff --git a/bin/ansible-doc b/bin/ansible-doc
new file mode 120000
index 00000000000..cabb1f519aa
--- /dev/null
+++ b/bin/ansible-doc
@@ -0,0 +1 @@
+ansible
\ No newline at end of file
diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy
deleted file mode 100755
index a6d625671ec..00000000000
--- a/bin/ansible-galaxy
+++ /dev/null
@@ -1,957 +0,0 @@
-#!/usr/bin/env python
-
-########################################################################
-#
-# (C) 2013, James Cammarata
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-#
-########################################################################
-
-import datetime
-import json
-import os
-import os.path
-import shutil
-import subprocess
-import sys
-import tarfile
-import tempfile
-import urllib
-import urllib2
-import yaml
-
-from collections import defaultdict
-from distutils.version import LooseVersion
-from jinja2 import Environment
-from optparse import OptionParser
-
-import ansible.constants as C
-import ansible.utils
-from ansible.errors import AnsibleError
-
-default_meta_template = """---
-galaxy_info:
- author: {{ author }}
- description: {{description}}
- company: {{ company }}
- # If the issue tracker for your role is not on github, uncomment the
- # next line and provide a value
- # issue_tracker_url: {{ issue_tracker_url }}
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: {{ license }}
- min_ansible_version: {{ min_ansible_version }}
- #
- # Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
- # platform on this list, let us know and we'll get it added!
- #
- #platforms:
- {%- for platform,versions in platforms.iteritems() %}
- #- name: {{ platform }}
- # versions:
- # - all
- {%- for version in versions %}
- # - {{ version }}
- {%- endfor %}
- {%- endfor %}
- #
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- #
- #categories:
- {%- for category in categories %}
- #- {{ category.name }}
- {%- endfor %}
-dependencies: []
- # List your role dependencies here, one per line.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
- {% for dependency in dependencies %}
- #- {{ dependency }}
- {% endfor %}
-
-"""
-
-default_readme_template = """Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
-"""
-
-#-------------------------------------------------------------------------------------
-# Utility functions for parsing actions/options
-#-------------------------------------------------------------------------------------
-
-VALID_ACTIONS = ("init", "info", "install", "list", "remove")
-SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
-
-def get_action(args):
- """
- Get the action the user wants to execute from the
- sys argv list.
- """
- for i in range(0,len(args)):
- arg = args[i]
- if arg in VALID_ACTIONS:
- del args[i]
- return arg
- return None
-
-def build_option_parser(action):
- """
- Builds an option parser object based on the action
- the user wants to execute.
- """
-
- usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(VALID_ACTIONS)
- epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
- OptionParser.format_epilog = lambda self, formatter: self.epilog
- parser = OptionParser(usage=usage, epilog=epilog)
-
- if not action:
- parser.print_help()
- sys.exit()
-
- # options for all actions
- # - none yet
-
- # options specific to actions
- if action == "info":
- parser.set_usage("usage: %prog info [options] role_name[,version]")
- elif action == "init":
- parser.set_usage("usage: %prog init [options] role_name")
- parser.add_option(
- '-p', '--init-path', dest='init_path', default="./",
- help='The path in which the skeleton role will be created. '
- 'The default is the current working directory.')
- parser.add_option(
- '--offline', dest='offline', default=False, action='store_true',
- help="Don't query the galaxy API when creating roles")
- elif action == "install":
- parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
- parser.add_option(
- '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
- help='Ignore errors and continue with the next specified role.')
- parser.add_option(
- '-n', '--no-deps', dest='no_deps', action='store_true', default=False,
- help='Don\'t download roles listed as dependencies')
- parser.add_option(
- '-r', '--role-file', dest='role_file',
- help='A file containing a list of roles to be imported')
- elif action == "remove":
- parser.set_usage("usage: %prog remove role1 role2 ...")
- elif action == "list":
- parser.set_usage("usage: %prog list [role_name]")
-
- # options that apply to more than one action
- if action != "init":
- parser.add_option(
- '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
- help='The path to the directory containing your roles. '
- 'The default is the roles_path configured in your '
- 'ansible.cfg file (/etc/ansible/roles if not configured)')
-
- if action in ("info","init","install"):
- parser.add_option(
- '-s', '--server', dest='api_server', default="galaxy.ansible.com",
- help='The API server destination')
-
- if action in ("init","install"):
- parser.add_option(
- '-f', '--force', dest='force', action='store_true', default=False,
- help='Force overwriting an existing role')
- # done, return the parser
- return parser
-
-def get_opt(options, k, defval=""):
- """
- Returns an option from an Optparse values instance.
- """
- try:
- data = getattr(options, k)
- except:
- return defval
- if k == "roles_path":
- if os.pathsep in data:
- data = data.split(os.pathsep)[0]
- return data
-
-def exit_without_ignore(options, rc=1):
- """
- Exits with the specified return code unless the
- option --ignore-errors was specified
- """
-
- if not get_opt(options, "ignore_errors", False):
- print '- you can use --ignore-errors to skip failed roles.'
- sys.exit(rc)
-
-
-#-------------------------------------------------------------------------------------
-# Galaxy API functions
-#-------------------------------------------------------------------------------------
-
-def api_get_config(api_server):
- """
- Fetches the Galaxy API current version to ensure
- the API server is up and reachable.
- """
-
- try:
- url = 'https://%s/api/' % api_server
- data = json.load(urllib2.urlopen(url))
- if not data.get("current_version",None):
- return None
- else:
- return data
- except:
- return None
-
-def api_lookup_role_by_name(api_server, role_name, notify=True):
- """
- Uses the Galaxy API to do a lookup on the role owner/name.
- """
-
- role_name = urllib.quote(role_name)
-
- try:
- parts = role_name.split(".")
- user_name = ".".join(parts[0:-1])
- role_name = parts[-1]
- if notify:
- print "- downloading role '%s', owned by %s" % (role_name, user_name)
- except:
- parser.print_help()
- print "- invalid role name (%s). Specify role as format: username.rolename" % role_name
- sys.exit(1)
-
- url = 'https://%s/api/v1/roles/?owner__username=%s&name=%s' % (api_server,user_name,role_name)
- try:
- data = json.load(urllib2.urlopen(url))
- if len(data["results"]) == 0:
- return None
- else:
- return data["results"][0]
- except:
- return None
-
-def api_fetch_role_related(api_server, related, role_id):
- """
- Uses the Galaxy API to fetch the list of related items for
- the given role. The url comes from the 'related' field of
- the role.
- """
-
- try:
- url = 'https://%s/api/v1/roles/%d/%s/?page_size=50' % (api_server, int(role_id), related)
- data = json.load(urllib2.urlopen(url))
- results = data['results']
- done = (data.get('next', None) == None)
- while not done:
- url = 'https://%s%s' % (api_server, data['next'])
- print url
- data = json.load(urllib2.urlopen(url))
- results += data['results']
- done = (data.get('next', None) == None)
- return results
- except:
- return None
-
-def api_get_list(api_server, what):
- """
- Uses the Galaxy API to fetch the list of items specified.
- """
-
- try:
- url = 'https://%s/api/v1/%s/?page_size' % (api_server, what)
- data = json.load(urllib2.urlopen(url))
- if "results" in data:
- results = data['results']
- else:
- results = data
- done = True
- if "next" in data:
- done = (data.get('next', None) == None)
- while not done:
- url = 'https://%s%s' % (api_server, data['next'])
- print url
- data = json.load(urllib2.urlopen(url))
- results += data['results']
- done = (data.get('next', None) == None)
- return results
- except:
- print "- failed to download the %s list" % what
- return None
-
-#-------------------------------------------------------------------------------------
-# scm repo utility functions
-#-------------------------------------------------------------------------------------
-
-def scm_archive_role(scm, role_url, role_version, role_name):
- if scm not in ['hg', 'git']:
- print "- scm %s is not currently supported" % scm
- return False
- tempdir = tempfile.mkdtemp()
- clone_cmd = [scm, 'clone', role_url, role_name]
- with open('/dev/null', 'w') as devnull:
- try:
- print "- executing: %s" % " ".join(clone_cmd)
- popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
- except:
- raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
- rc = popen.wait()
- if rc != 0:
- print "- command %s failed" % ' '.join(clone_cmd)
- print " in directory %s" % tempdir
- return False
-
- temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
- if scm == 'hg':
- archive_cmd = ['hg', 'archive', '--prefix', "%s/" % role_name]
- if role_version:
- archive_cmd.extend(['-r', role_version])
- archive_cmd.append(temp_file.name)
- if scm == 'git':
- archive_cmd = ['git', 'archive', '--prefix=%s/' % role_name, '--output=%s' % temp_file.name]
- if role_version:
- archive_cmd.append(role_version)
- else:
- archive_cmd.append('HEAD')
-
- with open('/dev/null', 'w') as devnull:
- print "- executing: %s" % " ".join(archive_cmd)
- popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, role_name),
- stderr=devnull, stdout=devnull)
- rc = popen.wait()
- if rc != 0:
- print "- command %s failed" % ' '.join(archive_cmd)
- print " in directory %s" % tempdir
- return False
-
- shutil.rmtree(tempdir, ignore_errors=True)
-
- return temp_file.name
-
-
-#-------------------------------------------------------------------------------------
-# Role utility functions
-#-------------------------------------------------------------------------------------
-
-def get_role_path(role_name, options):
- """
- Returns the role path based on the roles_path option
- and the role name.
- """
- roles_path = get_opt(options,'roles_path')
- roles_path = os.path.join(roles_path, role_name)
- roles_path = os.path.expanduser(roles_path)
- return roles_path
-
-def get_role_metadata(role_name, options):
- """
- Returns the metadata as YAML, if the file 'meta/main.yml'
- exists in the specified role_path
- """
- role_path = os.path.join(get_role_path(role_name, options), 'meta/main.yml')
- try:
- if os.path.isfile(role_path):
- f = open(role_path, 'r')
- meta_data = yaml.safe_load(f)
- f.close()
- return meta_data
- else:
- return None
- except:
- return None
-
-def get_galaxy_install_info(role_name, options):
- """
- Returns the YAML data contained in 'meta/.galaxy_install_info',
- if it exists.
- """
-
- try:
- info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
- if os.path.isfile(info_path):
- f = open(info_path, 'r')
- info_data = yaml.safe_load(f)
- f.close()
- return info_data
- else:
- return None
- except:
- return None
-
-def write_galaxy_install_info(role_name, role_version, options):
- """
- Writes a YAML-formatted file to the role's meta/ directory
- (named .galaxy_install_info) which contains some information
- we can use later for commands like 'list' and 'info'.
- """
-
- info = dict(
- version = role_version,
- install_date = datetime.datetime.utcnow().strftime("%c"),
- )
- try:
- info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
- f = open(info_path, 'w+')
- info_data = yaml.safe_dump(info, f)
- f.close()
- except:
- return False
- return True
-
-
-def remove_role(role_name, options):
- """
- Removes the specified role from the roles path. There is a
- sanity check to make sure there's a meta/main.yml file at this
- path so the user doesn't blow away random directories
- """
- if get_role_metadata(role_name, options):
- role_path = get_role_path(role_name, options)
- shutil.rmtree(role_path)
- return True
- else:
- return False
-
-def fetch_role(role_name, target, role_data, options):
- """
- Downloads the archived role from github to a temp location, extracts
- it, and then copies the extracted role to the role library path.
- """
-
- # first grab the file and save it to a temp location
- if '://' in role_name:
- archive_url = role_name
- else:
- archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target)
- print "- downloading role from %s" % archive_url
-
- try:
- url_file = urllib2.urlopen(archive_url)
- temp_file = tempfile.NamedTemporaryFile(delete=False)
- data = url_file.read()
- while data:
- temp_file.write(data)
- data = url_file.read()
- temp_file.close()
- return temp_file.name
- except Exception, e:
- # TODO: better urllib2 error handling for error
- # messages that are more exact
- print "- error: failed to download the file."
- return False
-
-def install_role(role_name, role_version, role_filename, options):
- # the file is a tar, so open it that way and extract it
- # to the specified (or default) roles directory
-
- if not tarfile.is_tarfile(role_filename):
- print "- error: the file downloaded was not a tar.gz"
- return False
- else:
- if role_filename.endswith('.gz'):
- role_tar_file = tarfile.open(role_filename, "r:gz")
- else:
- role_tar_file = tarfile.open(role_filename, "r")
- # verify the role's meta file
- meta_file = None
- members = role_tar_file.getmembers()
- # next find the metadata file
- for member in members:
- if "/meta/main.yml" in member.name:
- meta_file = member
- break
- if not meta_file:
- print "- error: this role does not appear to have a meta/main.yml file."
- return False
- else:
- try:
- meta_file_data = yaml.safe_load(role_tar_file.extractfile(meta_file))
- except:
- print "- error: this role does not appear to have a valid meta/main.yml file."
- return False
-
- # we strip off the top-level directory for all of the files contained within
- # the tar file here, since the default is 'github_repo-target', and change it
- # to the specified role's name
- role_path = os.path.join(get_opt(options, 'roles_path'), role_name)
- role_path = os.path.expanduser(role_path)
- print "- extracting %s to %s" % (role_name, role_path)
- try:
- if os.path.exists(role_path):
- if not os.path.isdir(role_path):
- print "- error: the specified roles path exists and is not a directory."
- return False
- elif not get_opt(options, "force", False):
- print "- error: the specified role %s appears to already exist. Use --force to replace it." % role_name
- return False
- else:
- # using --force, remove the old path
- if not remove_role(role_name, options):
- print "- error: %s doesn't appear to contain a role." % role_path
- print " please remove this directory manually if you really want to put the role here."
- return False
- else:
- os.makedirs(role_path)
-
- # now we do the actual extraction to the role_path
- for member in members:
- # we only extract files, and remove any relative path
- # bits that might be in the file for security purposes
- # and drop the leading directory, as mentioned above
- if member.isreg() or member.issym():
- parts = member.name.split("/")[1:]
- final_parts = []
- for part in parts:
- if part != '..' and '~' not in part and '$' not in part:
- final_parts.append(part)
- member.name = os.path.join(*final_parts)
- role_tar_file.extract(member, role_path)
-
- # write out the install info file for later use
- write_galaxy_install_info(role_name, role_version, options)
- except OSError, e:
- print "- error: you do not have permission to modify files in %s" % role_path
- return False
-
- # return the parsed yaml metadata
- print "- %s was installed successfully" % role_name
- return meta_file_data
-
-#-------------------------------------------------------------------------------------
-# Action functions
-#-------------------------------------------------------------------------------------
-
-def execute_init(args, options, parser):
- """
- Executes the init action, which creates the skeleton framework
- of a role that complies with the galaxy metadata format.
- """
-
- init_path = get_opt(options, 'init_path', './')
- api_server = get_opt(options, "api_server", "galaxy.ansible.com")
- force = get_opt(options, 'force', False)
- offline = get_opt(options, 'offline', False)
-
- if not offline:
- api_config = api_get_config(api_server)
- if not api_config:
- print "- the API server (%s) is not responding, please try again later." % api_server
- sys.exit(1)
-
- try:
- role_name = args.pop(0).strip()
- if role_name == "":
- raise Exception("")
- role_path = os.path.join(init_path, role_name)
- if os.path.exists(role_path):
- if os.path.isfile(role_path):
- print "- the path %s already exists, but is a file - aborting" % role_path
- sys.exit(1)
- elif not force:
- print "- the directory %s already exists." % role_path
- print " you can use --force to re-initialize this directory,\n" + \
- " however it will reset any main.yml files that may have\n" + \
- " been modified there already."
- sys.exit(1)
- except Exception, e:
- parser.print_help()
- print "- no role name specified for init"
- sys.exit(1)
-
- ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')
-
- # create the default README.md
- if not os.path.exists(role_path):
- os.makedirs(role_path)
- readme_path = os.path.join(role_path, "README.md")
- f = open(readme_path, "wb")
- f.write(default_readme_template)
- f.close
-
- for dir in ROLE_DIRS:
- dir_path = os.path.join(init_path, role_name, dir)
- main_yml_path = os.path.join(dir_path, 'main.yml')
- # create the directory if it doesn't exist already
- if not os.path.exists(dir_path):
- os.makedirs(dir_path)
-
- # now create the main.yml file for that directory
- if dir == "meta":
- # create a skeleton meta/main.yml with a valid galaxy_info
- # datastructure in place, plus with all of the available
- # tags/platforms included (but commented out) and the
- # dependencies section
- platforms = []
- if not offline:
- platforms = api_get_list(api_server, "platforms") or []
- categories = []
- if not offline:
- categories = api_get_list(api_server, "categories") or []
-
- # group the list of platforms from the api based
- # on their names, with the release field being
- # appended to a list of versions
- platform_groups = defaultdict(list)
- for platform in platforms:
- platform_groups[platform['name']].append(platform['release'])
- platform_groups[platform['name']].sort()
-
- inject = dict(
- author = 'your name',
- company = 'your company (optional)',
- license = 'license (GPLv2, CC-BY, etc)',
- issue_tracker_url = 'http://example.com/issue/tracker',
- min_ansible_version = '1.2',
- platforms = platform_groups,
- categories = categories,
- )
- rendered_meta = Environment().from_string(default_meta_template).render(inject)
- f = open(main_yml_path, 'w')
- f.write(rendered_meta)
- f.close()
- pass
- elif dir not in ('files','templates'):
- # just write a (mostly) empty YAML file for main.yml
- f = open(main_yml_path, 'w')
- f.write('---\n# %s file for %s\n' % (dir,role_name))
- f.close()
- print "- %s was created successfully" % role_name
-
-def execute_info(args, options, parser):
- """
- Executes the info action. This action prints out detailed
- information about an installed role as well as info available
- from the galaxy API.
- """
-
- if len(args) == 0:
- # the user needs to specify a role
- parser.print_help()
- print "- you must specify a user/role name"
- sys.exit(1)
-
- api_server = get_opt(options, "api_server", "galaxy.ansible.com")
- api_config = api_get_config(api_server)
- roles_path = get_opt(options, "roles_path")
-
- for role in args:
-
- role_info = {}
-
- install_info = get_galaxy_install_info(role, options)
- if install_info:
- if 'version' in install_info:
- install_info['intalled_version'] = install_info['version']
- del install_info['version']
- role_info.update(install_info)
-
- remote_data = api_lookup_role_by_name(api_server, role, False)
- if remote_data:
- role_info.update(remote_data)
-
- metadata = get_role_metadata(role, options)
- if metadata:
- role_info.update(metadata)
-
- role_spec = ansible.utils.role_spec_parse(role)
- if role_spec:
- role_info.update(role_spec)
-
- if role_info:
- print "- %s:" % (role)
- for k in sorted(role_info.keys()):
-
- if k in SKIP_INFO_KEYS:
- continue
-
- if isinstance(role_info[k], dict):
- print "\t%s: " % (k)
- for key in sorted(role_info[k].keys()):
- if key in SKIP_INFO_KEYS:
- continue
- print "\t\t%s: %s" % (key, role_info[k][key])
- else:
- print "\t%s: %s" % (k, role_info[k])
- else:
- print "- the role %s was not found" % role
-
-def execute_install(args, options, parser):
- """
- Executes the installation action. The args list contains the
- roles to be installed, unless -f was specified. The list of roles
- can be a name (which will be downloaded via the galaxy API and github),
- or it can be a local .tar.gz file.
- """
-
- role_file = get_opt(options, "role_file", None)
-
- if len(args) == 0 and role_file is None:
- # the user needs to specify one of either --role-file
- # or specify a single user/role name
- parser.print_help()
- print "- you must specify a user/role name or a roles file"
- sys.exit()
- elif len(args) == 1 and not role_file is None:
- # using a role file is mutually exclusive of specifying
- # the role name on the command line
- parser.print_help()
- print "- please specify a user/role name, or a roles file, but not both"
- sys.exit(1)
-
- api_server = get_opt(options, "api_server", "galaxy.ansible.com")
- no_deps = get_opt(options, "no_deps", False)
- roles_path = get_opt(options, "roles_path")
-
- roles_done = []
- if role_file:
- f = open(role_file, 'r')
- if role_file.endswith('.yaml') or role_file.endswith('.yml'):
- roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f))
- else:
- # roles listed in a file, one per line
- roles_left = map(ansible.utils.role_spec_parse, f.readlines())
- f.close()
- else:
- # roles were specified directly, so we'll just go out grab them
- # (and their dependencies, unless the user doesn't want us to).
- roles_left = map(ansible.utils.role_spec_parse, args)
-
- while len(roles_left) > 0:
- # query the galaxy API for the role data
- role_data = None
- role = roles_left.pop(0)
- role_src = role.get("src")
- role_scm = role.get("scm")
- role_path = role.get("path")
-
- if role_path:
- options.roles_path = role_path
- else:
- options.roles_path = roles_path
-
- if os.path.isfile(role_src):
- # installing a local tar.gz
- tmp_file = role_src
- else:
- if role_scm:
- # create tar file from scm url
- tmp_file = scm_archive_role(role_scm, role_src, role.get("version"), role.get("name"))
- elif '://' in role_src:
- # just download a URL - version will probably be in the URL
- tmp_file = fetch_role(role_src, None, None, options)
- else:
- # installing from galaxy
- api_config = api_get_config(api_server)
- if not api_config:
- print "- the API server (%s) is not responding, please try again later." % api_server
- sys.exit(1)
-
- role_data = api_lookup_role_by_name(api_server, role_src)
- if not role_data:
- print "- sorry, %s was not found on %s." % (role_src, api_server)
- exit_without_ignore(options)
- continue
-
- role_versions = api_fetch_role_related(api_server, 'versions', role_data['id'])
- if "version" not in role or role['version'] == '':
- # convert the version names to LooseVersion objects
- # and sort them to get the latest version. If there
- # are no versions in the list, we'll grab the head
- # of the master branch
- if len(role_versions) > 0:
- loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
- loose_versions.sort()
- role["version"] = str(loose_versions[-1])
- else:
- role["version"] = 'master'
- elif role['version'] != 'master':
- if role_versions and role["version"] not in [a.get('name', None) for a in role_versions]:
- print 'role is %s' % role
- print "- the specified version (%s) was not found in the list of available versions (%s)." % (role['version'], role_versions)
- exit_without_ignore(options)
- continue
-
- # download the role. if --no-deps was specified, we stop here,
- # otherwise we recursively grab roles and all of their deps.
- tmp_file = fetch_role(role_src, role["version"], role_data, options)
- installed = False
- if tmp_file:
- installed = install_role(role.get("name"), role.get("version"), tmp_file, options)
- # we're done with the temp file, clean it up
- if tmp_file != role_src:
- os.unlink(tmp_file)
- # install dependencies, if we want them
- if not no_deps and installed:
- if not role_data:
- role_data = get_role_metadata(role.get("name"), options)
- role_dependencies = role_data['dependencies']
- else:
- role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id'])
- for dep in role_dependencies:
- if isinstance(dep, basestring):
- dep = ansible.utils.role_spec_parse(dep)
- else:
- dep = ansible.utils.role_yaml_parse(dep)
- if not get_role_metadata(dep["name"], options):
- if dep not in roles_left:
- print '- adding dependency: %s' % dep["name"]
- roles_left.append(dep)
- else:
- print '- dependency %s already pending installation.' % dep["name"]
- else:
- print '- dependency %s is already installed, skipping.' % dep["name"]
- if not tmp_file or not installed:
- print "- %s was NOT installed successfully." % role.get("name")
- exit_without_ignore(options)
- sys.exit(0)
-
-def execute_remove(args, options, parser):
- """
- Executes the remove action. The args list contains the list
- of roles to be removed. This list can contain more than one role.
- """
-
- if len(args) == 0:
- parser.print_help()
- print '- you must specify at least one role to remove.'
- sys.exit()
-
- for role in args:
- if get_role_metadata(role, options):
- if remove_role(role, options):
- print '- successfully removed %s' % role
- else:
- print "- failed to remove role: %s" % role
- else:
- print '- %s is not installed, skipping.' % role
- sys.exit(0)
-
-def execute_list(args, options, parser):
- """
- Executes the list action. The args list can contain zero
- or one role. If one is specified, only that role will be
- shown, otherwise all roles in the specified directory will
- be shown.
- """
-
- if len(args) > 1:
- print "- please specify only one role to list, or specify no roles to see a full list"
- sys.exit(1)
-
- if len(args) == 1:
- # show only the request role, if it exists
- role_name = args[0]
- metadata = get_role_metadata(role_name, options)
- if metadata:
- install_info = get_galaxy_install_info(role_name, options)
- version = None
- if install_info:
- version = install_info.get("version", None)
- if not version:
- version = "(unknown version)"
- # show some more info about single roles here
- print "- %s, %s" % (role_name, version)
- else:
- print "- the role %s was not found" % role_name
- else:
- # show all valid roles in the roles_path directory
- roles_path = get_opt(options, 'roles_path')
- roles_path = os.path.expanduser(roles_path)
- if not os.path.exists(roles_path):
- parser.print_help()
- print "- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path
- sys.exit(1)
- elif not os.path.isdir(roles_path):
- print "- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path
- parser.print_help()
- sys.exit(1)
- path_files = os.listdir(roles_path)
- for path_file in path_files:
- if get_role_metadata(path_file, options):
- install_info = get_galaxy_install_info(path_file, options)
- version = None
- if install_info:
- version = install_info.get("version", None)
- if not version:
- version = "(unknown version)"
- print "- %s, %s" % (path_file, version)
- sys.exit(0)
-
-#-------------------------------------------------------------------------------------
-# The main entry point
-#-------------------------------------------------------------------------------------
-
-def main():
- # parse the CLI options
- action = get_action(sys.argv)
- parser = build_option_parser(action)
- (options, args) = parser.parse_args()
-
- # execute the desired action
- if 1: #try:
- fn = globals()["execute_%s" % action]
- fn(args, options, parser)
- #except KeyError, e:
- # print "- error: %s is not a valid action. Valid actions are: %s" % (action, ", ".join(VALID_ACTIONS))
- # sys.exit(1)
-
-if __name__ == "__main__":
- main()
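
For context on the version handling in the removed `execute_install` above: when no version is pinned, the script sorted the release names returned for the role as `LooseVersion` objects and installed the newest one, falling back to `master` when the role had no releases. A standalone sketch of that selection rule, using hypothetical data rather than a real Galaxy API response:

```python
from distutils.version import LooseVersion

def pick_version(role_versions, requested=None):
    # Mirrors the removed logic: a pinned version must exist (or be 'master'),
    # otherwise the newest available release wins, else fall back to 'master'.
    names = [v.get('name') for v in role_versions if v.get('name')]
    if requested:
        return requested if (requested == 'master' or requested in names) else None
    if not names:
        return 'master'
    return str(sorted(LooseVersion(n) for n in names)[-1])

# Hypothetical data shaped like the Galaxy "versions" payload.
print(pick_version([{'name': '1.2.0'}, {'name': '1.10.1'}, {'name': '1.9.3'}]))  # 1.10.1
print(pick_version([]))                                                          # master
```
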
diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy
new file mode 120000
index 00000000000..cabb1f519aa
--- /dev/null
+++ b/bin/ansible-galaxy
@@ -0,0 +1 @@
+ansible
\ No newline at end of file
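
bin/ansible-galaxy is now a symlink to the single `ansible` entry point instead of a standalone script, so the consolidated script has to work out which tool it was invoked as. Presumably it does that from the basename of argv[0]; a rough, illustrative sketch of that kind of dispatch (not the actual bin/ansible source):

```python
import os
import sys

def main(argv):
    # The symlink name (ansible, ansible-playbook, ansible-galaxy, ...) selects the CLI.
    prog = os.path.basename(argv[0])
    sub = prog.split('-', 1)[1] if '-' in prog else 'adhoc'
    print("would dispatch to the '%s' CLI with args: %s" % (sub, argv[1:]))
    return 0

if __name__ == '__main__':
    sys.exit(main(sys.argv))
```
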
diff --git a/bin/ansible-playbook b/bin/ansible-playbook
deleted file mode 100755
index 3d6e1f9f402..00000000000
--- a/bin/ansible-playbook
+++ /dev/null
@@ -1,330 +0,0 @@
-#!/usr/bin/env python
-# (C) 2012, Michael DeHaan,
-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-#######################################################
-
-__requires__ = ['ansible']
-try:
- import pkg_resources
-except Exception:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. But we
- # have code that better expresses the errors in the places where the code
- # is actually used (the deps are optional for many code paths) so we don't
- # want to fail here.
- pass
-
-import sys
-import os
-import stat
-
-# Augment PYTHONPATH to find Python modules relative to this file path
-# This is so that we can find the modules when running from a local checkout
-# installed as editable with `pip install -e ...` or `python setup.py develop`
-local_module_path = os.path.abspath(
- os.path.join(os.path.dirname(__file__), '..', 'lib')
-)
-sys.path.append(local_module_path)
-
-import ansible.playbook
-import ansible.constants as C
-import ansible.utils.template
-from ansible import errors
-from ansible import callbacks
-from ansible import utils
-from ansible.color import ANSIBLE_COLOR, stringc
-from ansible.callbacks import display
-
-def colorize(lead, num, color):
- """ Print 'lead' = 'num' in 'color' """
- if num != 0 and ANSIBLE_COLOR and color is not None:
- return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color))
- else:
- return "%s=%-4s" % (lead, str(num))
-
-def hostcolor(host, stats, color=True):
- if ANSIBLE_COLOR and color:
- if stats['failures'] != 0 or stats['unreachable'] != 0:
- return "%-37s" % stringc(host, 'red')
- elif stats['changed'] != 0:
- return "%-37s" % stringc(host, 'yellow')
- else:
- return "%-37s" % stringc(host, 'green')
- return "%-26s" % host
-
-
-def main(args):
- ''' run ansible-playbook operations '''
-
- # create parser for CLI options
- parser = utils.base_parser(
- constants=C,
- usage = "%prog playbook.yml",
- connect_opts=True,
- runas_opts=True,
- subset_opts=True,
- check_opts=True,
- diff_opts=True
- )
- #parser.add_option('--vault-password', dest="vault_password",
- # help="password for vault encrypted files")
- parser.add_option('-t', '--tags', dest='tags', default='all',
- help="only run plays and tasks tagged with these values")
- parser.add_option('--skip-tags', dest='skip_tags',
- help="only run plays and tasks whose tags do not match these values")
- parser.add_option('--syntax-check', dest='syntax', action='store_true',
- help="perform a syntax check on the playbook, but do not execute it")
- parser.add_option('--list-tasks', dest='listtasks', action='store_true',
- help="list all tasks that would be executed")
- parser.add_option('--list-tags', dest='listtags', action='store_true',
- help="list all available tags")
- parser.add_option('--step', dest='step', action='store_true',
- help="one-step-at-a-time: confirm each task before running")
- parser.add_option('--start-at-task', dest='start_at',
- help="start the playbook at the task matching this name")
- parser.add_option('--force-handlers', dest='force_handlers',
- default=C.DEFAULT_FORCE_HANDLERS, action='store_true',
- help="run handlers even if a task fails")
- parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
- help="clear the fact cache")
-
- options, args = parser.parse_args(args)
-
- if len(args) == 0:
- parser.print_help(file=sys.stderr)
- return 1
-
- # privilege escalation command line arguments need to be mutually exclusive
- utils.check_mutually_exclusive_privilege(options, parser)
-
- if (options.ask_vault_pass and options.vault_password_file):
- parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
-
- sshpass = None
- becomepass = None
- vault_pass = None
-
- options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
-
- if options.listhosts or options.syntax or options.listtasks or options.listtags:
- (_, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass)
- else:
- options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
- # Never ask for an SSH password when we run with local connection
- if options.connection == "local":
- options.ask_pass = False
-
- # set pe options
- utils.normalize_become_options(options)
- prompt_method = utils.choose_pass_prompt(options)
- (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass,
- become_ask_pass=options.become_ask_pass,
- ask_vault_pass=options.ask_vault_pass,
- become_method=prompt_method)
-
- # read vault_pass from a file
- if not options.ask_vault_pass and options.vault_password_file:
- vault_pass = utils.read_vault_file(options.vault_password_file)
-
- extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)
-
- only_tags = options.tags.split(",")
- skip_tags = options.skip_tags
- if options.skip_tags is not None:
- skip_tags = options.skip_tags.split(",")
-
- for playbook in args:
- if not os.path.exists(playbook):
- raise errors.AnsibleError("the playbook: %s could not be found" % playbook)
- if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
- raise errors.AnsibleError("the playbook: %s does not appear to be a file" % playbook)
-
- inventory = ansible.inventory.Inventory(options.inventory, vault_password=vault_pass)
-
- # Note: slightly wrong, this is written so that implicit localhost
- # (which is not returned in list_hosts()) is taken into account for
- # warning if inventory is empty. But it can't be taken into account for
- # checking if limit doesn't match any hosts. Instead we don't worry about
- # limit if only implicit localhost was in inventory to start with.
- #
- # Fix this in v2
- no_hosts = False
- if len(inventory.list_hosts()) == 0:
- # Empty inventory
- utils.warning("provided hosts list is empty, only localhost is available")
- no_hosts = True
- inventory.subset(options.subset)
- if len(inventory.list_hosts()) == 0 and no_hosts is False:
- # Invalid limit
- raise errors.AnsibleError("Specified --limit does not match any hosts")
-
- # run all playbooks specified on the command line
- for playbook in args:
-
- stats = callbacks.AggregateStats()
- playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
- if options.step:
- playbook_cb.step = options.step
- if options.start_at:
- playbook_cb.start_at = options.start_at
- runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
-
- pb = ansible.playbook.PlayBook(
- playbook=playbook,
- module_path=options.module_path,
- inventory=inventory,
- forks=options.forks,
- remote_user=options.remote_user,
- remote_pass=sshpass,
- callbacks=playbook_cb,
- runner_callbacks=runner_cb,
- stats=stats,
- timeout=options.timeout,
- transport=options.connection,
- become=options.become,
- become_method=options.become_method,
- become_user=options.become_user,
- become_pass=becomepass,
- extra_vars=extra_vars,
- private_key_file=options.private_key_file,
- only_tags=only_tags,
- skip_tags=skip_tags,
- check=options.check,
- diff=options.diff,
- vault_password=vault_pass,
- force_handlers=options.force_handlers,
- )
-
- if options.flush_cache:
- display(callbacks.banner("FLUSHING FACT CACHE"))
- pb.SETUP_CACHE.flush()
-
- if options.listhosts or options.listtasks or options.syntax or options.listtags:
- print ''
- print 'playbook: %s' % playbook
- print ''
- playnum = 0
- for (play_ds, play_basedir) in zip(pb.playbook, pb.play_basedirs):
- playnum += 1
- play = ansible.playbook.Play(pb, play_ds, play_basedir,
- vault_password=pb.vault_password)
- label = play.name
- hosts = pb.inventory.list_hosts(play.hosts)
-
- if options.listhosts:
- print ' play #%d (%s): host count=%d' % (playnum, label, len(hosts))
- for host in hosts:
- print ' %s' % host
-
- if options.listtags or options.listtasks:
- print ' play #%d (%s):\tTAGS: [%s]' % (playnum, label,','.join(sorted(set(play.tags))))
-
- if options.listtags:
- tags = []
- for task in pb.tasks_to_run_in_play(play):
- tags.extend(task.tags)
- print ' TASK TAGS: [%s]' % (', '.join(sorted(set(tags).difference(['untagged']))))
-
- if options.listtasks:
-
- for task in pb.tasks_to_run_in_play(play):
- if getattr(task, 'name', None) is not None:
- # meta tasks have no names
- print ' %s\tTAGS: [%s]' % (task.name, ', '.join(sorted(set(task.tags).difference(['untagged']))))
-
- if options.listhosts or options.listtasks or options.listtags:
- print ''
- continue
-
- if options.syntax:
- # if we've not exited by now then we are fine.
- print 'Playbook Syntax is fine'
- return 0
-
- failed_hosts = []
- unreachable_hosts = []
-
- try:
-
- pb.run()
-
- hosts = sorted(pb.stats.processed.keys())
- display(callbacks.banner("PLAY RECAP"))
- playbook_cb.on_stats(pb.stats)
-
- for h in hosts:
- t = pb.stats.summarize(h)
- if t['failures'] > 0:
- failed_hosts.append(h)
- if t['unreachable'] > 0:
- unreachable_hosts.append(h)
-
- retries = failed_hosts + unreachable_hosts
-
- if C.RETRY_FILES_ENABLED and len(retries) > 0:
- filename = pb.generate_retry_inventory(retries)
- if filename:
- display(" to retry, use: --limit @%s\n" % filename)
-
- for h in hosts:
- t = pb.stats.summarize(h)
-
- display("%s : %s %s %s %s" % (
- hostcolor(h, t),
- colorize('ok', t['ok'], 'green'),
- colorize('changed', t['changed'], 'yellow'),
- colorize('unreachable', t['unreachable'], 'red'),
- colorize('failed', t['failures'], 'red')),
- screen_only=True
- )
-
- display("%s : %s %s %s %s" % (
- hostcolor(h, t, False),
- colorize('ok', t['ok'], None),
- colorize('changed', t['changed'], None),
- colorize('unreachable', t['unreachable'], None),
- colorize('failed', t['failures'], None)),
- log_only=True
- )
-
-
- print ""
- if len(failed_hosts) > 0:
- return 2
- if len(unreachable_hosts) > 0:
- return 3
-
- except errors.AnsibleError, e:
- display("ERROR: %s" % e, color='red')
- return 1
-
- return 0
-
-
-if __name__ == "__main__":
- display(" ", log_only=True)
- display(" ".join(sys.argv), log_only=True)
- display(" ", log_only=True)
- try:
- sys.exit(main(sys.argv[1:]))
- except errors.AnsibleError, e:
- display("ERROR: %s" % e, color='red', stderr=True)
- sys.exit(1)
- except KeyboardInterrupt, ke:
- display("ERROR: interrupted", color='red', stderr=True)
- sys.exit(1)
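
For reference, the removed ansible-playbook `main()` turned the aggregated per-host stats into its exit code: 2 if any host recorded failures, 3 if any host was unreachable, 1 on an AnsibleError, and 0 otherwise. A compact sketch of that convention over a plain stats mapping:

```python
def recap_exit_code(stats):
    # stats: {host: {'failures': int, 'unreachable': int, ...}} -> process exit code
    failed = [h for h, t in stats.items() if t.get('failures', 0) > 0]
    unreachable = [h for h, t in stats.items() if t.get('unreachable', 0) > 0]
    if failed:
        return 2
    if unreachable:
        return 3
    return 0

print(recap_exit_code({'web1': {'failures': 0, 'unreachable': 0},
                       'web2': {'failures': 1, 'unreachable': 0}}))  # 2
```
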
diff --git a/bin/ansible-playbook b/bin/ansible-playbook
new file mode 120000
index 00000000000..cabb1f519aa
--- /dev/null
+++ b/bin/ansible-playbook
@@ -0,0 +1 @@
+ansible
\ No newline at end of file
diff --git a/bin/ansible-pull b/bin/ansible-pull
deleted file mode 100755
index d4887631e0f..00000000000
--- a/bin/ansible-pull
+++ /dev/null
@@ -1,257 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Stephen Fromm
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-# ansible-pull is a script that runs ansible in local mode
-# after checking out a playbooks directory from source repo. There is an
-# example playbook to bootstrap this script in the examples/ dir which
-# installs ansible and sets it up to run on cron.
-
-# usage:
-# ansible-pull -d /var/lib/ansible \
-# -U http://example.net/content.git [-C production] \
-# [path/playbook.yml]
-#
-# the -d and -U arguments are required; the -C argument is optional.
-#
-# ansible-pull accepts an optional argument to specify a playbook
-# location underneath the workdir and then searches the source repo
-# for playbooks in the following order, stopping at the first match:
-#
-# 1. $workdir/path/playbook.yml, if specified
-# 2. $workdir/$fqdn.yml
-# 3. $workdir/$hostname.yml
-# 4. $workdir/local.yml
-#
-# the source repo must contain at least one of these playbooks.
-
-import os
-import shutil
-import sys
-import datetime
-import socket
-import random
-import time
-from ansible import utils
-from ansible.utils import cmd_functions
-from ansible import errors
-from ansible import inventory
-
-DEFAULT_REPO_TYPE = 'git'
-DEFAULT_PLAYBOOK = 'local.yml'
-PLAYBOOK_ERRORS = {1: 'File does not exist',
- 2: 'File is not readable'}
-
-VERBOSITY=0
-
-def increment_debug(option, opt, value, parser):
- global VERBOSITY
- VERBOSITY += 1
-
-def try_playbook(path):
- if not os.path.exists(path):
- return 1
- if not os.access(path, os.R_OK):
- return 2
- return 0
-
-
-def select_playbook(path, args):
- playbook = None
- if len(args) > 0 and args[0] is not None:
- playbook = "%s/%s" % (path, args[0])
- rc = try_playbook(playbook)
- if rc != 0:
- print >>sys.stderr, "%s: %s" % (playbook, PLAYBOOK_ERRORS[rc])
- return None
- return playbook
- else:
- fqdn = socket.getfqdn()
- hostpb = "%s/%s.yml" % (path, fqdn)
- shorthostpb = "%s/%s.yml" % (path, fqdn.split('.')[0])
- localpb = "%s/%s" % (path, DEFAULT_PLAYBOOK)
- errors = []
- for pb in [hostpb, shorthostpb, localpb]:
- rc = try_playbook(pb)
- if rc == 0:
- playbook = pb
- break
- else:
- errors.append("%s: %s" % (pb, PLAYBOOK_ERRORS[rc]))
- if playbook is None:
- print >>sys.stderr, "\n".join(errors)
- return playbook
-
-
-def main(args):
- """ Set up and run a local playbook """
- usage = "%prog [options] [playbook.yml]"
- parser = utils.SortedOptParser(usage=usage)
- parser.add_option('--purge', default=False, action='store_true',
- help='purge checkout after playbook run')
- parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
- help='only run the playbook if the repository has been updated')
- parser.add_option('-s', '--sleep', dest='sleep', default=None,
- help='sleep for random interval (between 0 and n number of seconds) before starting. this is a useful way to disperse git requests')
- parser.add_option('-f', '--force', dest='force', default=False,
- action='store_true',
- help='run the playbook even if the repository could '
- 'not be updated')
- parser.add_option('-d', '--directory', dest='dest', default=None,
- help='directory to checkout repository to')
- #parser.add_option('-l', '--live', default=True, action='store_live',
- # help='Print the ansible-playbook output while running')
- parser.add_option('-U', '--url', dest='url', default=None,
- help='URL of the playbook repository')
- parser.add_option('-C', '--checkout', dest='checkout',
- help='branch/tag/commit to checkout. '
- 'Defaults to behavior of repository module.')
- parser.add_option('-i', '--inventory-file', dest='inventory',
- help="location of the inventory host file")
- parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
- help="set additional variables as key=value or YAML/JSON", default=[])
- parser.add_option('-v', '--verbose', default=False, action="callback",
- callback=increment_debug,
- help='Pass -vvvv to ansible-playbook')
- parser.add_option('-m', '--module-name', dest='module_name',
- default=DEFAULT_REPO_TYPE,
- help='Module name used to check out repository. '
- 'Default is %s.' % DEFAULT_REPO_TYPE)
- parser.add_option('--vault-password-file', dest='vault_password_file',
- help="vault password file")
- parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
- help='ask for sudo password')
- parser.add_option('-t', '--tags', dest='tags', default=False,
- help='only run plays and tasks tagged with these values')
- parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
- help='adds the hostkey for the repo url if not already added')
- parser.add_option('--key-file', dest='key_file',
- help="Pass '-i ' to the SSH arguments used by git.")
- options, args = parser.parse_args(args)
-
- hostname = socket.getfqdn()
- if not options.dest:
- # use a hostname dependent directory, in case of $HOME on nfs
- options.dest = utils.prepare_writeable_dir('~/.ansible/pull/%s' % hostname)
-
- options.dest = os.path.abspath(options.dest)
-
- if not options.url:
- parser.error("URL for repository not specified, use -h for help")
- return 1
-
- now = datetime.datetime.now()
- print now.strftime("Starting ansible-pull at %F %T")
-
- # Attempt to use the inventory passed in as an argument
- # It might not yet have been downloaded, so use localhost if not
- if not options.inventory or not os.path.exists(options.inventory):
- inv_opts = 'localhost,'
- else:
- inv_opts = options.inventory
- limit_opts = 'localhost:%s:127.0.0.1' % hostname
- repo_opts = "name=%s dest=%s" % (options.url, options.dest)
-
- if VERBOSITY == 0:
- base_opts = '-c local --limit "%s"' % limit_opts
- elif VERBOSITY > 0:
- debug_level = ''.join([ "v" for x in range(0, VERBOSITY) ])
- base_opts = '-%s -c local --limit "%s"' % (debug_level, limit_opts)
-
- if options.checkout:
- repo_opts += ' version=%s' % options.checkout
-
- # Only git module is supported
- if options.module_name == DEFAULT_REPO_TYPE:
- if options.accept_host_key:
- repo_opts += ' accept_hostkey=yes'
-
- if options.key_file:
- repo_opts += ' key_file=%s' % options.key_file
-
- path = utils.plugins.module_finder.find_plugin(options.module_name)
- if path is None:
- sys.stderr.write("module '%s' not found.\n" % options.module_name)
- return 1
-
- bin_path = os.path.dirname(os.path.abspath(__file__))
- cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % (
- bin_path, inv_opts, base_opts, options.module_name, repo_opts
- )
-
- for ev in options.extra_vars:
- cmd += ' -e "%s"' % ev
-
- if options.sleep:
- try:
- secs = random.randint(0,int(options.sleep));
- except ValueError:
- parser.error("%s is not a number." % options.sleep)
- return 1
-
- print >>sys.stderr, "Sleeping for %d seconds..." % secs
- time.sleep(secs);
-
-
- # RUN THE CHECKOUT COMMAND
- rc, out, err = cmd_functions.run_cmd(cmd, live=True)
-
- if rc != 0:
- if options.force:
- print >>sys.stderr, "Unable to update repository. Continuing with (forced) run of playbook."
- else:
- return rc
- elif options.ifchanged and '"changed": true' not in out:
- print "Repository has not changed, quitting."
- return 0
-
- playbook = select_playbook(options.dest, args)
-
- if playbook is None:
- print >>sys.stderr, "Could not find a playbook to run."
- return 1
-
- cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
- if options.vault_password_file:
- cmd += " --vault-password-file=%s" % options.vault_password_file
- if options.inventory:
- cmd += ' -i "%s"' % options.inventory
- for ev in options.extra_vars:
- cmd += ' -e "%s"' % ev
- if options.ask_sudo_pass:
- cmd += ' -K'
- if options.tags:
- cmd += ' -t "%s"' % options.tags
- os.chdir(options.dest)
-
- # RUN THE PLAYBOOK COMMAND
- rc, out, err = cmd_functions.run_cmd(cmd, live=True)
-
- if options.purge:
- os.chdir('/')
- try:
- shutil.rmtree(options.dest)
- except Exception, e:
- print >>sys.stderr, "Failed to remove %s: %s" % (options.dest, str(e))
-
- return rc
-
-if __name__ == '__main__':
- try:
- sys.exit(main(sys.argv[1:]))
- except KeyboardInterrupt, e:
- print >>sys.stderr, "Exit on user request.\n"
- sys.exit(1)
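
The removed ansible-pull resolved which playbook to run by probing the checkout in a fixed order: an explicit path if one was given, then `<fqdn>.yml`, then `<short hostname>.yml`, then `local.yml`. A small sketch of that search (the workdir path below is only an example):

```python
import os
import socket

DEFAULT_PLAYBOOK = 'local.yml'

def candidate_playbooks(workdir, explicit=None):
    # Candidates in the order the removed script tried them.
    if explicit:
        return [os.path.join(workdir, explicit)]
    fqdn = socket.getfqdn()
    return [os.path.join(workdir, fqdn + '.yml'),
            os.path.join(workdir, fqdn.split('.')[0] + '.yml'),
            os.path.join(workdir, DEFAULT_PLAYBOOK)]

def select_playbook(workdir, explicit=None):
    for path in candidate_playbooks(workdir, explicit):
        if os.path.isfile(path) and os.access(path, os.R_OK):
            return path
    return None

print(candidate_playbooks('/var/lib/ansible/local'))
```
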
diff --git a/bin/ansible-pull b/bin/ansible-pull
new file mode 120000
index 00000000000..cabb1f519aa
--- /dev/null
+++ b/bin/ansible-pull
@@ -0,0 +1 @@
+ansible
\ No newline at end of file
diff --git a/bin/ansible-vault b/bin/ansible-vault
deleted file mode 100755
index 22cfc0e1487..00000000000
--- a/bin/ansible-vault
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2014, James Tanner
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-# ansible-vault is a script that encrypts/decrypts YAML files. See
-# http://docs.ansible.com/playbooks_vault.html for more details.
-
-__requires__ = ['ansible']
-try:
- import pkg_resources
-except Exception:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. But we
- # have code that better expresses the errors in the places where the code
- # is actually used (the deps are optional for many code paths) so we don't
- # want to fail here.
- pass
-
-import os
-import sys
-import traceback
-
-import ansible.constants as C
-
-from ansible import utils
-from ansible import errors
-from ansible.utils.vault import VaultEditor
-
-from optparse import OptionParser
-
-#-------------------------------------------------------------------------------------
-# Utility functions for parsing actions/options
-#-------------------------------------------------------------------------------------
-
-VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
-
-def build_option_parser(action):
- """
- Builds an option parser object based on the action
- the user wants to execute.
- """
-
- usage = "usage: %%prog [%s] [--help] [options] file_name" % "|".join(VALID_ACTIONS)
- epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
- OptionParser.format_epilog = lambda self, formatter: self.epilog
- parser = OptionParser(usage=usage, epilog=epilog)
-
- if not action:
- parser.print_help()
- sys.exit()
-
- # options for all actions
- #parser.add_option('-c', '--cipher', dest='cipher', default="AES256", help="cipher to use")
- parser.add_option('--debug', dest='debug', action="store_true", help="debug")
- parser.add_option('--vault-password-file', dest='password_file',
- help="vault password file", default=C.DEFAULT_VAULT_PASSWORD_FILE)
-
- # options specific to actions
- if action == "create":
- parser.set_usage("usage: %prog create [options] file_name")
- elif action == "decrypt":
- parser.set_usage("usage: %prog decrypt [options] file_name")
- elif action == "edit":
- parser.set_usage("usage: %prog edit [options] file_name")
- elif action == "view":
- parser.set_usage("usage: %prog view [options] file_name")
- elif action == "encrypt":
- parser.set_usage("usage: %prog encrypt [options] file_name")
- elif action == "rekey":
- parser.set_usage("usage: %prog rekey [options] file_name")
-
- # done, return the parser
- return parser
-
-def get_action(args):
- """
- Get the action the user wants to execute from the
- sys argv list.
- """
- for i in range(0,len(args)):
- arg = args[i]
- if arg in VALID_ACTIONS:
- del args[i]
- return arg
- return None
-
-def get_opt(options, k, defval=""):
- """
- Returns an option from an Optparse values instance.
- """
- try:
- data = getattr(options, k)
- except:
- return defval
- if k == "roles_path":
- if os.pathsep in data:
- data = data.split(os.pathsep)[0]
- return data
-
-#-------------------------------------------------------------------------------------
-# Command functions
-#-------------------------------------------------------------------------------------
-
-def execute_create(args, options, parser):
- if len(args) > 1:
- raise errors.AnsibleError("'create' does not accept more than one filename")
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = 'AES256'
- if hasattr(options, 'cipher'):
- cipher = options.cipher
-
- this_editor = VaultEditor(cipher, password, args[0])
- this_editor.create_file()
-
-def execute_decrypt(args, options, parser):
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = 'AES256'
- if hasattr(options, 'cipher'):
- cipher = options.cipher
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.decrypt_file()
-
- print "Decryption successful"
-
-def execute_edit(args, options, parser):
-
- if len(args) > 1:
- raise errors.AnsibleError("edit does not accept more than one filename")
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = None
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.edit_file()
-
-def execute_view(args, options, parser):
-
- if len(args) > 1:
- raise errors.AnsibleError("view does not accept more than one filename")
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = None
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.view_file()
-
-def execute_encrypt(args, options, parser):
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = 'AES256'
- if hasattr(options, 'cipher'):
- cipher = options.cipher
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.encrypt_file()
-
- print "Encryption successful"
-
-def execute_rekey(args, options, parser):
-
- if not options.password_file:
- password, __ = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- __, new_password = utils.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)
-
- cipher = None
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.rekey_file(new_password)
-
- print "Rekey successful"
-
-#-------------------------------------------------------------------------------------
-# MAIN
-#-------------------------------------------------------------------------------------
-
-def main():
-
- action = get_action(sys.argv)
- parser = build_option_parser(action)
- (options, args) = parser.parse_args()
-
- if not len(args):
- raise errors.AnsibleError(
- "The '%s' command requires a filename as the first argument" % action
- )
-
- # execute the desired action
- try:
- fn = globals()["execute_%s" % action]
- fn(args, options, parser)
- except Exception, err:
- if options.debug:
- print traceback.format_exc()
- print "ERROR:",err
- sys.exit(1)
-
-if __name__ == "__main__":
- main()
diff --git a/bin/ansible-vault b/bin/ansible-vault
new file mode 120000
index 00000000000..cabb1f519aa
--- /dev/null
+++ b/bin/ansible-vault
@@ -0,0 +1 @@
+ansible
\ No newline at end of file
diff --git a/contrib/README.md b/contrib/README.md
new file mode 100644
index 00000000000..dab0da4ba72
--- /dev/null
+++ b/contrib/README.md
@@ -0,0 +1,17 @@
+inventory
+=========
+
+Inventory scripts allow you to store your hosts, groups, and variables in any way
+you like. Examples include discovering inventory from EC2 or pulling it from
+Cobbler. They can also be used to interface with LDAP or a database.
+
+Mark an inventory script executable (chmod +x) and either install it as /etc/ansible/hosts
+or point ansible at it with -i to designate the path to the script. You might also need to
+copy a configuration file with the same name and/or set environment variables; the scripts
+and their configuration files have more details.
+
+contributions welcome
+=====================
+
+Send in pull requests to add plugins of your own. The sky is the limit!
+
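
The README above says to mark a script executable and point ansible at it with -i, but not what the script must emit. As a rough illustration (hard-coded, made-up hosts instead of a real backend), an inventory script prints a JSON mapping of groups to hosts for `--list` and a JSON dict of variables for `--host <name>`:

```python
#!/usr/bin/env python
# Minimal illustrative dynamic inventory script.
import json
import sys

INVENTORY = {
    'webservers': {'hosts': ['web1.example.com', 'web2.example.com'],
                   'vars': {'http_port': 80}},
    'dbservers': {'hosts': ['db1.example.com']},
}
HOSTVARS = {'web1.example.com': {'ansible_ssh_host': '10.0.0.11'}}

args = sys.argv[1:]
if args[:1] == ['--host'] and len(args) > 1:
    print(json.dumps(HOSTVARS.get(args[1], {})))
else:  # '--list' (or no argument)
    print(json.dumps(INVENTORY))
```
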
diff --git a/plugins/inventory/abiquo.ini b/contrib/inventory/abiquo.ini
similarity index 100%
rename from plugins/inventory/abiquo.ini
rename to contrib/inventory/abiquo.ini
diff --git a/plugins/inventory/abiquo.py b/contrib/inventory/abiquo.py
similarity index 100%
rename from plugins/inventory/abiquo.py
rename to contrib/inventory/abiquo.py
diff --git a/plugins/inventory/apache-libcloud.py b/contrib/inventory/apache-libcloud.py
similarity index 96%
rename from plugins/inventory/apache-libcloud.py
rename to contrib/inventory/apache-libcloud.py
index 95804095da9..151daeefe08 100755
--- a/plugins/inventory/apache-libcloud.py
+++ b/contrib/inventory/apache-libcloud.py
@@ -222,12 +222,17 @@ class LibcloudInventory(object):
self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest)
'''
# Inventory: Group by key pair
- if node.extra['keyname']:
- self.push(self.inventory, self.to_safe('key_' + node.extra['keyname']), dest)
+ if node.extra['key_name']:
+ self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest)
# Inventory: Group by security group, quick thing to handle single sg
- if node.extra['securitygroup']:
- self.push(self.inventory, self.to_safe('sg_' + node.extra['securitygroup'][0]), dest)
+ if node.extra['security_group']:
+ self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest)
+
+ # Inventory: Group by tag
+ if node.extra['tags']:
+ for tagkey in node.extra['tags'].keys():
+ self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest)
def get_host_info(self):
'''
diff --git a/plugins/inventory/cloudstack.ini b/contrib/inventory/cloudstack.ini
similarity index 100%
rename from plugins/inventory/cloudstack.ini
rename to contrib/inventory/cloudstack.ini
diff --git a/plugins/inventory/cloudstack.py b/contrib/inventory/cloudstack.py
similarity index 100%
rename from plugins/inventory/cloudstack.py
rename to contrib/inventory/cloudstack.py
diff --git a/plugins/inventory/cobbler.ini b/contrib/inventory/cobbler.ini
similarity index 100%
rename from plugins/inventory/cobbler.ini
rename to contrib/inventory/cobbler.ini
diff --git a/plugins/inventory/cobbler.py b/contrib/inventory/cobbler.py
similarity index 100%
rename from plugins/inventory/cobbler.py
rename to contrib/inventory/cobbler.py
diff --git a/plugins/inventory/collins.ini b/contrib/inventory/collins.ini
similarity index 100%
rename from plugins/inventory/collins.ini
rename to contrib/inventory/collins.ini
diff --git a/plugins/inventory/collins.py b/contrib/inventory/collins.py
similarity index 100%
rename from plugins/inventory/collins.py
rename to contrib/inventory/collins.py
diff --git a/plugins/inventory/consul.ini b/contrib/inventory/consul.ini
similarity index 100%
rename from plugins/inventory/consul.ini
rename to contrib/inventory/consul.ini
diff --git a/plugins/inventory/consul_io.py b/contrib/inventory/consul_io.py
similarity index 100%
rename from plugins/inventory/consul_io.py
rename to contrib/inventory/consul_io.py
diff --git a/plugins/inventory/digital_ocean.ini b/contrib/inventory/digital_ocean.ini
similarity index 66%
rename from plugins/inventory/digital_ocean.ini
rename to contrib/inventory/digital_ocean.ini
index c4e3fe21419..021899731c4 100644
--- a/plugins/inventory/digital_ocean.ini
+++ b/contrib/inventory/digital_ocean.ini
@@ -3,12 +3,11 @@
[digital_ocean]
-# The module needs your DigitalOcean Client ID and API Key.
-# These may also be specified on the command line via --client-id and --api-key
-# or via the environment variables DO_CLIENT_ID and DO_API_KEY
+# The module needs your DigitalOcean API Token.
+# It may also be specified on the command line via --api-token
+# or via the environment variables DO_API_TOKEN or DO_API_KEY
#
-#client_id = abcdefg123456
-#api_key = 123456abcdefg
+#api_token = 123456abcdefg
# API calls to DigitalOcean may be slow. For this reason, we cache the results
diff --git a/plugins/inventory/digital_ocean.py b/contrib/inventory/digital_ocean.py
similarity index 55%
rename from plugins/inventory/digital_ocean.py
rename to contrib/inventory/digital_ocean.py
index 1c3eccd21ed..1927f09fdf3 100755
--- a/plugins/inventory/digital_ocean.py
+++ b/contrib/inventory/digital_ocean.py
@@ -24,12 +24,12 @@ found. You can force this script to use the cache with --force-cache.
Configuration is read from `digital_ocean.ini`, then from environment variables,
and finally from command-line arguments.
-Most notably, the DigitalOcean Client ID and API Key must be specified. They
-can be specified in the INI file or with the following environment variables:
- export DO_CLIENT_ID='DO123' DO_API_KEY='abc123'
+Most notably, the DigitalOcean API Token must be specified. It can be specified
+in the INI file or with the following environment variables:
+ export DO_API_TOKEN='abc123' or
+ export DO_API_KEY='abc123'
-Alternatively, they can be passed on the command-line with --client-id and
---api-key.
+Alternatively, it can be passed on the command-line with --api-token.
If you specify DigitalOcean credentials in the INI file, a handy way to
get them into your environment (e.g., to use the digital_ocean module)
@@ -43,25 +43,31 @@ The following groups are generated from --list:
- image_ID
- image_NAME
- distro_NAME (distribution NAME from image)
- - region_ID
- region_NAME
- - size_ID
- size_NAME
- status_STATUS
When run against a specific host, this script returns the following variables:
+ - do_backup_ids
- do_created_at
- - do_distroy
+ - do_disk
+ - do_features - list
- do_id
- - do_image
- - do_image_id
+ - do_image - object
- do_ip_address
+ - do_private_ip_address
+ - do_kernel - object
+ - do_locked
+ - do_memory
- do_name
- - do_region
- - do_region_id
- - do_size
- - do_size_id
+ - do_networks - object
+ - do_next_backup_window
+ - do_region - object
+ - do_size - object
+ - do_size_slug
+ - do_snapshot_ids - list
- do_status
+ - do_vcpus
-----
```
@@ -70,8 +76,9 @@ usage: digital_ocean.py [-h] [--list] [--host HOST] [--all]
[--ssh-keys] [--domains] [--pretty]
[--cache-path CACHE_PATH]
[--cache-max_age CACHE_MAX_AGE]
- [--refresh-cache] [--client-id CLIENT_ID]
- [--api-key API_KEY]
+ [--force-cache]
+ [--refresh-cache]
+ [--api-token API_TOKEN]
Produce an Ansible Inventory file based on DigitalOcean credentials
@@ -93,12 +100,11 @@ optional arguments:
Path to the cache files (default: .)
--cache-max_age CACHE_MAX_AGE
Maximum age of the cached items (default: 0)
+ --force-cache Only use data from the cache
--refresh-cache Force refresh of cache by making API requests to
DigitalOcean (default: False - use cache files)
- --client-id CLIENT_ID, -c CLIENT_ID
- DigitalOcean Client ID
- --api-key API_KEY, -a API_KEY
- DigitalOcean API Key
+ --api-token API_TOKEN, -a API_TOKEN
+ DigitalOcean API Token
```
'''
@@ -106,7 +112,7 @@ optional arguments:
# (c) 2013, Evan Wies
#
# Inspired by the EC2 inventory plugin:
-# https://github.com/ansible/ansible/blob/devel/plugins/inventory/ec2.py
+# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py
#
# This file is part of Ansible,
#
@@ -157,7 +163,6 @@ class DigitalOceanInventory(object):
# DigitalOceanInventory data
self.data = {} # All DigitalOcean data
self.inventory = {} # Ansible Inventory
- self.index = {} # Various indices of Droplet metadata
# Define defaults
self.cache_path = '.'
@@ -169,49 +174,61 @@ class DigitalOceanInventory(object):
self.read_cli_args()
# Verify credentials were set
- if not hasattr(self, 'client_id') or not hasattr(self, 'api_key'):
- print '''Could not find values for DigitalOcean client_id and api_key.
-They must be specified via either ini file, command line argument (--client-id and --api-key),
-or environment variables (DO_CLIENT_ID and DO_API_KEY)'''
+ if not hasattr(self, 'api_token'):
+ print '''Could not find values for DigitalOcean api_token.
+They must be specified via either ini file, command line argument (--api-token),
+or environment variables (DO_API_TOKEN)'''
sys.exit(-1)
# env command, show DigitalOcean credentials
if self.args.env:
- print "DO_CLIENT_ID=%s DO_API_KEY=%s" % (self.client_id, self.api_key)
+ print "DO_API_TOKEN=%s" % self.api_token
sys.exit(0)
# Manage cache
self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache"
self.cache_refreshed = False
- if not self.args.force_cache and self.args.refresh_cache or not self.is_cache_valid():
- self.load_all_data_from_digital_ocean()
- else:
+ if self.is_cache_valid():
self.load_from_cache()
if len(self.data) == 0:
if self.args.force_cache:
print '''Cache is empty and --force-cache was specified'''
sys.exit(-1)
- self.load_all_data_from_digital_ocean()
- else:
- # We always get fresh droplets for --list, --host, --all, and --droplets
- # unless --force-cache is specified
- if not self.args.force_cache and (
- self.args.list or self.args.host or self.args.all or self.args.droplets):
- self.load_droplets_from_digital_ocean()
+
+ self.manager = DoManager(None, self.api_token, api_version=2)
# Pick the json_data to print based on the CLI command
- if self.args.droplets: json_data = { 'droplets': self.data['droplets'] }
- elif self.args.regions: json_data = { 'regions': self.data['regions'] }
- elif self.args.images: json_data = { 'images': self.data['images'] }
- elif self.args.sizes: json_data = { 'sizes': self.data['sizes'] }
- elif self.args.ssh_keys: json_data = { 'ssh_keys': self.data['ssh_keys'] }
- elif self.args.domains: json_data = { 'domains': self.data['domains'] }
- elif self.args.all: json_data = self.data
-
- elif self.args.host: json_data = self.load_droplet_variables_for_host()
+ if self.args.droplets:
+ self.load_from_digital_ocean('droplets')
+ json_data = {'droplets': self.data['droplets']}
+ elif self.args.regions:
+ self.load_from_digital_ocean('regions')
+ json_data = {'regions': self.data['regions']}
+ elif self.args.images:
+ self.load_from_digital_ocean('images')
+ json_data = {'images': self.data['images']}
+ elif self.args.sizes:
+ self.load_from_digital_ocean('sizes')
+ json_data = {'sizes': self.data['sizes']}
+ elif self.args.ssh_keys:
+ self.load_from_digital_ocean('ssh_keys')
+ json_data = {'ssh_keys': self.data['ssh_keys']}
+ elif self.args.domains:
+ self.load_from_digital_ocean('domains')
+ json_data = {'domains': self.data['domains']}
+ elif self.args.all:
+ self.load_from_digital_ocean()
+ json_data = self.data
+ elif self.args.host:
+ json_data = self.load_droplet_variables_for_host()
else: # '--list' this is last to make it default
- json_data = self.inventory
+ self.load_from_digital_ocean('droplets')
+ self.build_inventory()
+ json_data = self.inventory
+
+ if self.cache_refreshed:
+ self.write_to_cache()
if self.args.pretty:
print json.dumps(json_data, sort_keys=True, indent=2)
@@ -230,10 +247,8 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)'''
config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini')
# Credentials
- if config.has_option('digital_ocean', 'client_id'):
- self.client_id = config.get('digital_ocean', 'client_id')
- if config.has_option('digital_ocean', 'api_key'):
- self.api_key = config.get('digital_ocean', 'api_key')
+ if config.has_option('digital_ocean', 'api_token'):
+ self.api_token = config.get('digital_ocean', 'api_token')
# Cache related
if config.has_option('digital_ocean', 'cache_path'):
@@ -245,8 +260,10 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)'''
def read_environment(self):
''' Reads the settings from environment variables '''
# Setup credentials
- if os.getenv("DO_CLIENT_ID"): self.client_id = os.getenv("DO_CLIENT_ID")
- if os.getenv("DO_API_KEY"): self.api_key = os.getenv("DO_API_KEY")
+ if os.getenv("DO_API_TOKEN"):
+ self.api_token = os.getenv("DO_API_TOKEN")
+ if os.getenv("DO_API_KEY"):
+ self.api_token = os.getenv("DO_API_KEY")
def read_cli_args(self):
@@ -269,70 +286,57 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)'''
parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)')
parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)')
parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache')
- parser.add_argument('--refresh-cache','-r', action='store_true', default=False, help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')
+ parser.add_argument('--refresh-cache','-r', action='store_true', default=False,
+ help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')
- parser.add_argument('--env','-e', action='store_true', help='Display DO_CLIENT_ID and DO_API_KEY')
- parser.add_argument('--client-id','-c', action='store', help='DigitalOcean Client ID')
- parser.add_argument('--api-key','-a', action='store', help='DigitalOcean API Key')
+ parser.add_argument('--env','-e', action='store_true', help='Display DO_API_TOKEN')
+ parser.add_argument('--api-token','-a', action='store', help='DigitalOcean API Token')
self.args = parser.parse_args()
- if self.args.client_id: self.client_id = self.args.client_id
- if self.args.api_key: self.api_key = self.args.api_key
- if self.args.cache_path: self.cache_path = self.args.cache_path
- if self.args.cache_max_age: self.cache_max_age = self.args.cache_max_age
+ if self.args.api_token:
+ self.api_token = self.args.api_token
# Make --list default if none of the other commands are specified
- if (not self.args.droplets and not self.args.regions and not self.args.images and
- not self.args.sizes and not self.args.ssh_keys and not self.args.domains and
- not self.args.all and not self.args.host):
- self.args.list = True
+ if (not self.args.droplets and not self.args.regions and
+ not self.args.images and not self.args.sizes and
+ not self.args.ssh_keys and not self.args.domains and
+ not self.args.all and not self.args.host):
+ self.args.list = True
###########################################################################
# Data Management
###########################################################################
- def load_all_data_from_digital_ocean(self):
- ''' Use dopy to get all the information from DigitalOcean and save data in cache files '''
- manager = DoManager(self.client_id, self.api_key)
-
- self.data = {}
- self.data['droplets'] = self.sanitize_list(manager.all_active_droplets())
- self.data['regions'] = self.sanitize_list(manager.all_regions())
- self.data['images'] = self.sanitize_list(manager.all_images(filter=None))
- self.data['sizes'] = self.sanitize_list(manager.sizes())
- self.data['ssh_keys'] = self.sanitize_list(manager.all_ssh_keys())
- self.data['domains'] = self.sanitize_list(manager.all_domains())
-
- self.index = {}
- self.index['region_to_name'] = self.build_index(self.data['regions'], 'id', 'name')
- self.index['size_to_name'] = self.build_index(self.data['sizes'], 'id', 'name')
- self.index['image_to_name'] = self.build_index(self.data['images'], 'id', 'name')
- self.index['image_to_distro'] = self.build_index(self.data['images'], 'id', 'distribution')
- self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False)
-
- self.build_inventory()
-
- self.write_to_cache()
-
-
- def load_droplets_from_digital_ocean(self):
- ''' Use dopy to get droplet information from DigitalOcean and save data in cache files '''
- manager = DoManager(self.client_id, self.api_key)
- self.data['droplets'] = self.sanitize_list(manager.all_active_droplets())
- self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False)
- self.build_inventory()
- self.write_to_cache()
-
-
- def build_index(self, source_seq, key_from, key_to, use_slug=True):
- dest_dict = {}
- for item in source_seq:
- name = (use_slug and item.has_key('slug')) and item['slug'] or item[key_to]
- key = item[key_from]
- dest_dict[key] = name
- return dest_dict
+ def load_from_digital_ocean(self, resource=None):
+ '''Get JSON from DigitalOcean API'''
+ if self.args.force_cache:
+ return
+ # We always get fresh droplets
+ if self.is_cache_valid() and not (resource=='droplets' or resource is None):
+ return
+ if self.args.refresh_cache:
+ resource=None
+
+ if resource == 'droplets' or resource is None:
+ self.data['droplets'] = self.manager.all_active_droplets()
+ self.cache_refreshed = True
+ if resource == 'regions' or resource is None:
+ self.data['regions'] = self.manager.all_regions()
+ self.cache_refreshed = True
+ if resource == 'images' or resource is None:
+ self.data['images'] = self.manager.all_images(filter=None)
+ self.cache_refreshed = True
+ if resource == 'sizes' or resource is None:
+ self.data['sizes'] = self.manager.sizes()
+ self.cache_refreshed = True
+ if resource == 'ssh_keys' or resource is None:
+ self.data['ssh_keys'] = self.manager.all_ssh_keys()
+ self.cache_refreshed = True
+ if resource == 'domains' or resource is None:
+ self.data['domains'] = self.manager.all_domains()
+ self.cache_refreshed = True
def build_inventory(self):
@@ -341,74 +345,46 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)'''
# add all droplets by id and name
for droplet in self.data['droplets']:
- dest = droplet['ip_address']
+ # When using private_networking, the API reports the private address in "ip_address", so we need the public one for Ansible to reach the host
+ if 'private_networking' in droplet['features']:
+ for net in droplet['networks']['v4']:
+ if net['type']=='public':
+ dest=net['ip_address']
+ else:
+ continue
+ else:
+ dest = droplet['ip_address']
self.inventory[droplet['id']] = [dest]
self.push(self.inventory, droplet['name'], dest)
- self.push(self.inventory, 'region_'+droplet['region_id'], dest)
- self.push(self.inventory, 'image_' +droplet['image_id'], dest)
- self.push(self.inventory, 'size_' +droplet['size_id'], dest)
- self.push(self.inventory, 'status_'+droplet['status'], dest)
+ self.push(self.inventory, 'region_' + droplet['region']['slug'], dest)
+ self.push(self.inventory, 'image_' + str(droplet['image']['id']), dest)
+ self.push(self.inventory, 'size_' + droplet['size']['slug'], dest)
- region_name = self.index['region_to_name'].get(droplet['region_id'])
- if region_name:
- self.push(self.inventory, 'region_'+region_name, dest)
-
- size_name = self.index['size_to_name'].get(droplet['size_id'])
- if size_name:
- self.push(self.inventory, 'size_'+size_name, dest)
-
- image_name = self.index['image_to_name'].get(droplet['image_id'])
- if image_name:
- self.push(self.inventory, 'image_'+image_name, dest)
+ image_slug = droplet['image']['slug']
+ if image_slug:
+ self.push(self.inventory, 'image_' + self.to_safe(image_slug), dest)
+ else:
+ image_name = droplet['image']['name']
+ if image_name:
+ self.push(self.inventory, 'image_' + self.to_safe(image_name), dest)
- distro_name = self.index['image_to_distro'].get(droplet['image_id'])
- if distro_name:
- self.push(self.inventory, 'distro_'+distro_name, dest)
+ self.push(self.inventory, 'distro_' + self.to_safe(droplet['image']['distribution']), dest)
+ self.push(self.inventory, 'status_' + droplet['status'], dest)
def load_droplet_variables_for_host(self):
'''Generate a JSON response to a --host call'''
- host = self.to_safe(str(self.args.host))
+ host = int(self.args.host)
- if not host in self.index['host_to_droplet']:
- # try updating cache
- if not self.args.force_cache:
- self.load_all_data_from_digital_ocean()
- if not host in self.index['host_to_droplet']:
- # host might not exist anymore
- return {}
-
- droplet = None
- if self.cache_refreshed:
- for drop in self.data['droplets']:
- if drop['ip_address'] == host:
- droplet = self.sanitize_dict(drop)
- break
- else:
- # Cache wasn't refreshed this run, so hit DigitalOcean API
- manager = DoManager(self.client_id, self.api_key)
- droplet_id = self.index['host_to_droplet'][host]
- droplet = self.sanitize_dict(manager.show_droplet(droplet_id))
-
- if not droplet:
- return {}
+ droplet = self.manager.show_droplet(host)
# Put all the information in a 'do_' namespace
info = {}
for k, v in droplet.items():
info['do_'+k] = v
- # Generate user-friendly variables (i.e. not the ID's)
- if droplet.has_key('region_id'):
- info['do_region'] = self.index['region_to_name'].get(droplet['region_id'])
- if droplet.has_key('size_id'):
- info['do_size'] = self.index['size_to_name'].get(droplet['size_id'])
- if droplet.has_key('image_id'):
- info['do_image'] = self.index['image_to_name'].get(droplet['image_id'])
- info['do_distro'] = self.index['image_to_distro'].get(droplet['image_id'])
-
- return info
+ return {'droplet': info}
@@ -428,19 +404,21 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)'''
def load_from_cache(self):
''' Reads the data from the cache file and assigns it to member variables as Python Objects'''
- cache = open(self.cache_filename, 'r')
- json_data = cache.read()
- cache.close()
- data = json.loads(json_data)
+ try:
+ cache = open(self.cache_filename, 'r')
+ json_data = cache.read()
+ cache.close()
+ data = json.loads(json_data)
+ except IOError:
+ data = {'data': {}, 'inventory': {}}
self.data = data['data']
self.inventory = data['inventory']
- self.index = data['index']
def write_to_cache(self):
''' Writes data in JSON format to a file '''
- data = { 'data': self.data, 'index': self.index, 'inventory': self.inventory }
+ data = { 'data': self.data, 'inventory': self.inventory }
json_data = json.dumps(data, sort_keys=True, indent=2)
cache = open(self.cache_filename, 'w')
@@ -448,7 +426,6 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)'''
cache.close()
-
###########################################################################
# Utilities
###########################################################################
@@ -456,7 +433,7 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)'''
def push(self, my_dict, key, element):
''' Pushed an element onto an array that may not have been defined in the dict '''
if key in my_dict:
- my_dict[key].append(element);
+ my_dict[key].append(element)
else:
my_dict[key] = [element]
@@ -466,21 +443,6 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)'''
return re.sub("[^A-Za-z0-9\-\.]", "_", word)
- def sanitize_dict(self, d):
- new_dict = {}
- for k, v in d.items():
- if v != None:
- new_dict[self.to_safe(str(k))] = self.to_safe(str(v))
- return new_dict
-
-
- def sanitize_list(self, seq):
- new_seq = []
- for d in seq:
- new_seq.append(self.sanitize_dict(d))
- return new_seq
-
-
###########################################################################
# Run the script
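Note: the grouping the rewritten DigitalOcean script performs against the v2 API can be sketched in isolation. Everything below is a hypothetical, trimmed droplet payload; push() and to_safe() mirror the helpers kept in the script above.

import re

def to_safe(word):
    # Replace characters that are not valid in Ansible group names
    return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)

def push(my_dict, key, element):
    # Append to a list-valued key, creating the list on first use
    my_dict.setdefault(key, []).append(element)

# Hypothetical droplet, trimmed to the fields the script groups on
droplet = {
    'ip_address': '203.0.113.10',
    'region': {'slug': 'nyc3'},
    'image': {'id': 12345, 'slug': 'ubuntu-14-04-x64',
              'name': 'Ubuntu 14.04 x64', 'distribution': 'Ubuntu'},
    'size': {'slug': '512mb'},
    'status': 'active',
}

inventory = {}
dest = droplet['ip_address']
push(inventory, 'region_' + droplet['region']['slug'], dest)
push(inventory, 'image_' + str(droplet['image']['id']), dest)
push(inventory, 'size_' + droplet['size']['slug'], dest)
push(inventory, 'image_' + to_safe(droplet['image']['slug']), dest)
push(inventory, 'distro_' + to_safe(droplet['image']['distribution']), dest)
push(inventory, 'status_' + droplet['status'], dest)
print(sorted(inventory))
# ['distro_Ubuntu', 'image_12345', 'image_ubuntu-14-04-x64',
#  'region_nyc3', 'size_512mb', 'status_active']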
diff --git a/plugins/inventory/docker.py b/contrib/inventory/docker.py
similarity index 100%
rename from plugins/inventory/docker.py
rename to contrib/inventory/docker.py
diff --git a/plugins/inventory/docker.yml b/contrib/inventory/docker.yml
similarity index 100%
rename from plugins/inventory/docker.yml
rename to contrib/inventory/docker.yml
diff --git a/plugins/inventory/ec2.ini b/contrib/inventory/ec2.ini
similarity index 75%
rename from plugins/inventory/ec2.ini
rename to contrib/inventory/ec2.ini
index 1866f0bf3d6..50430ce0ed4 100644
--- a/plugins/inventory/ec2.ini
+++ b/contrib/inventory/ec2.ini
@@ -35,6 +35,9 @@ destination_variable = public_dns_name
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2. The key of an EC2 tag may optionally be used; however
# the boto instance variables hold precedence in the event of a collision.
+# WARNING: instances in a private VPC subnet _without_ a public IP address
+# will not be listed in the inventory until you set:
+# vpc_destination_variable = 'private_ip_address'
vpc_destination_variable = ip_address
# To tag instances on EC2 with the resource records that point to them from
@@ -44,6 +47,9 @@ route53 = False
# To exclude RDS instances from the inventory, uncomment and set to False.
#rds = False
+# To exclude ElastiCache instances from the inventory, uncomment and set to False.
+#elasticache = False
+
# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com
@@ -52,10 +58,27 @@ route53 = False
# 'all_instances' to True to return all instances regardless of state.
all_instances = False
+# By default, only EC2 instances in the 'running' state are returned. Specify
+# EC2 instance states to return as a comma-separated list. This
+# option is overridden when 'all_instances' is True.
+# instance_states = pending, running, shutting-down, terminated, stopping, stopped
+
# By default, only RDS instances in the 'available' state are returned. Set
# 'all_rds_instances' to True return all RDS instances regardless of state.
all_rds_instances = False
+# By default, only ElastiCache clusters and nodes in the 'available' state
+# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
+# to True to return all ElastiCache clusters and nodes, regardless of state.
+#
+# Note that all_elasticache_nodes only applies to listed clusters. That means
+# if you set all_elasticache_clusters to False, no node will be returned from
+# unavailable clusters, regardless of their state and of what you set for
+# all_elasticache_nodes.
+all_elasticache_replication_groups = False
+all_elasticache_clusters = False
+all_elasticache_nodes = False
+
# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
@@ -86,12 +109,16 @@ group_by_tag_none = True
group_by_route53_names = True
group_by_rds_engine = True
group_by_rds_parameter_group = True
+group_by_elasticache_engine = True
+group_by_elasticache_cluster = True
+group_by_elasticache_parameter_group = True
+group_by_elasticache_replication_group = True
# If you only want to include hosts that match a certain regular expression
-# pattern_include = stage-*
+# pattern_include = staging-*
# If you want to exclude any hosts that match a certain regular expression
-# pattern_exclude = stage-*
+# pattern_exclude = staging-*
# Instance filters can be used to control which instances are retrieved for
# inventory. For the full list of possible filters, please read the EC2 API
@@ -99,14 +126,14 @@ group_by_rds_parameter_group = True
# Filters are key/value pairs separated by '=', to list multiple filters use
# a list separated by commas. See examples below.
-# Retrieve only instances with (key=value) env=stage tag
-# instance_filters = tag:env=stage
+# Retrieve only instances with (key=value) env=staging tag
+# instance_filters = tag:env=staging
# Retrieve only instances with role=webservers OR role=dbservers tag
# instance_filters = tag:role=webservers,tag:role=dbservers
-# Retrieve only t1.micro instances OR instances with tag env=stage
-# instance_filters = instance-type=t1.micro,tag:env=stage
+# Retrieve only t1.micro instances OR instances with tag env=staging
+# instance_filters = instance-type=t1.micro,tag:env=staging
# You can use wildcards in filter values also. Below will list instances which
# tag Name value matches webservers1*
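Note: a small sketch of how the new instance_states setting is consumed, following the parsing logic added to ec2.py below. The config value shown is hypothetical; states outside the EC2 list are silently dropped, and all_instances = True overrides the whole list.

ec2_valid_instance_states = ['pending', 'running', 'shutting-down',
                             'terminated', 'stopping', 'stopped']

# Hypothetical value read from the instance_states line in ec2.ini
raw = 'running, stopped, hibernating'

states = [s.strip() for s in raw.split(',')
          if s.strip() in ec2_valid_instance_states]
print(states)  # ['running', 'stopped'] - the unknown state is ignored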
diff --git a/plugins/inventory/ec2.py b/contrib/inventory/ec2.py
similarity index 58%
rename from plugins/inventory/ec2.py
rename to contrib/inventory/ec2.py
index 16ac93f5ee4..e4b0b072d43 100755
--- a/plugins/inventory/ec2.py
+++ b/contrib/inventory/ec2.py
@@ -121,6 +121,7 @@ from time import time
import boto
from boto import ec2
from boto import rds
+from boto import elasticache
from boto import route53
import six
@@ -191,7 +192,7 @@ class Ec2Inventory(object):
else:
config = configparser.ConfigParser()
ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
- ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
+ ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
config.read(ec2_ini_path)
# is eucalyptus?
@@ -232,16 +233,63 @@ class Ec2Inventory(object):
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
- # Return all EC2 and RDS instances (if RDS is enabled)
+ # Include ElastiCache instances?
+ self.elasticache_enabled = True
+ if config.has_option('ec2', 'elasticache'):
+ self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
+
+ # Return all EC2 instances?
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
+
+ # Instance states to be gathered in inventory. Default is 'running'.
+ # Setting 'all_instances' to 'yes' overrides this option.
+ ec2_valid_instance_states = [
+ 'pending',
+ 'running',
+ 'shutting-down',
+ 'terminated',
+ 'stopping',
+ 'stopped'
+ ]
+ self.ec2_instance_states = []
+ if self.all_instances:
+ self.ec2_instance_states = ec2_valid_instance_states
+ elif config.has_option('ec2', 'instance_states'):
+ for instance_state in config.get('ec2', 'instance_states').split(','):
+ instance_state = instance_state.strip()
+ if instance_state not in ec2_valid_instance_states:
+ continue
+ self.ec2_instance_states.append(instance_state)
+ else:
+ self.ec2_instance_states = ['running']
+
+ # Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
+ # Return all ElastiCache replication groups? (if ElastiCache is enabled)
+ if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
+ self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
+ else:
+ self.all_elasticache_replication_groups = False
+
+ # Return all ElastiCache clusters? (if ElastiCache is enabled)
+ if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
+ self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
+ else:
+ self.all_elasticache_clusters = False
+
+ # Return all ElastiCache nodes? (if ElastiCache is enabled)
+ if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
+ self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
+ else:
+ self.all_elasticache_nodes = False
+
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if not os.path.exists(cache_dir):
@@ -272,6 +320,10 @@ class Ec2Inventory(object):
'group_by_route53_names',
'group_by_rds_engine',
'group_by_rds_parameter_group',
+ 'group_by_elasticache_engine',
+ 'group_by_elasticache_cluster',
+ 'group_by_elasticache_parameter_group',
+ 'group_by_elasticache_replication_group',
]
for option in group_by_options:
if config.has_option('ec2', option):
@@ -334,6 +386,9 @@ class Ec2Inventory(object):
self.get_instances_by_region(region)
if self.rds_enabled:
self.get_rds_instances_by_region(region)
+ if self.elasticache_enabled:
+ self.get_elasticache_clusters_by_region(region)
+ self.get_elasticache_replication_groups_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
@@ -373,7 +428,7 @@ class Ec2Inventory(object):
else:
backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
error = "Error connecting to %s backend.\n%s" % (backend, e.message)
- self.fail_with_error(error)
+ self.fail_with_error(error, 'getting EC2 instances')
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular
@@ -387,12 +442,82 @@ class Ec2Inventory(object):
self.add_rds_instance(instance, region)
except boto.exception.BotoServerError as e:
error = e.reason
-
+
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS RDS is down:\n%s" % e.message
- self.fail_with_error(error)
+ self.fail_with_error(error, 'getting RDS instances')
+
+ def get_elasticache_clusters_by_region(self, region):
+ ''' Makes an AWS API call to the list of ElastiCache clusters (with
+ nodes' info) in a particular region.'''
+
+ # ElastiCache boto module doesn't provide a get_all_instances method,
+ # that's why we need to call describe directly (it would be called by
+ # the shorthand method anyway...)
+ try:
+ conn = elasticache.connect_to_region(region)
+ if conn:
+ # show_cache_node_info = True
+ # because we also want nodes' information
+ response = conn.describe_cache_clusters(None, None, None, True)
+
+ except boto.exception.BotoServerError as e:
+ error = e.reason
+
+ if e.error_code == 'AuthFailure':
+ error = self.get_auth_error_message()
+ if not e.reason == "Forbidden":
+ error = "Looks like AWS ElastiCache is down:\n%s" % e.message
+ self.fail_with_error(error, 'getting ElastiCache clusters')
+
+ try:
+ # Boto also doesn't provide wrapper classes for CacheClusters or
+ # CacheNodes. Because of that we can't make use of the get_list
+ # method in the AWSQueryConnection. Let's do the work manually.
+ clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
+
+ except KeyError as e:
+ error = "ElastiCache query to AWS failed (unexpected format)."
+ self.fail_with_error(error, 'getting ElastiCache clusters')
+
+ for cluster in clusters:
+ self.add_elasticache_cluster(cluster, region)
+
+ def get_elasticache_replication_groups_by_region(self, region):
+ ''' Makes an AWS API call to the list of ElastiCache replication groups
+ in a particular region.'''
+
+ # ElastiCache boto module doesn't provide a get_all_instances method,
+ # that's why we need to call describe directly (it would be called by
+ # the shorthand method anyway...)
+ try:
+ conn = elasticache.connect_to_region(region)
+ if conn:
+ response = conn.describe_replication_groups()
+
+ except boto.exception.BotoServerError as e:
+ error = e.reason
+
+ if e.error_code == 'AuthFailure':
+ error = self.get_auth_error_message()
+ if not e.reason == "Forbidden":
+ error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
+ self.fail_with_error(error, 'getting ElastiCache replication groups')
+
+ try:
+ # Boto also doesn't provide wrapper classes for ReplicationGroups.
+ # Because of that we can't make use of the get_list method in the
+ # AWSQueryConnection. Let's do the work manually.
+ replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
+
+ except KeyError as e:
+ error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
+ self.fail_with_error(error, 'getting ElastiCache replication groups')
+
+ for replication_group in replication_groups:
+ self.add_elasticache_replication_group(replication_group, region)
def get_auth_error_message(self):
''' create an informative error message if there is an issue authenticating'''
@@ -410,9 +535,12 @@ class Ec2Inventory(object):
errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
return '\n'.join(errors)
-
- def fail_with_error(self, err_msg):
+
+ def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
+ if err_operation:
+ err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
+ err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
@@ -428,8 +556,8 @@ class Ec2Inventory(object):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
- # Only want running instances unless all_instances is True
- if not self.all_instances and instance.state != 'running':
+ # Only return instances with desired instance states
+ if instance.state not in self.ec2_instance_states:
return
# Select the best destination address
@@ -520,7 +648,10 @@ class Ec2Inventory(object):
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
- key = self.to_safe("tag_" + k + "=" + v)
+ if v:
+ key = self.to_safe("tag_" + k + "=" + v)
+ else:
+ key = self.to_safe("tag_" + k)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
@@ -629,6 +760,243 @@ class Ec2Inventory(object):
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
+ def add_elasticache_cluster(self, cluster, region):
+ ''' Adds an ElastiCache cluster to the inventory and index, as long as
+ its nodes are addressable '''
+
+ # Only want available clusters unless all_elasticache_clusters is True
+ if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
+ return
+
+ # Select the best destination address
+ if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
+ # Memcached cluster
+ dest = cluster['ConfigurationEndpoint']['Address']
+ is_redis = False
+ else:
+ # Redis single node cluster
+ # Because all Redis clusters are single nodes, we'll merge the
+ # info from the cluster with info about the node
+ dest = cluster['CacheNodes'][0]['Endpoint']['Address']
+ is_redis = True
+
+ if not dest:
+ # Skip clusters we cannot address (e.g. private VPC subnet)
+ return
+
+ # Add to index
+ self.index[dest] = [region, cluster['CacheClusterId']]
+
+ # Inventory: Group by instance ID (always a group of 1)
+ if self.group_by_instance_id:
+ self.inventory[cluster['CacheClusterId']] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
+
+ # Inventory: Group by region
+ if self.group_by_region and not is_redis:
+ self.push(self.inventory, region, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'regions', region)
+
+ # Inventory: Group by availability zone
+ if self.group_by_availability_zone and not is_redis:
+ self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
+ if self.nested_groups:
+ if self.group_by_region:
+ self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
+ self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
+
+ # Inventory: Group by node type
+ if self.group_by_instance_type and not is_redis:
+ type_name = self.to_safe('type_' + cluster['CacheNodeType'])
+ self.push(self.inventory, type_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'types', type_name)
+
+ # Inventory: Group by VPC (information not available in the current
+ # AWS API version for ElastiCache)
+
+ # Inventory: Group by security group
+ if self.group_by_security_group and not is_redis:
+
+ # Check for the existence of the 'SecurityGroups' key and also if
+ # this key has some value. When the cluster is not placed in a SG
+ # the query can return None here and cause an error.
+ if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
+ for security_group in cluster['SecurityGroups']:
+ key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
+ self.push(self.inventory, key, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'security_groups', key)
+
+ # Inventory: Group by engine
+ if self.group_by_elasticache_engine and not is_redis:
+ self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
+
+ # Inventory: Group by parameter group
+ if self.group_by_elasticache_parameter_group:
+ self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
+
+ # Inventory: Group by replication group
+ if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
+ self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
+
+ # Global Tag: all ElastiCache clusters
+ self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
+
+ host_info = self.get_host_info_dict_from_describe_dict(cluster)
+
+ self.inventory["_meta"]["hostvars"][dest] = host_info
+
+ # Add the nodes
+ for node in cluster['CacheNodes']:
+ self.add_elasticache_node(node, cluster, region)
+
+ def add_elasticache_node(self, node, cluster, region):
+ ''' Adds an ElastiCache node to the inventory and index, as long as
+ it is addressable '''
+
+ # Only want available nodes unless all_elasticache_nodes is True
+ if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
+ return
+
+ # Select the best destination address
+ dest = node['Endpoint']['Address']
+
+ if not dest:
+ # Skip nodes we cannot address (e.g. private VPC subnet)
+ return
+
+ node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
+
+ # Add to index
+ self.index[dest] = [region, node_id]
+
+ # Inventory: Group by node ID (always a group of 1)
+ if self.group_by_instance_id:
+ self.inventory[node_id] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'instances', node_id)
+
+ # Inventory: Group by region
+ if self.group_by_region:
+ self.push(self.inventory, region, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'regions', region)
+
+ # Inventory: Group by availability zone
+ if self.group_by_availability_zone:
+ self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
+ if self.nested_groups:
+ if self.group_by_region:
+ self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
+ self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
+
+ # Inventory: Group by node type
+ if self.group_by_instance_type:
+ type_name = self.to_safe('type_' + cluster['CacheNodeType'])
+ self.push(self.inventory, type_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'types', type_name)
+
+ # Inventory: Group by VPC (information not available in the current
+ # AWS API version for ElastiCache)
+
+ # Inventory: Group by security group
+ if self.group_by_security_group:
+
+ # Check for the existence of the 'SecurityGroups' key and also if
+ # this key has some value. When the cluster is not placed in a SG
+ # the query can return None here and cause an error.
+ if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
+ for security_group in cluster['SecurityGroups']:
+ key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
+ self.push(self.inventory, key, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'security_groups', key)
+
+ # Inventory: Group by engine
+ if self.group_by_elasticache_engine:
+ self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
+
+ # Inventory: Group by parameter group (done at cluster level)
+
+ # Inventory: Group by replication group (done at cluster level)
+
+ # Inventory: Group by ElastiCache Cluster
+ if self.group_by_elasticache_cluster:
+ self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
+
+ # Global Tag: all ElastiCache nodes
+ self.push(self.inventory, 'elasticache_nodes', dest)
+
+ host_info = self.get_host_info_dict_from_describe_dict(node)
+
+ if dest in self.inventory["_meta"]["hostvars"]:
+ self.inventory["_meta"]["hostvars"][dest].update(host_info)
+ else:
+ self.inventory["_meta"]["hostvars"][dest] = host_info
+
+ def add_elasticache_replication_group(self, replication_group, region):
+ ''' Adds an ElastiCache replication group to the inventory and index '''
+
+ # Only want available clusters unless all_elasticache_replication_groups is True
+ if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
+ return
+
+ # Select the best destination address (PrimaryEndpoint)
+ dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
+
+ if not dest:
+ # Skip clusters we cannot address (e.g. private VPC subnet)
+ return
+
+ # Add to index
+ self.index[dest] = [region, replication_group['ReplicationGroupId']]
+
+ # Inventory: Group by ID (always a group of 1)
+ if self.group_by_instance_id:
+ self.inventory[replication_group['ReplicationGroupId']] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
+
+ # Inventory: Group by region
+ if self.group_by_region:
+ self.push(self.inventory, region, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'regions', region)
+
+ # Inventory: Group by availability zone (doesn't apply to replication groups)
+
+ # Inventory: Group by node type (doesn't apply to replication groups)
+
+ # Inventory: Group by VPC (information not available in the current
+ # AWS API version for replication groups
+
+ # Inventory: Group by security group (doesn't apply to replication groups)
+ # Check this value in cluster level
+
+ # Inventory: Group by engine (replication groups are always Redis)
+ if self.group_by_elasticache_engine:
+ self.push(self.inventory, 'elasticache_redis', dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_engines', 'redis')
+
+ # Global Tag: all ElastiCache replication groups
+ self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
+
+ host_info = self.get_host_info_dict_from_describe_dict(replication_group)
+
+ self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
@@ -677,7 +1045,6 @@ class Ec2Inventory(object):
return list(name_list)
-
def get_host_info_dict_from_instance(self, instance):
instance_vars = {}
for key in vars(instance):
@@ -723,6 +1090,91 @@ class Ec2Inventory(object):
return instance_vars
+ def get_host_info_dict_from_describe_dict(self, describe_dict):
+ ''' Parses the dictionary returned by the API call into a flat list
+ of parameters. This method should be used only when 'describe' is
+ used directly because Boto doesn't provide specific classes. '''
+
+ # I really don't agree with prefixing everything with 'ec2'
+ # because EC2, RDS and ElastiCache are different services.
+ # I'm just following the pattern used until now to not break any
+ # compatibility.
+
+ host_info = {}
+ for key in describe_dict:
+ value = describe_dict[key]
+ key = self.to_safe('ec2_' + self.uncammelize(key))
+
+ # Handle complex types
+
+ # Target: Memcached Cache Clusters
+ if key == 'ec2_configuration_endpoint' and value:
+ host_info['ec2_configuration_endpoint_address'] = value['Address']
+ host_info['ec2_configuration_endpoint_port'] = value['Port']
+
+ # Target: Cache Nodes and Redis Cache Clusters (single node)
+ if key == 'ec2_endpoint' and value:
+ host_info['ec2_endpoint_address'] = value['Address']
+ host_info['ec2_endpoint_port'] = value['Port']
+
+ # Target: Redis Replication Groups
+ if key == 'ec2_node_groups' and value:
+ host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
+ host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
+ replica_count = 0
+ for node in value[0]['NodeGroupMembers']:
+ if node['CurrentRole'] == 'primary':
+ host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
+ host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
+ host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
+ elif node['CurrentRole'] == 'replica':
+ host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
+ host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
+ host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
+ replica_count += 1
+
+ # Target: Redis Replication Groups
+ if key == 'ec2_member_clusters' and value:
+ host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
+
+ # Target: All Cache Clusters
+ elif key == 'ec2_cache_parameter_group':
+ host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
+ host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
+ host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
+
+ # Target: Almost everything
+ elif key == 'ec2_security_groups':
+
+ # Skip if SecurityGroups is None
+ # (it is possible to have the key defined but no value in it).
+ if value is not None:
+ sg_ids = []
+ for sg in value:
+ sg_ids.append(sg['SecurityGroupId'])
+ host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
+
+ # Target: Everything
+ # Preserve booleans and integers
+ elif type(value) in [int, bool]:
+ host_info[key] = value
+
+ # Target: Everything
+ # Sanitize string values
+ elif isinstance(value, six.string_types):
+ host_info[key] = value.strip()
+
+ # Target: Everything
+ # Replace None by an empty string
+ elif type(value) == type(None):
+ host_info[key] = ''
+
+ else:
+ # Remove non-processed complex types
+ pass
+
+ return host_info
+
def get_host_info(self):
''' Get variables about a specific host '''
@@ -786,6 +1238,9 @@ class Ec2Inventory(object):
cache.write(json_data)
cache.close()
+ def uncammelize(self, key):
+ temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
+ return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be
@@ -793,7 +1248,6 @@ class Ec2Inventory(object):
return re.sub("[^A-Za-z0-9\_]", "_", word)
-
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
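Note: the new get_host_info_dict_from_describe_dict() relies on uncammelize() plus to_safe() to turn the raw describe keys into ec2_-prefixed snake_case variables. A minimal sketch of just that key transformation, applied to a hypothetical fragment of a describe_cache_clusters() result:

import re

def uncammelize(key):
    # 'CacheClusterId' -> 'cache_cluster_id'
    temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()

def to_safe(word):
    # Keep only characters that are valid in variable names
    return re.sub(r"[^A-Za-z0-9\_]", "_", word)

# Hypothetical fragment of a cluster description
sample = {'CacheClusterId': 'test-redis-001',
          'CacheNodeType': 'cache.t2.micro',
          'NumCacheNodes': 1}

host_info = dict((to_safe('ec2_' + uncammelize(k)), v) for k, v in sample.items())
print(host_info['ec2_cache_cluster_id'])   # test-redis-001
print(host_info['ec2_cache_node_type'])    # cache.t2.micro
print(host_info['ec2_num_cache_nodes'])    # 1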
diff --git a/plugins/inventory/fleet.py b/contrib/inventory/fleet.py
similarity index 100%
rename from plugins/inventory/fleet.py
rename to contrib/inventory/fleet.py
diff --git a/plugins/inventory/freeipa.py b/contrib/inventory/freeipa.py
similarity index 100%
rename from plugins/inventory/freeipa.py
rename to contrib/inventory/freeipa.py
diff --git a/plugins/inventory/gce.ini b/contrib/inventory/gce.ini
similarity index 100%
rename from plugins/inventory/gce.ini
rename to contrib/inventory/gce.ini
diff --git a/plugins/inventory/gce.py b/contrib/inventory/gce.py
similarity index 96%
rename from plugins/inventory/gce.py
rename to contrib/inventory/gce.py
index 76e14f23012..740e112332c 100755
--- a/plugins/inventory/gce.py
+++ b/contrib/inventory/gce.py
@@ -66,7 +66,7 @@ Examples:
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
- $ plugins/inventory/gce.py --host my_instance
+ $ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson
Version: 0.0.1
@@ -221,7 +221,7 @@ class GceInventory(object):
'gce_image': inst.image,
'gce_machine_type': inst.size,
'gce_private_ip': inst.private_ips[0],
- 'gce_public_ip': inst.public_ips[0],
+ 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
'gce_name': inst.name,
'gce_description': inst.extra['description'],
'gce_status': inst.extra['status'],
@@ -230,7 +230,7 @@ class GceInventory(object):
'gce_metadata': md,
'gce_network': net,
# Hosts don't have a public name, so we add an IP
- 'ansible_ssh_host': inst.public_ips[0]
+ 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
}
def get_instance(self, instance_name):
@@ -257,7 +257,10 @@ class GceInventory(object):
tags = node.extra['tags']
for t in tags:
- tag = 'tag_%s' % t
+ if t.startswith('group-'):
+ tag = t[6:]
+ else:
+ tag = 'tag_%s' % t
if groups.has_key(tag): groups[tag].append(name)
else: groups[tag] = [name]
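Note: a short sketch of the new GCE tag handling: tags prefixed with 'group-' become bare group names, anything else keeps the usual 'tag_' prefix. The example tags are made up.

def tag_to_group(t):
    # 'group-webservers' -> 'webservers', 'staging' -> 'tag_staging'
    return t[6:] if t.startswith('group-') else 'tag_%s' % t

print([tag_to_group(t) for t in ['group-webservers', 'staging']])
# ['webservers', 'tag_staging']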
diff --git a/plugins/inventory/jail.py b/contrib/inventory/jail.py
similarity index 100%
rename from plugins/inventory/jail.py
rename to contrib/inventory/jail.py
diff --git a/contrib/inventory/landscape.py b/contrib/inventory/landscape.py
new file mode 100755
index 00000000000..4b53171c34e
--- /dev/null
+++ b/contrib/inventory/landscape.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+
+# (c) 2015, Marc Abramowitz
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Dynamic inventory script which lets you use nodes discovered by Canonical's
+# Landscape (http://www.ubuntu.com/management/landscape-features).
+#
+# Requires the `landscape_api` Python module
+# See:
+# - https://landscape.canonical.com/static/doc/api/api-client-package.html
+# - https://landscape.canonical.com/static/doc/api/python-api.html
+#
+# Environment variables
+# ---------------------
+# - `LANDSCAPE_API_URI`
+# - `LANDSCAPE_API_KEY`
+# - `LANDSCAPE_API_SECRET`
+# - `LANDSCAPE_API_SSL_CA_FILE` (optional)
+
+
+import argparse
+import collections
+import os
+import sys
+
+from landscape_api.base import API, HTTPError
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+_key = 'landscape'
+
+
+class EnvironmentConfig(object):
+ uri = os.getenv('LANDSCAPE_API_URI')
+ access_key = os.getenv('LANDSCAPE_API_KEY')
+ secret_key = os.getenv('LANDSCAPE_API_SECRET')
+ ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE')
+
+
+def _landscape_client():
+ env = EnvironmentConfig()
+ return API(
+ uri=env.uri,
+ access_key=env.access_key,
+ secret_key=env.secret_key,
+ ssl_ca_file=env.ssl_ca_file)
+
+
+def get_landscape_members_data():
+ return _landscape_client().get_computers()
+
+
+def get_nodes(data):
+ return [node['hostname'] for node in data]
+
+
+def get_groups(data):
+ groups = collections.defaultdict(list)
+
+ for node in data:
+ for value in node['tags']:
+ groups[value].append(node['hostname'])
+
+ return groups
+
+
+def get_meta(data):
+ meta = {'hostvars': {}}
+ for node in data:
+ meta['hostvars'][node['hostname']] = {'tags': node['tags']}
+ return meta
+
+
+def print_list():
+ data = get_landscape_members_data()
+ nodes = get_nodes(data)
+ groups = get_groups(data)
+ meta = get_meta(data)
+ inventory_data = {_key: nodes, '_meta': meta}
+ inventory_data.update(groups)
+ print(json.dumps(inventory_data))
+
+
+def print_host(host):
+ data = get_landscape_members_data()
+ meta = get_meta(data)
+ print(json.dumps(meta['hostvars'][host]))
+
+
+def get_args(args_list):
+ parser = argparse.ArgumentParser(
+ description='ansible inventory script reading from landscape cluster')
+ mutex_group = parser.add_mutually_exclusive_group(required=True)
+ help_list = 'list all hosts from landscape cluster'
+ mutex_group.add_argument('--list', action='store_true', help=help_list)
+ help_host = 'display variables for a host'
+ mutex_group.add_argument('--host', help=help_host)
+ return parser.parse_args(args_list)
+
+
+def main(args_list):
+ args = get_args(args_list)
+ if args.list:
+ print_list()
+ if args.host:
+ print_host(args.host)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
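Note: a minimal sketch of the inventory shape print_list() above emits, built without the landscape_api dependency. The two machines and their tags are hypothetical.

import collections
import json

# Hypothetical data in the shape returned by get_landscape_members_data()
data = [
    {'hostname': 'web1', 'tags': ['production', 'nginx']},
    {'hostname': 'db1', 'tags': ['production', 'postgres']},
]

groups = collections.defaultdict(list)
for node in data:
    for tag in node['tags']:
        groups[tag].append(node['hostname'])

inventory = {'landscape': [n['hostname'] for n in data],
             '_meta': {'hostvars': dict((n['hostname'], {'tags': n['tags']}) for n in data)}}
inventory.update(groups)
print(json.dumps(inventory, indent=2))
# Groups 'production', 'nginx' and 'postgres' appear alongside the
# 'landscape' group and the '_meta' hostvars block.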
diff --git a/plugins/inventory/libcloud.ini b/contrib/inventory/libcloud.ini
similarity index 100%
rename from plugins/inventory/libcloud.ini
rename to contrib/inventory/libcloud.ini
diff --git a/plugins/inventory/libvirt_lxc.py b/contrib/inventory/libvirt_lxc.py
similarity index 100%
rename from plugins/inventory/libvirt_lxc.py
rename to contrib/inventory/libvirt_lxc.py
diff --git a/plugins/inventory/linode.ini b/contrib/inventory/linode.ini
similarity index 100%
rename from plugins/inventory/linode.ini
rename to contrib/inventory/linode.ini
diff --git a/plugins/inventory/linode.py b/contrib/inventory/linode.py
similarity index 100%
rename from plugins/inventory/linode.py
rename to contrib/inventory/linode.py
diff --git a/plugins/inventory/nova.ini b/contrib/inventory/nova.ini
similarity index 100%
rename from plugins/inventory/nova.ini
rename to contrib/inventory/nova.ini
diff --git a/plugins/inventory/nova.py b/contrib/inventory/nova.py
old mode 100644
new mode 100755
similarity index 100%
rename from plugins/inventory/nova.py
rename to contrib/inventory/nova.py
diff --git a/plugins/inventory/openshift.py b/contrib/inventory/openshift.py
similarity index 100%
rename from plugins/inventory/openshift.py
rename to contrib/inventory/openshift.py
diff --git a/plugins/inventory/openstack.py b/contrib/inventory/openstack.py
similarity index 100%
rename from plugins/inventory/openstack.py
rename to contrib/inventory/openstack.py
diff --git a/plugins/inventory/openstack.yml b/contrib/inventory/openstack.yml
similarity index 100%
rename from plugins/inventory/openstack.yml
rename to contrib/inventory/openstack.yml
diff --git a/contrib/inventory/openvz.py b/contrib/inventory/openvz.py
new file mode 100644
index 00000000000..fd0bd9ff794
--- /dev/null
+++ b/contrib/inventory/openvz.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# openvz.py
+#
+# Copyright 2014 jordonr
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Inspired by libvirt_lxc.py inventory script
+# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py
+#
+# Groups are determined by the description field of openvz guests;
+# multiple groups can be separated by commas: webserver,dbserver
+
+from subprocess import Popen,PIPE
+import sys
+import json
+
+
+#List openvz hosts
+vzhosts = ['vzhost1','vzhost2','vzhost3']
+#Add openvz hosts to the inventory and Add "_meta" trick
+inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}}
+#default group, when description not defined
+default_group = ['vzguest']
+
+def get_guests():
+ #Loop through vzhosts
+ for h in vzhosts:
+ #SSH to vzhost and get the list of guests in json
+ pipe = Popen(['ssh', h,'vzlist','-j'], stdout=PIPE, universal_newlines=True)
+
+ #Load Json info of guests
+ json_data = json.loads(pipe.stdout.read())
+
+ #loop through guests
+ for j in json_data:
+ #Add information to host vars
+ inventory['_meta']['hostvars'][j['hostname']] = {'ctid': j['ctid'], 'veid': j['veid'], 'vpsid': j['vpsid'], 'private_path': j['private'], 'root_path': j['root'], 'ip': j['ip']}
+
+ #determine group from guest description
+ if j['description'] is not None:
+ groups = j['description'].split(",")
+ else:
+ groups = default_group
+
+ #add guest to inventory
+ for g in groups:
+ if g not in inventory:
+ inventory[g] = {'hosts': []}
+
+ inventory[g]['hosts'].append(j['hostname'])
+
+ return inventory
+
+
+if len(sys.argv) == 2 and sys.argv[1] == '--list':
+ inv_json = get_guests()
+ print json.dumps(inv_json, sort_keys=True)
+elif len(sys.argv) == 3 and sys.argv[1] == '--host':
+ print json.dumps({})
+else:
+ print "Need an argument, either --list or --host "
diff --git a/contrib/inventory/ovirt.ini b/contrib/inventory/ovirt.ini
new file mode 100644
index 00000000000..a52f9d63ff5
--- /dev/null
+++ b/contrib/inventory/ovirt.ini
@@ -0,0 +1,33 @@
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# Author: Josha Inglis based on the gce.ini by Eric Johnson
+
+[ovirt]
+# ovirt Service Account configuration information can be stored in the
+# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already
+# exist in your PYTHONPATH and be picked up automatically with an import
+# statement in the inventory script. However, you can specify an absolute
+# path to the secrets.py file with the 'ovirt_api_secrets' parameter.
+ovirt_api_secrets =
+
+# If you are not going to use a 'secrets.py' file, you can set the necessary
+# authorization parameters here.
+ovirt_url =
+ovirt_username =
+ovirt_password =
diff --git a/contrib/inventory/ovirt.py b/contrib/inventory/ovirt.py
new file mode 100755
index 00000000000..dc022c5dfd2
--- /dev/null
+++ b/contrib/inventory/ovirt.py
@@ -0,0 +1,287 @@
+#!/usr/bin/env python
+# Copyright 2015 IIX Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+ovirt external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests to
+oVirt via the ovirt-engine-sdk-python library.
+
+When run against a specific host, this script returns the following variables
+based on the data obtained from the ovirt_sdk Node object:
+ - ovirt_uuid
+ - ovirt_id
+ - ovirt_image
+ - ovirt_machine_type
+ - ovirt_ips
+ - ovirt_name
+ - ovirt_description
+ - ovirt_status
+ - ovirt_zone
+ - ovirt_tags
+ - ovirt_stats
+
+When run in --list mode, instances are grouped by the following categories:
+
+ - zone:
+ zone group name.
+ - instance tags:
+ An entry is created for each tag. For example, if you have two instances
+ with a common tag called 'foo', they will both be grouped together under
+ the 'tag_foo' name.
+ - network name:
+ the name of the network is appended to 'network_' (e.g. the 'default'
+ network will result in a group named 'network_default')
+ - running status:
+ group name prefixed with 'status_' (e.g. status_up, status_down,..)
+
+Examples:
+ Execute uname on all instances in the us-central1-a zone
+ $ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a"
+
+ Use the ovirt inventory script to print out instance specific information
+ $ contrib/inventory/ovirt.py --host my_instance
+
+Author: Josha Inglis based on the gce.py by Eric Johnson
+Version: 0.0.1
+"""
+
+USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin"
+USER_AGENT_VERSION = "v1"
+
+import sys
+import os
+import argparse
+import ConfigParser
+from collections import defaultdict
+
+try:
+ import json
+except ImportError:
+ # noinspection PyUnresolvedReferences,PyPackageRequirements
+ import simplejson as json
+
+try:
+ # noinspection PyUnresolvedReferences
+ from ovirtsdk.api import API
+ # noinspection PyUnresolvedReferences
+ from ovirtsdk.xml import params
+except ImportError:
+ print("ovirt inventory script requires ovirt-engine-sdk-python")
+ sys.exit(1)
+
+
+class OVirtInventory(object):
+ def __init__(self):
+ # Read settings and parse CLI arguments
+ self.args = self.parse_cli_args()
+ self.driver = self.get_ovirt_driver()
+
+ # Just display data for specific host
+ if self.args.host:
+ print self.json_format_dict(
+ self.node_to_dict(self.get_instance(self.args.host)),
+ pretty=self.args.pretty
+ )
+ sys.exit(0)
+
+ # Otherwise, assume user wants all instances grouped
+ print(
+ self.json_format_dict(
+ data=self.group_instances(),
+ pretty=self.args.pretty
+ )
+ )
+ sys.exit(0)
+
+ @staticmethod
+ def get_ovirt_driver():
+ """
+ Determine the ovirt authorization settings and return a ovirt_sdk driver.
+
+ :rtype : ovirtsdk.api.API
+ """
+ kwargs = {}
+
+ ovirt_ini_default_path = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), "ovirt.ini")
+ ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path)
+
+ # Create a ConfigParser.
+ # This provides empty defaults to each key, so that environment
+ # variable configuration (as opposed to INI configuration) is able
+ # to work.
+ config = ConfigParser.SafeConfigParser(defaults={
+ 'ovirt_url': '',
+ 'ovirt_username': '',
+ 'ovirt_password': '',
+ 'ovirt_api_secrets': '',
+ })
+ if 'ovirt' not in config.sections():
+ config.add_section('ovirt')
+ config.read(ovirt_ini_path)
+
+ # Attempt to get ovirt params from a configuration file, if one
+ # exists.
+ secrets_path = config.get('ovirt', 'ovirt_api_secrets')
+ secrets_found = False
+ try:
+ # noinspection PyUnresolvedReferences,PyPackageRequirements
+ import secrets
+
+ kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
+ secrets_found = True
+ except ImportError:
+ pass
+
+ if not secrets_found and secrets_path:
+ if not secrets_path.endswith('secrets.py'):
+ err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py"
+ print(err)
+ sys.exit(1)
+ sys.path.append(os.path.dirname(secrets_path))
+ try:
+ # noinspection PyUnresolvedReferences,PyPackageRequirements
+ import secrets
+
+ kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
+ except ImportError:
+ pass
+ if not secrets_found:
+ kwargs = {
+ 'url': config.get('ovirt', 'ovirt_url'),
+ 'username': config.get('ovirt', 'ovirt_username'),
+ 'password': config.get('ovirt', 'ovirt_password'),
+ }
+
+ # If the appropriate environment variables are set, they override
+ # other configuration; process those into our args and kwargs.
+ kwargs['url'] = os.environ.get('OVIRT_URL', kwargs.get('url'))
+ kwargs['username'] = os.environ.get('OVIRT_EMAIL', kwargs.get('username'))
+ kwargs['password'] = os.environ.get('OVIRT_PASS', kwargs.get('password'))
+
+ # Retrieve and return the ovirt driver.
+ return API(insecure=True, **kwargs)
+
+ @staticmethod
+ def parse_cli_args():
+ """
+ Command line argument processing
+
+ :rtype : argparse.Namespace
+ """
+
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt')
+ parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
+ parser.add_argument('--host', action='store', help='Get all information about an instance')
+ parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)')
+ return parser.parse_args()
+
+ def node_to_dict(self, inst):
+ """
+ :type inst: params.VM
+ """
+ if inst is None:
+ return {}
+
+ inst.get_custom_properties()
+ ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \
+ if inst.get_guest_info() is not None else []
+ stats = {}
+ for stat in inst.get_statistics().list():
+ stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum()
+
+ return {
+ 'ovirt_uuid': inst.get_id(),
+ 'ovirt_id': inst.get_id(),
+ 'ovirt_image': inst.get_os().get_type(),
+ 'ovirt_machine_type': inst.get_instance_type(),
+ 'ovirt_ips': ips,
+ 'ovirt_name': inst.get_name(),
+ 'ovirt_description': inst.get_description(),
+ 'ovirt_status': inst.get_status().get_state(),
+ 'ovirt_zone': inst.get_cluster().get_id(),
+ 'ovirt_tags': self.get_tags(inst),
+ 'ovirt_stats': stats,
+ # Hosts don't have a public name, so we add an IP
+ 'ansible_ssh_host': ips[0] if len(ips) > 0 else None
+ }
+
+ @staticmethod
+ def get_tags(inst):
+ """
+ :type inst: params.VM
+ """
+ return [x.get_name() for x in inst.get_tags().list()]
+
+ # noinspection PyBroadException,PyUnusedLocal
+ def get_instance(self, instance_name):
+ """Gets details about a specific instance """
+ try:
+ return self.driver.vms.get(name=instance_name)
+ except Exception as e:
+ return None
+
+ def group_instances(self):
+ """Group all instances"""
+ groups = defaultdict(list)
+ meta = {"hostvars": {}}
+
+ for node in self.driver.vms.list():
+ assert isinstance(node, params.VM)
+ name = node.get_name()
+
+ meta["hostvars"][name] = self.node_to_dict(node)
+
+ zone = node.get_cluster().get_name()
+ groups[zone].append(name)
+
+ tags = self.get_tags(node)
+ for t in tags:
+ tag = 'tag_%s' % t
+ groups[tag].append(name)
+
+ nets = [x.get_name() for x in node.get_nics().list()]
+ for net in nets:
+ net = 'network_%s' % net
+ groups[net].append(name)
+
+ status = node.get_status().get_state()
+ stat = 'status_%s' % status.lower()
+ if stat in groups:
+ groups[stat].append(name)
+ else:
+ groups[stat] = [name]
+
+ groups["_meta"] = meta
+
+ return groups
+
+ @staticmethod
+ def json_format_dict(data, pretty=False):
+ """ Converts a dict to a JSON object and dumps it as a formatted
+ string """
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+# Run the script
+OVirtInventory()
diff --git a/contrib/inventory/proxmox.py b/contrib/inventory/proxmox.py
new file mode 100755
index 00000000000..80f6628d973
--- /dev/null
+++ b/contrib/inventory/proxmox.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import urllib
+import urllib2
+try:
+ import json
+except ImportError:
+ import simplejson as json
+import os
+import sys
+from optparse import OptionParser
+
+class ProxmoxNodeList(list):
+ def get_names(self):
+ return [node['node'] for node in self]
+
+class ProxmoxQemu(dict):
+ def get_variables(self):
+ variables = {}
+ for key, value in self.iteritems():
+ variables['proxmox_' + key] = value
+ return variables
+
+class ProxmoxQemuList(list):
+ def __init__(self, data=[]):
+ for item in data:
+ self.append(ProxmoxQemu(item))
+
+ def get_names(self):
+ return [qemu['name'] for qemu in self if qemu['template'] != 1]
+
+ def get_by_name(self, name):
+ results = [qemu for qemu in self if qemu['name'] == name]
+ return results[0] if len(results) > 0 else None
+
+ def get_variables(self):
+ variables = {}
+ for qemu in self:
+ variables[qemu['name']] = qemu.get_variables()
+
+ return variables
+
+class ProxmoxPoolList(list):
+ def get_names(self):
+ return [pool['poolid'] for pool in self]
+
+class ProxmoxPool(dict):
+ def get_members_name(self):
+ return [member['name'] for member in self['members'] if member['template'] != 1]
+
+class ProxmoxAPI(object):
+ def __init__(self, options):
+ self.options = options
+ self.credentials = None
+
+ if not options.url:
+ raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).')
+ elif not options.username:
+ raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).')
+ elif not options.password:
+ raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).')
+
+ def auth(self):
+ request_path = '{}api2/json/access/ticket'.format(self.options.url)
+
+ request_params = urllib.urlencode({
+ 'username': self.options.username,
+ 'password': self.options.password,
+ })
+
+ data = json.load(urllib2.urlopen(request_path, request_params))
+
+ self.credentials = {
+ 'ticket': data['data']['ticket'],
+ 'CSRFPreventionToken': data['data']['CSRFPreventionToken'],
+ }
+
+ def get(self, url, data=None):
+ opener = urllib2.build_opener()
+ opener.addheaders.append(('Cookie', 'PVEAuthCookie={}'.format(self.credentials['ticket'])))
+
+ request_path = '{}{}'.format(self.options.url, url)
+ request = opener.open(request_path, data)
+
+ response = json.load(request)
+ return response['data']
+
+ def nodes(self):
+ return ProxmoxNodeList(self.get('api2/json/nodes'))
+
+ def node_qemu(self, node):
+ return ProxmoxQemuList(self.get('api2/json/nodes/{}/qemu'.format(node)))
+
+ def pools(self):
+ return ProxmoxPoolList(self.get('api2/json/pools'))
+
+ def pool(self, poolid):
+ return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid)))
+
+def main_list(options):
+ results = {
+ 'all': {
+ 'hosts': [],
+ },
+ '_meta': {
+ 'hostvars': {},
+ }
+ }
+
+ proxmox_api = ProxmoxAPI(options)
+ proxmox_api.auth()
+
+ for node in proxmox_api.nodes().get_names():
+ qemu_list = proxmox_api.node_qemu(node)
+ results['all']['hosts'] += qemu_list.get_names()
+ results['_meta']['hostvars'].update(qemu_list.get_variables())
+
+ # pools
+ for pool in proxmox_api.pools().get_names():
+ results[pool] = {
+ 'hosts': proxmox_api.pool(pool).get_members_name(),
+ }
+
+ return results
+
+def main_host(options):
+ proxmox_api = ProxmoxAPI(options)
+ proxmox_api.auth()
+
+ for node in proxmox_api.nodes().get_names():
+ qemu_list = proxmox_api.node_qemu(node)
+ qemu = qemu_list.get_by_name(options.host)
+ if qemu:
+ return qemu.get_variables()
+
+ return {}
+
+def main():
+ parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
+ parser.add_option('--list', action="store_true", default=False, dest="list")
+ parser.add_option('--host', dest="host")
+ parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url')
+ parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username')
+ parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password')
+ parser.add_option('--pretty', action="store_true", default=False, dest='pretty')
+ (options, args) = parser.parse_args()
+
+ if options.list:
+ data = main_list(options)
+ elif options.host:
+ data = main_host(options)
+ else:
+ parser.print_help()
+ sys.exit(1)
+
+ indent = None
+ if options.pretty:
+ indent = 2
+
+ print json.dumps(data, indent=indent)
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/inventory/rax.ini b/contrib/inventory/rax.ini
similarity index 100%
rename from plugins/inventory/rax.ini
rename to contrib/inventory/rax.ini
diff --git a/plugins/inventory/rax.py b/contrib/inventory/rax.py
old mode 100644
new mode 100755
similarity index 100%
rename from plugins/inventory/rax.py
rename to contrib/inventory/rax.py
diff --git a/contrib/inventory/serf.py b/contrib/inventory/serf.py
new file mode 100755
index 00000000000..e1340da92df
--- /dev/null
+++ b/contrib/inventory/serf.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+
+# (c) 2015, Marc Abramowitz
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Dynamic inventory script which lets you use nodes discovered by Serf
+# (https://serfdom.io/).
+#
+# Requires the `serfclient` Python module from
+# https://pypi.python.org/pypi/serfclient
+#
+# Environment variables
+# ---------------------
+# - `SERF_RPC_ADDR`
+# - `SERF_RPC_AUTH`
+#
+# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr
+
+import argparse
+import collections
+import os
+import sys
+
+# https://pypi.python.org/pypi/serfclient
+from serfclient import SerfClient, EnvironmentConfig
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+_key = 'serf'
+
+
+def _serf_client():
+ env = EnvironmentConfig()
+ return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key)
+
+
+def get_serf_members_data():
+ return _serf_client().members().body['Members']
+
+
+def get_nodes(data):
+ return [node['Name'] for node in data]
+
+
+def get_groups(data):
+ groups = collections.defaultdict(list)
+
+ for node in data:
+ for key, value in node['Tags'].items():
+ groups[value].append(node['Name'])
+
+ return groups
+
+
+def get_meta(data):
+ meta = {'hostvars': {}}
+ for node in data:
+ meta['hostvars'][node['Name']] = node['Tags']
+ return meta
+
+
+def print_list():
+ data = get_serf_members_data()
+ nodes = get_nodes(data)
+ groups = get_groups(data)
+ meta = get_meta(data)
+ inventory_data = {_key: nodes, '_meta': meta}
+ inventory_data.update(groups)
+ print(json.dumps(inventory_data))
+
+
+def print_host(host):
+ data = get_serf_members_data()
+ meta = get_meta(data)
+ print(json.dumps(meta['hostvars'][host]))
+
+
+def get_args(args_list):
+ parser = argparse.ArgumentParser(
+ description='ansible inventory script reading from serf cluster')
+ mutex_group = parser.add_mutually_exclusive_group(required=True)
+ help_list = 'list all hosts from serf cluster'
+ mutex_group.add_argument('--list', action='store_true', help=help_list)
+ help_host = 'display variables for a host'
+ mutex_group.add_argument('--host', help=help_host)
+ return parser.parse_args(args_list)
+
+
+def main(args_list):
+ args = get_args(args_list)
+ if args.list:
+ print_list()
+ if args.host:
+ print_host(args.host)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
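For readers new to dynamic inventory scripts: Ansible invokes them with `--list` to get the whole inventory (including `_meta.hostvars`) or with `--host <name>` for one host's variables. A minimal sketch of the structure the serf script above emits, with hypothetical member data standing in for a live SerfClient call::

    import collections
    import json

    # made-up stand-in for SerfClient().members().body['Members']
    members = [
        {'Name': 'node1', 'Tags': {'role': 'web', 'dc': 'us-east'}},
        {'Name': 'node2', 'Tags': {'role': 'db', 'dc': 'us-east'}},
    ]

    groups = collections.defaultdict(list)
    for node in members:
        for key, value in node['Tags'].items():
            groups[value].append(node['Name'])          # grouped by tag value

    inventory = {'serf': [n['Name'] for n in members],
                 '_meta': {'hostvars': dict((n['Name'], n['Tags']) for n in members)}}
    inventory.update(groups)

    # `serf.py --list` prints this structure; `--host node1` prints only node1's Tags.
    print(json.dumps(inventory, indent=2))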
diff --git a/plugins/inventory/softlayer.py b/contrib/inventory/softlayer.py
similarity index 100%
rename from plugins/inventory/softlayer.py
rename to contrib/inventory/softlayer.py
diff --git a/plugins/inventory/spacewalk.py b/contrib/inventory/spacewalk.py
similarity index 100%
rename from plugins/inventory/spacewalk.py
rename to contrib/inventory/spacewalk.py
diff --git a/plugins/inventory/ssh_config.py b/contrib/inventory/ssh_config.py
similarity index 75%
rename from plugins/inventory/ssh_config.py
rename to contrib/inventory/ssh_config.py
index 7c04c8cc6da..55401a664d3 100755
--- a/plugins/inventory/ssh_config.py
+++ b/contrib/inventory/ssh_config.py
@@ -19,6 +19,10 @@
# Dynamic inventory script which lets you use aliases from ~/.ssh/config.
#
+# There were some issues with various Paramiko versions. I took a deeper look
+# and tested heavily. Now, ansible parses this alright with Paramiko versions
+# 1.7.2 to 1.15.2.
+#
# It prints inventory based on parsed ~/.ssh/config. You can refer to hosts
# with their alias, rather than with the IP or hostname. It takes advantage
# of the ansible_ssh_{host,port,user,private_key_file}.
@@ -39,7 +43,6 @@
import argparse
import os.path
import sys
-
import paramiko
try:
@@ -47,6 +50,8 @@ try:
except ImportError:
import simplejson as json
+SSH_CONF = '~/.ssh/config'
+
_key = 'ssh_config'
_ssh_to_ansible = [('user', 'ansible_ssh_user'),
@@ -56,15 +61,25 @@ _ssh_to_ansible = [('user', 'ansible_ssh_user'),
def get_config():
- with open(os.path.expanduser('~/.ssh/config')) as f:
+ if not os.path.isfile(os.path.expanduser(SSH_CONF)):
+ return {}
+ with open(os.path.expanduser(SSH_CONF)) as f:
cfg = paramiko.SSHConfig()
cfg.parse(f)
ret_dict = {}
for d in cfg._config:
+ if type(d['host']) is list:
+ alias = d['host'][0]
+ else:
+ alias = d['host']
+ if ('?' in alias) or ('*' in alias):
+ continue
_copy = dict(d)
del _copy['host']
- for host in d['host']:
- ret_dict[host] = _copy['config']
+ if 'config' in _copy:
+ ret_dict[alias] = _copy['config']
+ else:
+ ret_dict[alias] = _copy
return ret_dict
@@ -75,7 +90,12 @@ def print_list():
tmp_dict = {}
for ssh_opt, ans_opt in _ssh_to_ansible:
if ssh_opt in attributes:
- tmp_dict[ans_opt] = attributes[ssh_opt]
+ # If the attribute is a list, just take the first element.
+ # Private key is returned in a list for some reason.
+ attr = attributes[ssh_opt]
+ if type(attr) is list:
+ attr = attr[0]
+ tmp_dict[ans_opt] = attr
if tmp_dict:
meta['hostvars'][alias] = tmp_dict
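The rewritten get_config() above works around two quirks: depending on the paramiko version, each parsed entry stores 'host' as either a string or a list, and wildcard patterns are useless as inventory aliases. A small standalone sketch of that normalization, with made-up entries in place of paramiko's private _config list::

    sample_entries = [  # hypothetical shapes of paramiko's parsed entries
        {'host': ['web1'], 'config': {'hostname': '10.0.0.5', 'user': 'deploy'}},
        {'host': '*', 'config': {'serveraliveinterval': '30'}},
    ]

    hosts = {}
    for entry in sample_entries:
        host = entry['host']
        alias = host[0] if isinstance(host, list) else host
        if '?' in alias or '*' in alias:
            continue                      # wildcard blocks are skipped
        hosts[alias] = entry.get('config', entry)

    print(hosts)   # {'web1': {'hostname': '10.0.0.5', 'user': 'deploy'}}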
diff --git a/plugins/inventory/vagrant.py b/contrib/inventory/vagrant.py
similarity index 65%
rename from plugins/inventory/vagrant.py
rename to contrib/inventory/vagrant.py
index 7f6dc925e83..10dc61cdb24 100755
--- a/plugins/inventory/vagrant.py
+++ b/contrib/inventory/vagrant.py
@@ -13,6 +13,7 @@ Example Vagrant configuration using this script:
"""
# Copyright (C) 2013 Mark Mandel
+# 2015 Igor Khomyakov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -33,17 +34,26 @@ Example Vagrant configuration using this script:
#
import sys
+import os.path
import subprocess
import re
-import string
+from paramiko import SSHConfig
+from cStringIO import StringIO
from optparse import OptionParser
+from collections import defaultdict
try:
import json
except:
import simplejson as json
+_group = 'vagrant' # a default group
+_ssh_to_ansible = [('user', 'ansible_ssh_user'),
+ ('hostname', 'ansible_ssh_host'),
+ ('identityfile', 'ansible_ssh_private_key_file'),
+ ('port', 'ansible_ssh_port')]
+
# Options
-#------------------------------
+# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host ")
parser.add_option('--list', default=False, dest="list", action="store_true",
@@ -56,19 +66,13 @@ parser.add_option('--host', default=None, dest="host",
# helper functions
#
+
# get all the ssh configs for all boxes in an array of dictionaries.
def get_ssh_config():
- configs = []
-
- boxes = list_running_boxes()
+ return {k: get_a_ssh_config(k) for k in list_running_boxes()}
- for box in boxes:
- config = get_a_ssh_config(box)
- configs.append(config)
- return configs
-
-#list all the running boxes
+# list all the running boxes
def list_running_boxes():
output = subprocess.check_output(["vagrant", "status"]).split('\n')
@@ -79,54 +83,47 @@ def list_running_boxes():
if matcher:
boxes.append(matcher.group(1))
-
return boxes
-#get the ssh config for a single box
+
+# get the ssh config for a single box
def get_a_ssh_config(box_name):
"""Gives back a map of all the machine's ssh configurations"""
- output = subprocess.check_output(["vagrant", "ssh-config", box_name]).split('\n')
+ output = subprocess.check_output(["vagrant", "ssh-config", box_name])
+ config = SSHConfig()
+ config.parse(StringIO(output))
+ host_config = config.lookup(box_name)
- config = {}
- for line in output:
- if line.strip() != '':
- matcher = re.search("( )?([a-zA-Z]+) (.*)", line)
- config[matcher.group(2)] = matcher.group(3)
-
- return config
+ # man 5 ssh_config:
+ # > It is possible to have multiple identity files ...
+ # > all these identities will be tried in sequence.
+ for id in host_config['identityfile']:
+ if os.path.isfile(id):
+ host_config['identityfile'] = id
+ return {v: host_config[k] for k, v in _ssh_to_ansible}
# List out servers that vagrant has running
-#------------------------------
+# ------------------------------
if options.list:
ssh_config = get_ssh_config()
- hosts = { 'vagrant': []}
+ meta = defaultdict(dict)
- for data in ssh_config:
- hosts['vagrant'].append(data['HostName'])
+ for host in ssh_config:
+ meta['hostvars'][host] = ssh_config[host]
- print json.dumps(hosts)
+ print json.dumps({_group: list(ssh_config.keys()), '_meta': meta})
sys.exit(0)
# Get out the host details
-#------------------------------
+# ------------------------------
elif options.host:
- result = {}
- ssh_config = get_ssh_config()
-
- details = filter(lambda x: (x['HostName'] == options.host), ssh_config)
- if len(details) > 0:
- #pass through the port, in case it's non standard.
- result = details[0]
- result['ansible_ssh_port'] = result['Port']
-
- print json.dumps(result)
+ print json.dumps(get_a_ssh_config(options.host))
sys.exit(0)
-
# Print out help
-#------------------------------
+# ------------------------------
else:
parser.print_help()
sys.exit(0)
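The core of the rewrite above is letting paramiko parse `vagrant ssh-config` output instead of hand-rolled regexes. A standalone sketch with a made-up ssh-config snippet (the script itself reads the text from subprocess and wraps it in cStringIO)::

    from StringIO import StringIO
    from paramiko import SSHConfig

    output = """Host default
      HostName 127.0.0.1
      User vagrant
      Port 2222
      IdentityFile /home/me/.vagrant.d/insecure_private_key
    """

    cfg = SSHConfig()
    cfg.parse(StringIO(output))
    host = cfg.lookup('default')
    host['identityfile'] = host['identityfile'][0]   # paramiko returns a list here

    mapping = [('user', 'ansible_ssh_user'), ('hostname', 'ansible_ssh_host'),
               ('identityfile', 'ansible_ssh_private_key_file'),
               ('port', 'ansible_ssh_port')]
    print(dict((ansible_key, host[ssh_key]) for ssh_key, ansible_key in mapping))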
diff --git a/plugins/inventory/vbox.py b/contrib/inventory/vbox.py
similarity index 100%
rename from plugins/inventory/vbox.py
rename to contrib/inventory/vbox.py
diff --git a/plugins/inventory/vmware.ini b/contrib/inventory/vmware.ini
similarity index 91%
rename from plugins/inventory/vmware.ini
rename to contrib/inventory/vmware.ini
index 964be18c14e..5097735fd0e 100644
--- a/plugins/inventory/vmware.ini
+++ b/contrib/inventory/vmware.ini
@@ -23,6 +23,10 @@ guests_only = True
# caching will be disabled.
#cache_dir = ~/.cache/ansible
+# Specify a prefix filter. Any VMs with names beginning with this string will
+# not be returned.
+# prefix_filter = test_
+
[auth]
# Specify hostname or IP address of vCenter/ESXi server. A port may be
diff --git a/plugins/inventory/vmware.py b/contrib/inventory/vmware.py
similarity index 97%
rename from plugins/inventory/vmware.py
rename to contrib/inventory/vmware.py
index 92030d66e56..b708d599946 100755
--- a/plugins/inventory/vmware.py
+++ b/contrib/inventory/vmware.py
@@ -55,7 +55,7 @@ from suds.sudsobject import Object as SudsObject
class VMwareInventory(object):
-
+
def __init__(self, guests_only=None):
self.config = ConfigParser.SafeConfigParser()
if os.environ.get('VMWARE_INI', ''):
@@ -95,7 +95,7 @@ class VMwareInventory(object):
Saves the value to cache with the name given.
'''
if self.config.has_option('defaults', 'cache_dir'):
- cache_dir = self.config.get('defaults', 'cache_dir')
+ cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_file = os.path.join(cache_dir, name)
@@ -115,7 +115,7 @@ class VMwareInventory(object):
else:
cache_max_age = 0
cache_stat = os.stat(cache_file)
- if (cache_stat.st_mtime + cache_max_age) < time.time():
+ if (cache_stat.st_mtime + cache_max_age) >= time.time():
with open(cache_file) as cache:
return json.load(cache)
return default
@@ -305,6 +305,11 @@ class VMwareInventory(object):
else:
vm_group = default_group + '_vm'
+ if self.config.has_option('defaults', 'prefix_filter'):
+ prefix_filter = self.config.get('defaults', 'prefix_filter')
+ else:
+ prefix_filter = None
+
# Loop through physical hosts:
for host in HostSystem.all(self.client):
@@ -318,6 +323,9 @@ class VMwareInventory(object):
# Loop through all VMs on physical host.
for vm in host.vm:
+ if prefix_filter:
+ if vm.name.startswith( prefix_filter ):
+ continue
self._add_host(inv, 'all', vm.name)
self._add_host(inv, vm_group, vm.name)
vm_info = self._get_vm_info(vm)
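Two behavioural changes ride along with the rename above: the cache check previously returned the cached JSON only once it was already stale (the old `<` comparison), and the new prefix_filter option skips VMs whose names start with a configured string. A minimal sketch of the corrected freshness test (file name and max age are illustrative)::

    import json
    import os
    import time

    def get_cache(cache_file, cache_max_age=3600, default=None):
        if not os.path.exists(cache_file):
            return default
        cache_stat = os.stat(cache_file)
        if (cache_stat.st_mtime + cache_max_age) >= time.time():
            with open(cache_file) as cache:
                return json.load(cache)   # still fresh: reuse it
        return default                    # expired: caller rebuilds the inventory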
diff --git a/plugins/inventory/windows_azure.ini b/contrib/inventory/windows_azure.ini
similarity index 100%
rename from plugins/inventory/windows_azure.ini
rename to contrib/inventory/windows_azure.ini
diff --git a/plugins/inventory/windows_azure.py b/contrib/inventory/windows_azure.py
similarity index 100%
rename from plugins/inventory/windows_azure.py
rename to contrib/inventory/windows_azure.py
diff --git a/plugins/inventory/zabbix.ini b/contrib/inventory/zabbix.ini
similarity index 100%
rename from plugins/inventory/zabbix.ini
rename to contrib/inventory/zabbix.ini
diff --git a/plugins/inventory/zabbix.py b/contrib/inventory/zabbix.py
similarity index 100%
rename from plugins/inventory/zabbix.py
rename to contrib/inventory/zabbix.py
diff --git a/plugins/inventory/zone.py b/contrib/inventory/zone.py
similarity index 100%
rename from plugins/inventory/zone.py
rename to contrib/inventory/zone.py
diff --git a/docs/man/man1/ansible-playbook.1 b/docs/man/man1/ansible-playbook.1
index f1a1babc763..0c820b72e37 100644
--- a/docs/man/man1/ansible-playbook.1
+++ b/docs/man/man1/ansible-playbook.1
@@ -2,12 +2,12 @@
.\" Title: ansible-playbook
.\" Author: :doctype:manpage
.\" Generator: DocBook XSL Stylesheets v1.78.1
-.\" Date: 05/05/2015
+.\" Date: 07/23/2015
.\" Manual: System administration commands
-.\" Source: Ansible 2.0.0
+.\" Source: Ansible %VERSION%
.\" Language: English
.\"
-.TH "ANSIBLE\-PLAYBOOK" "1" "05/05/2015" "Ansible 2\&.0\&.0" "System administration commands"
+.TH "ANSIBLE\-PLAYBOOK" "1" "07/23/2015" "Ansible %VERSION%" "System administration commands"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -43,9 +43,77 @@ The names of one or more YAML format files to run as ansible playbooks\&.
.RE
.SH "OPTIONS"
.PP
-\fB\-v\fR, \fB\-\-verbose\fR
+\fB\-k\fR, \fB\-\-ask\-pass\fR
.RS 4
-Verbose mode, more output from successful actions will be shown\&. Give up to three times for more output\&.
+Prompt for the SSH password instead of assuming key\-based authentication with ssh\-agent\&.
+.RE
+.PP
+\fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR
+.RS 4
+Prompt for the password to use for playbook plays that request sudo access, if any\&.
+.RE
+.PP
+\fB\-b\fR, \fB\-\-become\fR
+.RS 4
+Run operations with become (nopasswd implied)
+.RE
+.PP
+\fB\-\-become\-method=BECOME_METHOD\fR
+.RS 4
+Privilege escalation method to use (default=sudo), valid choices: [ sudo | su | pbrun | pfexec | runas ]
+.RE
+.PP
+\fB\-\-become\-user=BECOME_USER\fR
+.RS 4
+Run operations as this user (default=None)\&.
+.RE
+.PP
+\fB\-C\fR, \fB\-\-check\fR
+.RS 4
+Do not make any changes on the remote system, but test resources to see what might have changed\&. Note this can not scan all possible resource types and is only a simulation\&.
+.RE
+.PP
+\fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR
+.RS 4
+Connection type to use\&. Possible options are
+\fIparamiko\fR
+(SSH),
+\fIssh\fR, and
+\fIlocal\fR\&.
+\fIlocal\fR
+is mostly useful for crontab or kickstarts\&.
+.RE
+.PP
+\fB\-D\fR, \fB\-\-diff\fR
+.RS 4
+When changing any templated files, show the unified diffs of how they changed\&. When used with \-\-check, shows how the files would have changed if \-\-check were not used\&.
+.RE
+.PP
+\fB\-e\fR \fIVARS\fR, \fB\-\-extra\-vars=\fR\fIVARS\fR
+.RS 4
+Extra variables to inject into a playbook, in key=value key=value format or as quoted JSON (hashes and arrays)\&. To load variables from a file, specify the file preceded by @ (e\&.g\&. @vars\&.yml)\&.
+.RE
+.PP
+\fB\-\-flush\-cache\fR
+.RS 4
+Clear the fact cache\&.
+.RE
+.PP
+\fB\-\-force\-handlers\fR
+.RS 4
+Run handlers even if a task fails\&.
+.RE
+.PP
+\fB\-f\fR \fINUM\fR, \fB\-\-forks=\fR\fINUM\fR
+.RS 4
+Level of parallelism\&.
+\fINUM\fR
+is specified as an integer, the default is 5\&.
+.RE
+.PP
+\fB\-h\fR, \fB\-\-help\fR
+.RS 4
+Show help page and exit
.RE
.PP
\fB\-i\fR \fIPATH\fR, \fB\-\-inventory=\fR\fIPATH\fR
@@ -56,6 +124,26 @@ to the inventory hosts file, which defaults to
\fI/etc/ansible/hosts\fR\&.
.RE
.PP
+\fB\-l\fR \fISUBSET\fR, \fB\-\-limit=\fR\fISUBSET\fR
+.RS 4
+Further limits the selected host/group patterns\&.
+.RE
+.PP
+\fB\-\-list\-hosts\fR
+.RS 4
+Outputs a list of matching hosts; does not execute anything else\&.
+.RE
+.PP
+\fB\-\-list\-tags\fR
+.RS 4
+List all available tags\&.
+.RE
+.PP
+\fB\-\-list\-tasks\fR
+.RS 4
+List all tasks that would be executed
+.RE
+.PP
\fB\-M\fR \fIDIRECTORY\fR, \fB\-\-module\-path=\fR\fIDIRECTORY\fR
.RS 4
The
@@ -64,36 +152,44 @@ search path to load modules from\&. The default is
\fI/usr/share/ansible\fR\&. This can also be set with the ANSIBLE_LIBRARY environment variable\&.
.RE
.PP
-\fB\-e\fR \fIVARS\fR, \fB\-\-extra\-vars=\fR\fIVARS\fR
+\fB\-\-private\-key=\fR\fIPRIVATE_KEY_FILE\fR
.RS 4
-Extra variables to inject into a playbook, in key=value key=value format or as quoted JSON (hashes and arrays)\&. To load variables from a file, specify the file preceded by @ (e\&.g\&. @vars\&.yml)\&.
+Use this file to authenticate the connection
.RE
.PP
-\fB\-f\fR \fINUM\fR, \fB\-\-forks=\fR\fINUM\fR
+\fB\-\-skip\-tags=\fR\fISKIP_TAGS\fR
.RS 4
-Level of parallelism\&.
-\fINUM\fR
-is specified as an integer, the default is 5\&.
+Only run plays and tasks whose tags do not match these values\&.
.RE
.PP
-\fB\-k\fR, \fB\-\-ask\-pass\fR
+\fB\-\-start\-at\-task=\fR\fISTART_AT\fR
.RS 4
-Prompt for the SSH password instead of assuming key\-based authentication with ssh\-agent\&.
+Start the playbook at the task matching this name\&.
.RE
.PP
-\fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR
+\fB\-\-step\fR
.RS 4
-Prompt for the password to use for playbook plays that request sudo access, if any\&.
+One\-step\-at\-a\-time: confirm each task before running\&.
.RE
.PP
-\fB\-U\fR, \fISUDO_USER\fR, \fB\-\-sudo\-user=\fR\fISUDO_USER\fR
+\fB\-S\fR, \fB\-\-su\fR
.RS 4
-Desired sudo user (default=root)\&.
+Run operations with su (deprecated, use become)
.RE
.PP
-\fB\-t\fR, \fITAGS\fR, \fB\-\-tags=\fR\fITAGS\fR
+\fB\-R SU\-USER\fR, \fB\-\-su\-user=\fR\fISU_USER\fR
.RS 4
-Only run plays and tasks tagged with these values\&.
+Run operations with su as this user (default=root) (deprecated, use become)
+.RE
+.PP
+\fB\-s\fR, \fB\-\-sudo\fR
+.RS 4
+Run operations with sudo (nopasswd) (deprecated, use become)
+.RE
+.PP
+\fB\-U\fR, \fISUDO_USER\fR, \fB\-\-sudo\-user=\fR\fISUDO_USER\fR
+.RS 4
+Desired sudo user (default=root) (deprecated, use become)\&.
.RE
.PP
\fB\-\-skip\-tags=\fR\fISKIP_TAGS\fR
@@ -106,14 +202,9 @@ Only run plays and tasks whose tags do not match these values\&.
Look for syntax errors in the playbook, but don\(cqt run anything
.RE
.PP
-\fB\-\-check\fR
-.RS 4
-Do not make any changes on the remote system, but test resources to see what might have changed\&. Note this can not scan all possible resource types and is only a simulation\&.
-.RE
-.PP
-\fB\-\-diff\fR
+\fB\-t\fR, \fITAGS\fR, \fB\-\-tags=\fR\fITAGS\fR
.RS 4
-When changing any templated files, show the unified diffs of how they changed\&. When used with \-\-check, shows how the files would have changed if \-\-check were not used\&.
+Only run plays and tasks tagged with these values\&.
.RE
.PP
\fB\-T\fR \fISECONDS\fR, \fB\-\-timeout=\fR\fISECONDS\fR
@@ -122,35 +213,24 @@ Connection timeout to use when trying to talk to hosts, in
\fISECONDS\fR\&.
.RE
.PP
-\fB\-s\fR, \fB\-\-sudo\fR
-.RS 4
-Force all plays to use sudo, even if not marked as such\&.
-.RE
-.PP
\fB\-u\fR \fIUSERNAME\fR, \fB\-\-user=\fR\fIUSERNAME\fR
.RS 4
Use this remote user name on playbook steps that do not indicate a user name to run as\&.
.RE
.PP
-\fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR
+\fB\-\-vault\-password\-file=\fR\fIVAULT_PASSWORD_FILE\fR
.RS 4
-Connection type to use\&. Possible options are
-\fIparamiko\fR
-(SSH),
-\fIssh\fR, and
-\fIlocal\fR\&.
-\fIlocal\fR
-is mostly useful for crontab or kickstarts\&.
+Vault password file\&.
.RE
.PP
-\fB\-l\fR \fISUBSET\fR, \fB\-\-limit=\fR\fISUBSET\fR
+\fB\-v\fR, \fB\-\-verbose\fR
.RS 4
-Further limits the selected host/group patterns\&.
+Verbose mode, more output from successful actions will be shown\&. Give up to three times for more output\&.
.RE
.PP
-\fB\-\-list\-hosts\fR
+\fB\-\-version\fR
.RS 4
-Outputs a list of matching hosts; does not execute anything else\&.
+Show program\(cqs version number and exit\&.
.RE
.SH "ENVIRONMENT"
.sp
diff --git a/docs/man/man1/ansible-playbook.1.asciidoc.in b/docs/man/man1/ansible-playbook.1.asciidoc.in
index 44513d11112..8b8ba9c4688 100644
--- a/docs/man/man1/ansible-playbook.1.asciidoc.in
+++ b/docs/man/man1/ansible-playbook.1.asciidoc.in
@@ -34,22 +34,44 @@ The names of one or more YAML format files to run as ansible playbooks.
OPTIONS
-------
-*-v*, *--verbose*::
+*-k*, *--ask-pass*::
-Verbose mode, more output from successful actions will be shown. Give
-up to three times for more output.
+Prompt for the SSH password instead of assuming key-based
+authentication with ssh-agent.
-*-i* 'PATH', *--inventory=*'PATH'::
+*-K*, *--ask-sudo-pass*::
-The 'PATH' to the inventory hosts file, which defaults to
-'/etc/ansible/hosts'.
+Prompt for the password to use for playbook plays that request sudo
+access, if any.
+*-b*, *--become*::
-*-M* 'DIRECTORY', *--module-path=*'DIRECTORY'::
+Run operations with become (nopasswd implied)
-The 'DIRECTORY' search path to load modules from. The default is
-'/usr/share/ansible'. This can also be set with the ANSIBLE_LIBRARY
-environment variable.
+*--become-method=BECOME_METHOD*::
+
+Privilege escalation method to use (default=sudo),
+valid choices: [ sudo | su | pbrun | pfexec | runas ]
+
+*--become-user=BECOME_USER*::
+
+Run operations as this user (default=None).
+
+*-C*, *--check*::
+
+Do not make any changes on the remote system, but test resources to see what might
+have changed. Note this can not scan all possible resource types and is only
+a simulation.
+
+*-c* 'CONNECTION', *--connection=*'CONNECTION'::
+
+Connection type to use. Possible options are 'paramiko' (SSH), 'ssh',
+and 'local'. 'local' is mostly useful for crontab or kickstarts.
+
+*-D*, *--diff*::
+
+When changing any templated files, show the unified diffs of how they changed. When
+used with --check, shows how the files would have changed if --check were not used.
*-e* 'VARS', *--extra-vars=*'VARS'::
@@ -57,76 +79,115 @@ Extra variables to inject into a playbook, in key=value key=value format or
as quoted JSON (hashes and arrays). To load variables from a file, specify
the file preceded by @ (e.g. @vars.yml).
+*--flush-cache*::
+
+Clear the fact cache.
+
+*--force-handlers*::
+
+Run handlers even if a task fails.
+
*-f* 'NUM', *--forks=*'NUM'::
Level of parallelism. 'NUM' is specified as an integer, the default is 5.
+*-h*, *--help*::
-*-k*, *--ask-pass*::
+Show help page and exit
-Prompt for the SSH password instead of assuming key-based
-authentication with ssh-agent.
+*-i* 'PATH', *--inventory=*'PATH'::
+The 'PATH' to the inventory hosts file, which defaults to
+'/etc/ansible/hosts'.
-*-K*, *--ask-sudo-pass*::
+*-l* 'SUBSET', *--limit=*'SUBSET'::
-Prompt for the password to use for playbook plays that request sudo
-access, if any.
+Further limits the selected host/group patterns.
-*-U*, 'SUDO_USER', *--sudo-user=*'SUDO_USER'::
+*--list-hosts*::
+
+Outputs a list of matching hosts; does not execute anything else.
-Desired sudo user (default=root).
+*--list-tags*::
-*-t*, 'TAGS', *--tags=*'TAGS'::
+List all available tags.
-Only run plays and tasks tagged with these values.
+*--list-tasks*::
-*--skip-tags=*'SKIP_TAGS'::
+List all tasks that would be executed
-Only run plays and tasks whose tags do not match these values.
+*-M* 'DIRECTORY', *--module-path=*'DIRECTORY'::
-*--syntax-check*::
+The 'DIRECTORY' search path to load modules from. The default is
+'/usr/share/ansible'. This can also be set with the ANSIBLE_LIBRARY
+environment variable.
-Look for syntax errors in the playbook, but don't run anything
+*--private-key=*'PRIVATE_KEY_FILE'::
-*--check*::
+Use this file to authenticate the connection
-Do not make any changes on the remote system, but test resources to see what might
-have changed. Note this can not scan all possible resource types and is only
-a simulation.
+*--skip-tags=*'SKIP_TAGS'::
-*--diff*::
+Only run plays and tasks whose tags do not match these values.
-When changing any templated files, show the unified diffs of how they changed. When
-used with --check, shows how the files would have changed if --check were not used.
+*--start-at-task=*'START_AT'::
-*-T* 'SECONDS', *--timeout=*'SECONDS'::
+Start the playbook at the task matching this name.
-Connection timeout to use when trying to talk to hosts, in 'SECONDS'.
+*--step*::
+
+One-step-at-a-time: confirm each task before running.
+*-S*, *--su*::
+
+Run operations with su (deprecated, use become)
+
+*-R SU-USER*, *--su-user=*'SU_USER'::
+
+Run operations with su as this user (default=root)
+(deprecated, use become)
*-s*, *--sudo*::
-Force all plays to use sudo, even if not marked as such.
+Run operations with sudo (nopasswd) (deprecated, use become)
+*-U*, 'SUDO_USER', *--sudo-user=*'SUDO_USER'::
+
+Desired sudo user (default=root) (deprecated, use become).
+
+*--skip-tags=*'SKIP_TAGS'::
+
+Only run plays and tasks whose tags do not match these values.
+
+*--syntax-check*::
+
+Look for syntax errors in the playbook, but don't run anything
+
+*-t*, 'TAGS', *--tags=*'TAGS'::
+
+Only run plays and tasks tagged with these values.
+
+*-T* 'SECONDS', *--timeout=*'SECONDS'::
+
+Connection timeout to use when trying to talk to hosts, in 'SECONDS'.
*-u* 'USERNAME', *--user=*'USERNAME'::
Use this remote user name on playbook steps that do not indicate a
user name to run as.
-*-c* 'CONNECTION', *--connection=*'CONNECTION'::
+*--vault-password-file=*'VAULT_PASSWORD_FILE'::
-Connection type to use. Possible options are 'paramiko' (SSH), 'ssh',
-and 'local'. 'local' is mostly useful for crontab or kickstarts.
+Vault password file.
-*-l* 'SUBSET', *--limit=*'SUBSET'::
+*-v*, *--verbose*::
-Further limits the selected host/group patterns.
+Verbose mode, more output from successful actions will be shown. Give
+up to three times for more output.
-*--list-hosts*::
+*--version*::
-Outputs a list of matching hosts; does not execute anything else.
+Show program's version number and exit.
ENVIRONMENT
-----------
diff --git a/docs/man/man1/ansible-pull.1 b/docs/man/man1/ansible-pull.1
index 029d1e45bbc..8e9bc6a8f5b 100644
--- a/docs/man/man1/ansible-pull.1
+++ b/docs/man/man1/ansible-pull.1
@@ -2,12 +2,12 @@
.\" Title: ansible
.\" Author: :doctype:manpage
.\" Generator: DocBook XSL Stylesheets v1.78.1
-.\" Date: 05/05/2015
+.\" Date: 07/22/2015
.\" Manual: System administration commands
-.\" Source: Ansible 2.0.0
+.\" Source: Ansible %VERSION%
.\" Language: English
.\"
-.TH "ANSIBLE" "1" "05/05/2015" "Ansible 2\&.0\&.0" "System administration commands"
+.TH "ANSIBLE" "1" "07/22/2015" "Ansible %VERSION%" "System administration commands"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -50,14 +50,14 @@ The name of one the YAML format files to run as an ansible playbook\&. This can
.RE
.SH "OPTIONS"
.PP
-\fB\-d\fR \fIDEST\fR, \fB\-\-directory=\fR\fIDEST\fR
+\fB\-\-accept\-host\-key\fR
.RS 4
-Directory to checkout repository into\&. If not provided, a subdirectory of ~/\&.ansible/pull/ will be used\&.
+Adds the hostkey for the repo URL if not already added\&.
.RE
.PP
-\fB\-U\fR \fIURL\fR, \fB\-\-url=\fR\fIURL\fR
+\fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR
.RS 4
-URL of the playbook repository to checkout\&.
+Ask for sudo password\&.
.RE
.PP
\fB\-C\fR \fICHECKOUT\fR, \fB\-\-checkout=\fR\fICHECKOUT\fR
@@ -65,11 +65,26 @@ URL of the playbook repository to checkout\&.
Branch/Tag/Commit to checkout\&. If not provided, uses default behavior of module used to check out playbook repository\&.
.RE
.PP
+\fB\-d\fR \fIDEST\fR, \fB\-\-directory=\fR\fIDEST\fR
+.RS 4
+Directory to checkout repository into\&. If not provided, a subdirectory of ~/\&.ansible/pull/ will be used\&.
+.RE
+.PP
+\fB\-e\fR \fIEXTRA_VARS\fR, \fB\-\-extra\-vars=\fR\fIEXTRA_VARS\fR
+.RS 4
+Set additional variables as key=value or YAML/JSON
+.RE
+.PP
\fB\-f\fR, \fB\-\-force\fR
.RS 4
Force running of playbook even if unable to update playbook repository\&. This can be useful, for example, to enforce run\-time state when a network connection may not always be up or possible\&.
.RE
.PP
+\fB\-h\fR, \fB\-\-help\fR
+.RS 4
+Show the help message and exit\&.
+.RE
+.PP
\fB\-i\fR \fIPATH\fR, \fB\-\-inventory=\fR\fIPATH\fR
.RS 4
The
@@ -77,9 +92,11 @@ The
to the inventory hosts file\&. This can be a relative path within the checkout\&.
.RE
.PP
-\fB\-\-purge\fR
+\fB\-\-key\-file=\fR\fIKEYFILE\fR
.RS 4
-Purge the checkout after the playbook is run\&.
+Pass
+\fI\-i \fR
+to the SSH arguments used by git\&.
.RE
.PP
\fB\-m\fR \fINAME\fR, \fB\-\-module\-name=\fR\fINAME\fR
@@ -89,7 +106,37 @@ Module used to checkout playbook repository\&. Defaults to git\&.
.PP
\fB\-o\fR, \fB\-\-only\-if\-changed\fR
.RS 4
-Run the playbook only if the repository has changed
+Only run the playbook if the repository has been updated\&.
+.RE
+.PP
+\fB\-\-purge\fR
+.RS 4
+Purge the checkout after the playbook is run\&.
+.RE
+.PP
+\fB\-s\fR \fISLEEP\fR, \fB\-\-sleep=\fR\fISLEEP\fR
+.RS 4
+Sleep for a random interval (between 0 and SLEEP seconds) before starting\&. This is a useful way to disperse git requests\&.
+.RE
+.PP
+\fB\-t\fR \fITAGS\fR, \fB\-\-tags=\fR\fITAGS\fR
+.RS 4
+Only run plays and tasks tagged with these values\&.
+.RE
+.PP
+\fB\-U\fR \fIURL\fR, \fB\-\-url=\fR\fIURL\fR
+.RS 4
+URL of the playbook repository to checkout\&.
+.RE
+.PP
+\fB\-\-vault\-password\-file=\fR\fIVAULT_PASSWORD_FILE\fR
+.RS 4
+Vault password file\&.
+.RE
+.PP
+\fB\-v\fR, \fB\-\-verbose\fR
+.RS 4
+Pass \-vvv to ansible\-playbook\&.
.RE
.SH "AUTHOR"
.sp
diff --git a/docs/man/man1/ansible-pull.1.asciidoc.in b/docs/man/man1/ansible-pull.1.asciidoc.in
index d75fc637946..b78b7e67a2b 100644
--- a/docs/man/man1/ansible-pull.1.asciidoc.in
+++ b/docs/man/man1/ansible-pull.1.asciidoc.in
@@ -50,19 +50,26 @@ host hostname and finally a playbook named *local.yml*.
OPTIONS
-------
+*--accept-host-key*::
+
+Adds the hostkey for the repo URL if not already added.
+
+*-K*, *--ask-sudo-pass*::
+
+Ask for sudo password.
+
+*-C* 'CHECKOUT', *--checkout=*'CHECKOUT'::
+
+Branch/Tag/Commit to checkout. If not provided, uses default behavior of module used to check out playbook repository.
+
*-d* 'DEST', *--directory=*'DEST'::
Directory to checkout repository into. If not provided, a subdirectory of
~/.ansible/pull/ will be used.
-*-U* 'URL', *--url=*'URL'::
-
-URL of the playbook repository to checkout.
-
-*-C* 'CHECKOUT', *--checkout=*'CHECKOUT'::
+*-e* 'EXTRA_VARS', *--extra-vars=*'EXTRA_VARS'::
-Branch/Tag/Commit to checkout. If not provided, uses default behavior
-of module used to check out playbook repository.
+Set additional variables as key=value or YAML/JSON
*-f*, *--force*::
@@ -70,14 +77,17 @@ Force running of playbook even if unable to update playbook repository. This
can be useful, for example, to enforce run-time state when a network
connection may not always be up or possible.
+*-h*, *--help*::
+
+Show the help message and exit.
+
*-i* 'PATH', *--inventory=*'PATH'::
-The 'PATH' to the inventory hosts file. This can be a relative path within
-the checkout.
+The 'PATH' to the inventory hosts file. This can be a relative path within the checkout.
-*--purge*::
+*--key-file=*'KEYFILE'::
-Purge the checkout after the playbook is run.
+Pass '-i ' to the SSH arguments used by git.
*-m* 'NAME', *--module-name=*'NAME'::
@@ -85,7 +95,32 @@ Module used to checkout playbook repository. Defaults to git.
*-o*, *--only-if-changed*::
-Run the playbook only if the repository has changed
+Only run the playbook if the repository has been updated.
+
+*--purge*::
+
+Purge the checkout after the playbook is run.
+
+*-s* 'SLEEP', *--sleep=*'SLEEP'::
+
+Sleep for a random interval (between 0 and SLEEP seconds) before starting. This is a useful way to disperse git requests.
+
+*-t* 'TAGS', *--tags=*'TAGS'::
+
+Only run plays and tasks tagged with these values.
+
+*-U* 'URL', *--url=*'URL'::
+
+URL of the playbook repository to checkout.
+
+*--vault-password-file=*'VAULT_PASSWORD_FILE'::
+
+Vault password file.
+
+*-v*, *--verbose*::
+
+Pass -vvv to ansible-playbook.
+
AUTHOR
------
diff --git a/docs/man/man1/ansible.1 b/docs/man/man1/ansible.1
index 102ba7e5b0e..83bfc0500dd 100644
--- a/docs/man/man1/ansible.1
+++ b/docs/man/man1/ansible.1
@@ -2,12 +2,12 @@
.\" Title: ansible
.\" Author: :doctype:manpage
.\" Generator: DocBook XSL Stylesheets v1.78.1
-.\" Date: 05/05/2015
+.\" Date: 07/15/2015
.\" Manual: System administration commands
-.\" Source: Ansible 2.0.0
+.\" Source: Ansible %VERSION%
.\" Language: English
.\"
-.TH "ANSIBLE" "1" "05/05/2015" "Ansible 2\&.0\&.0" "System administration commands"
+.TH "ANSIBLE" "1" "07/15/2015" "Ansible %VERSION%" "System administration commands"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -43,9 +43,86 @@ A name of a group in the inventory file, a shell\-like glob selecting hosts in i
.RE
.SH "OPTIONS"
.PP
-\fB\-v\fR, \fB\-\-verbose\fR
+\fB\-a\fR \*(Aq\fIARGUMENTS\fR\*(Aq, \fB\-\-args=\fR\*(Aq\fIARGUMENTS\fR\*(Aq
.RS 4
-Verbose mode, more output from successful actions will be shown\&. Give up to three times for more output\&.
+The
+\fIARGUMENTS\fR
+to pass to the module\&.
+.RE
+.PP
+\fB\-\-ask\-become\-pass\fR
+.RS 4
+Ask for privilege escalation password\&.
+.RE
+.PP
+\fB\-k\fR, \fB\-\-ask\-pass\fR
+.RS 4
+Prompt for the SSH password instead of assuming key\-based authentication with ssh\-agent\&.
+.RE
+.PP
+\fB\-\-ask\-su\-pass\fR
+.RS 4
+Prompt for su password (deprecated, use become)\&.
+.RE
+.PP
+\fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR
+.RS 4
+Prompt for the password to use with \-\-sudo, if any\&.
+.RE
+.PP
+\fB\-\-ask\-vault\-pass\fR
+.RS 4
+Prompt for vault password\&.
+.RE
+.PP
+\fB\-B\fR \fINUM\fR, \fB\-\-background=\fR\fINUM\fR
+.RS 4
+Run commands in the background, killing the task after
+\fINUM\fR
+seconds\&.
+.RE
+.PP
+\fB\-\-become\-method=\fR\fIBECOME_METHOD\fR
+.RS 4
+Privilege escalation method to use (default=sudo), valid choices: [ sudo | su | pbrun | pfexec | runas ]
+.RE
+.PP
+\fB\-\-become\-user=\fR\fIBECOME_USER\fR
+.RS 4
+Run operations as this user (default=None)\&.
+.RE
+.PP
+\fB\-C\fR, \fB\-\-check\fR
+.RS 4
+Don\(cqt make any changes; instead try to predict some of the changes that may occur\&.
+.RE
+.PP
+\fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR
+.RS 4
+Connection type to use\&. Possible options are
+\fIparamiko\fR
+(SSH),
+\fIssh\fR, and
+\fIlocal\fR\&.
+\fIlocal\fR
+is mostly useful for crontab or kickstarts\&.
+.RE
+.PP
+\fB\-e\fR \fIEXTRA_VARS\fR, \fB\-\-extra\-vars=\fR\fIEXTRA_VARS\fR
+.RS 4
+Set additional variables as key=value or YAML/JSON\&.
+.RE
+.PP
+\fB\-f\fR \fINUM\fR, \fB\-\-forks=\fR\fINUM\fR
+.RS 4
+Level of parallelism\&.
+\fINUM\fR
+is specified as an integer, the default is 5\&.
+.RE
+.PP
+\fB\-h\fR, \fB\-\-help\fR
+.RS 4
+Show help message and exit\&.
.RE
.PP
\fB\-i\fR \fIPATH\fR, \fB\-\-inventory=\fR\fIPATH\fR
@@ -56,16 +133,19 @@ to the inventory hosts file, which defaults to
\fI/etc/ansible/hosts\fR\&.
.RE
.PP
-\fB\-f\fR \fINUM\fR, \fB\-\-forks=\fR\fINUM\fR
+\fB\-l\fR \fISUBSET\fR, \fB\-\-limit=\fR\fISUBSET\fR
.RS 4
-Level of parallelism\&.
-\fINUM\fR
-is specified as an integer, the default is 5\&.
+Further limits the selected host/group patterns\&.
.RE
.PP
-\fB\-\-private\-key=\fR\fIPRIVATE_KEY_FILE\fR
+\fB\-l\fR \fI~REGEX\fR, \fB\-\-limit=\fR\fI~REGEX\fR
.RS 4
-Use this file to authenticate the connection\&.
+Further limits hosts with a regex pattern\&.
+.RE
+.PP
+\fB\-\-list\-hosts\fR
+.RS 4
+Outputs a list of matching hosts; does not execute anything else\&.
.RE
.PP
\fB\-m\fR \fINAME\fR, \fB\-\-module\-name=\fR\fINAME\fR
@@ -82,26 +162,32 @@ search path to load modules from\&. The default is
\fI/usr/share/ansible\fR\&. This can also be set with the ANSIBLE_LIBRARY environment variable\&.
.RE
.PP
-\fB\-a\fR \*(Aq\fIARGUMENTS\fR\*(Aq, \fB\-\-args=\fR\*(Aq\fIARGUMENTS\fR\*(Aq
+\fB\-o\fR, \fB\-\-one\-line\fR
.RS 4
-The
-\fIARGUMENTS\fR
-to pass to the module\&.
+Try to output everything on one line\&.
.RE
.PP
-\fB\-k\fR, \fB\-\-ask\-pass\fR
+\fB\-P\fR \fINUM\fR, \fB\-\-poll=\fR\fINUM\fR
.RS 4
-Prompt for the SSH password instead of assuming key\-based authentication with ssh\-agent\&.
+Poll a background job every
+\fINUM\fR
+seconds\&. Requires
+\fB\-B\fR\&.
.RE
.PP
-\fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR
+\fB\-\-private\-key=\fR\fIPRIVATE_KEY_FILE\fR
.RS 4
-Prompt for the password to use with \-\-sudo, if any
+Use this file to authenticate the connection\&.
.RE
.PP
-\fB\-o\fR, \fB\-\-one\-line\fR
+\fB\-S\fR, \fB\-\-su\fR
.RS 4
-Try to output everything on one line\&.
+Run operations with su (deprecated, use become)\&.
+.RE
+.PP
+\fB\-R\fR \fISU_USER\fR, \fB\-\-su\-user=\fR\fISU_USER\fR
+.RS 4
+Run operations with su as this user (default=root) (deprecated, use become)
.RE
.PP
\fB\-s\fR, \fB\-\-sudo\fR
@@ -109,6 +195,13 @@ Try to output everything on one line\&.
Run the command as the user given by \-u and sudo to root\&.
.RE
.PP
+\fB\-U\fR \fISUDO_USERNAME\fR, \fB\-\-sudo\-user=\fR\fISUDO_USERNAME\fR
+.RS 4
+Sudo to
+\fISUDO_USERNAME\fR
+instead of root\&. Implies \-\-sudo\&.
+.RE
+.PP
\fB\-t\fR \fIDIRECTORY\fR, \fB\-\-tree=\fR\fIDIRECTORY\fR
.RS 4
Save contents in this output
@@ -121,21 +214,6 @@ Connection timeout to use when trying to talk to hosts, in
\fISECONDS\fR\&.
.RE
.PP
-\fB\-B\fR \fINUM\fR, \fB\-\-background=\fR\fINUM\fR
-.RS 4
-Run commands in the background, killing the task after
-\fINUM\fR
-seconds\&.
-.RE
-.PP
-\fB\-P\fR \fINUM\fR, \fB\-\-poll=\fR\fINUM\fR
-.RS 4
-Poll a background job every
-\fINUM\fR
-seconds\&. Requires
-\fB\-B\fR\&.
-.RE
-.PP
\fB\-u\fR \fIUSERNAME\fR, \fB\-\-user=\fR\fIUSERNAME\fR
.RS 4
Use this remote
@@ -143,37 +221,19 @@ Use this remote
instead of the current user\&.
.RE
.PP
-\fB\-U\fR \fISUDO_USERNAME\fR, \fB\-\-sudo\-user=\fR\fISUDO_USERNAME\fR
+\fB\-\-vault\-password\-file=\fR\fIVAULT_PASSWORD_FILE\fR
.RS 4
-Sudo to
-\fISUDO_USERNAME\fR
-instead of root\&. Implies \-\-sudo\&.
+Vault password file\&.
.RE
.PP
-\fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR
-.RS 4
-Connection type to use\&. Possible options are
-\fIparamiko\fR
-(SSH),
-\fIssh\fR, and
-\fIlocal\fR\&.
-\fIlocal\fR
-is mostly useful for crontab or kickstarts\&.
-.RE
-.PP
-\fB\-l\fR \fISUBSET\fR, \fB\-\-limit=\fR\fISUBSET\fR
-.RS 4
-Further limits the selected host/group patterns\&.
-.RE
-.PP
-\fB\-l\fR \fI~REGEX\fR, \fB\-\-limit=\fR\fI~REGEX\fR
+\fB\-v\fR, \fB\-\-verbose\fR
.RS 4
-Further limits hosts with a regex pattern\&.
+Verbose mode, more output from successful actions will be shown\&. Give up to three times for more output\&.
.RE
.PP
-\fB\-\-list\-hosts\fR
+\fB\-\-version\fR
.RS 4
-Outputs a list of matching hosts; does not execute anything else\&.
+Show program version number and exit\&.
.RE
.SH "INVENTORY"
.sp
diff --git a/docs/man/man1/ansible.1.asciidoc.in b/docs/man/man1/ansible.1.asciidoc.in
index f0f81b7d9bd..26bd0144d4e 100644
--- a/docs/man/man1/ansible.1.asciidoc.in
+++ b/docs/man/man1/ansible.1.asciidoc.in
@@ -34,56 +34,119 @@ semicolons.
OPTIONS
-------
-*-v*, *--verbose*::
+*-a* \'_ARGUMENTS_', *--args=*\'_ARGUMENTS_'::
-Verbose mode, more output from successful actions will be shown. Give
-up to three times for more output.
+The 'ARGUMENTS' to pass to the module.
-*-i* 'PATH', *--inventory=*'PATH'::
+*--ask-become-pass*::
-The 'PATH' to the inventory hosts file, which defaults to '/etc/ansible/hosts'.
+Ask for privilege escalation password.
+
+*-k*, *--ask-pass*::
+
+Prompt for the SSH password instead of assuming key-based authentication with ssh-agent.
+
+*--ask-su-pass*::
+
+Prompt for su password (deprecated, use become).
+
+*-K*, *--ask-sudo-pass*::
+
+Prompt for the password to use with --sudo, if any.
+
+*--ask-vault-pass*::
+
+Prompt for vault password.
+
+*-B* 'NUM', *--background=*'NUM'::
+
+Run commands in the background, killing the task after 'NUM' seconds.
+
+*--become-method=*'BECOME_METHOD'::
+
+Privilege escalation method to use (default=sudo),
+valid choices: [ sudo | su | pbrun | pfexec | runas ]
+
+*--become-user=*'BECOME_USER'::
+Run operations as this user (default=None).
+
+*-C*, *--check*::
+
+Don't make any changes; instead try to predict some of the changes that may occur.
+
+*-c* 'CONNECTION', *--connection=*'CONNECTION'::
+
+Connection type to use. Possible options are 'paramiko' (SSH), 'ssh',
+and 'local'. 'local' is mostly useful for crontab or kickstarts.
+
+*-e* 'EXTRA_VARS', *--extra-vars=*'EXTRA_VARS'::
+
+Set additional variables as key=value or YAML/JSON.
*-f* 'NUM', *--forks=*'NUM'::
Level of parallelism. 'NUM' is specified as an integer, the default is 5.
-*--private-key=*'PRIVATE_KEY_FILE'::
+*-h*, *--help*::
-Use this file to authenticate the connection.
+Show help message and exit.
+
+*-i* 'PATH', *--inventory=*'PATH'::
+
+The 'PATH' to the inventory hosts file, which defaults to '/etc/ansible/hosts'.
+
+*-l* 'SUBSET', *--limit=*'SUBSET'::
+
+Further limits the selected host/group patterns.
+
+*-l* '\~REGEX', *--limit=*'~REGEX'::
+Further limits hosts with a regex pattern.
+
+*--list-hosts*::
+
+Outputs a list of matching hosts; does not execute anything else.
*-m* 'NAME', *--module-name=*'NAME'::
Execute the module called 'NAME'.
-
*-M* 'DIRECTORY', *--module-path=*'DIRECTORY'::
The 'DIRECTORY' search path to load modules from. The default is
'/usr/share/ansible'. This can also be set with the ANSIBLE_LIBRARY
environment variable.
-*-a* \'_ARGUMENTS_', *--args=*\'_ARGUMENTS_'::
+*-o*, *--one-line*::
-The 'ARGUMENTS' to pass to the module.
+Try to output everything on one line.
-*-k*, *--ask-pass*::
+*-P* 'NUM', *--poll=*'NUM'::
-Prompt for the SSH password instead of assuming key-based authentication with ssh-agent.
+Poll a background job every 'NUM' seconds. Requires *-B*.
-*-K*, *--ask-sudo-pass*::
+*--private-key=*'PRIVATE_KEY_FILE'::
-Prompt for the password to use with --sudo, if any
+Use this file to authenticate the connection.
-*-o*, *--one-line*::
+*-S*, *--su*::
-Try to output everything on one line.
+Run operations with su (deprecated, use become).
+
+*-R* 'SU_USER', *--su-user=*'SU_USER'::
+
+Run operations with su as this user (default=root)
+(deprecated, use become)
*-s*, *--sudo*::
Run the command as the user given by -u and sudo to root.
+*-U* 'SUDO_USERNAME', *--sudo-user=*'SUDO_USERNAME'::
+
+Sudo to 'SUDO_USERNAME' instead of root. Implies --sudo.
+
*-t* 'DIRECTORY', *--tree=*'DIRECTORY'::
Save contents in this output 'DIRECTORY', with the results saved in a
@@ -93,38 +156,22 @@ file named after each host.
Connection timeout to use when trying to talk to hosts, in 'SECONDS'.
-*-B* 'NUM', *--background=*'NUM'::
-
-Run commands in the background, killing the task after 'NUM' seconds.
-
-*-P* 'NUM', *--poll=*'NUM'::
-
-Poll a background job every 'NUM' seconds. Requires *-B*.
-
*-u* 'USERNAME', *--user=*'USERNAME'::
Use this remote 'USERNAME' instead of the current user.
-*-U* 'SUDO_USERNAME', *--sudo-user=*'SUDO_USERNAME'::
-
-Sudo to 'SUDO_USERNAME' instead of root. Implies --sudo.
-
-*-c* 'CONNECTION', *--connection=*'CONNECTION'::
-
-Connection type to use. Possible options are 'paramiko' (SSH), 'ssh',
-and 'local'. 'local' is mostly useful for crontab or kickstarts.
-
-*-l* 'SUBSET', *--limit=*'SUBSET'::
+*--vault-password-file=*'VAULT_PASSWORD_FILE'::
-Further limits the selected host/group patterns.
+Vault password file.
-*-l* '\~REGEX', *--limit=*'~REGEX'::
+*-v*, *--verbose*::
-Further limits hosts with a regex pattern.
+Verbose mode, more output from successful actions will be shown. Give
+up to three times for more output.
-*--list-hosts*::
+*--version*::
-Outputs a list of matching hosts; does not execute anything else.
+Show program version number and exit.
INVENTORY
---------
diff --git a/docsite/_themes/srtd/footer.html b/docsite/_themes/srtd/footer.html
index b6422f9a2dd..b70cfde7ad8 100644
--- a/docsite/_themes/srtd/footer.html
+++ b/docsite/_themes/srtd/footer.html
@@ -20,6 +20,6 @@
{%- endif %}
-Ansible docs are generated from GitHub sources using Sphinx using a theme provided by Read the Docs . {% if pagename.endswith("_module") %}. Module documentation is not edited directly, but is generated from the source code for the modules. To submit an update to module docs, edit the 'DOCUMENTATION' metadata in the core and extras modules source repositories. {% endif %}
+Ansible docs are generated from GitHub sources using Sphinx using a theme provided by Read the Docs . {% if pagename.endswith("_module") %}. Module documentation is not edited directly, but is generated from the source code for the modules. To submit an update to module docs, edit the 'DOCUMENTATION' metadata in the core and extras modules source repositories. {% endif %}
diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html
index b9d9d065c7b..74fb6008ba0 100644
--- a/docsite/_themes/srtd/layout.html
+++ b/docsite/_themes/srtd/layout.html
@@ -113,23 +113,8 @@
}
-
-
-
-
-
-
-
-
-
+
@@ -140,16 +125,32 @@
+
+
+
+ Documentation
+
+
+
{# SIDE NAV, TOGGLES ON MOBILE #}
-
-
-
-
+
+
+
-
+
+
{% include "searchbox.html" %}
@@ -200,8 +201,8 @@
-
-
+
+
diff --git a/docsite/_themes/srtd/static/css/theme.css b/docsite/_themes/srtd/static/css/theme.css
index dcad941b9cf..29a1c6ba57c 100644
--- a/docsite/_themes/srtd/static/css/theme.css
+++ b/docsite/_themes/srtd/static/css/theme.css
@@ -4634,3 +4634,83 @@ span[id*='MathJax-Span'] {
#search-box-id {
padding-right: 25px;
}
+
+.DocSiteProduct-header {
+ flex: 1;
+ -webkit-flex: 1;
+ padding: 20px;
+ padding-top: 10px;
+ padding-bottom: 20px;
+ display: flex;
+ display: -webkit-flex;
+ flex-direction: column;
+ -webkit-flex-direction: column;
+ align-items: center;
+ -webkit-align-items: center;
+ justify-content: flex-start;
+ -webkit-justify-content: flex-start;
+ margin-left: 20px;
+ margin-right: 20px;
+ text-decoration: none;
+ font-weight: 400;
+ font-family: 'Open Sans', sans-serif;
+}
+
+.DocSiteProduct-header:active,
+.DocSiteProduct-header:focus {
+ color: #fff;
+}
+
+.DocSiteProduct-header:visited {
+ color: #fff;
+}
+
+.DocSiteProduct-header--core {
+ font-size: 25px;
+ background-color: #5bbdbf;
+ border: 2px solid #5bbdbf;
+ border-top-left-radius: 4px;
+ border-top-right-radius: 4px;
+ color: #fff;
+}
+
+.DocSiteProduct-headerAlign {
+ width: 100%;
+}
+
+.DocSiteProduct-logo {
+ width: 60px;
+ height: 60px;
+ margin-bottom: -9px;
+}
+
+.DocSiteProduct-logoText {
+ margin-top: 6px;
+ font-size: 25px;
+}
+
+.DocSite-nav {
+ flex: initial;
+ -webkit-flex: initial;
+ display: flex;
+ display: -webkit-flex;
+ flex-direction: row;
+ -webkit-flex-direction: row;
+ justify-content: flex-start;
+ -webkit-justify-content: flex-start;
+ padding: 15px;
+ background-color: #000;
+ text-decoration: none;
+ font-family: 'Open Sans', sans-serif;
+}
+
+.DocSiteNav-logo {
+ width: 28px;
+ height: 28px;
+ margin-right: 8px;
+}
+
+.DocSiteNav-title {
+ color: #fff;
+ font-size: 20px;
+}
diff --git a/docsite/_themes/srtd/static/images/logo_invert.png b/docsite/_themes/srtd/static/images/logo_invert.png
new file mode 100644
index 00000000000..ea565b7550b
Binary files /dev/null and b/docsite/_themes/srtd/static/images/logo_invert.png differ
diff --git a/docsite/conf.py b/docsite/conf.py
index 61c6de5c11e..95bc1fb8328 100644
--- a/docsite/conf.py
+++ b/docsite/conf.py
@@ -55,7 +55,7 @@ master_doc = 'index'
# General substitutions.
project = 'Ansible Documentation'
-copyright = "2013 Ansible, Inc"
+copyright = "2013-2015 Ansible, Inc"
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
@@ -100,6 +100,8 @@ exclude_patterns = ['modules']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
+highlight_language = 'YAML'
+
# Options for HTML output
# -----------------------
diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst
index d3eb8435231..76683f6ba3b 100644
--- a/docsite/rst/YAMLSyntax.rst
+++ b/docsite/rst/YAMLSyntax.rst
@@ -107,7 +107,7 @@ with a "{", YAML will think it is a dictionary, so you must quote it, like so::
Learn what playbooks can do and how to write/run them.
 `YAMLLint `_
YAML Lint (online) helps you debug YAML syntax if you are having problems
- `Github examples directory `_
+ `Github examples directory `_
Complete playbook files from the github project source
`Mailing List `_
Questions? Help? Ideas? Stop by the list on Google Groups
diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst
index ca639c01f0d..0d09e4116ed 100644
--- a/docsite/rst/become.rst
+++ b/docsite/rst/become.rst
@@ -49,7 +49,7 @@ New command line options
--ask-become-pass
ask for privilege escalation password
--b, --become
+--become,-b
run operations with become (no password implied)
--become-method=BECOME_METHOD
diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst
index 561e214bd9d..5cac69fe9a1 100644
--- a/docsite/rst/community.rst
+++ b/docsite/rst/community.rst
@@ -62,11 +62,11 @@ I'd Like To Report A Bug
Ansible practices responsible disclosure - if this is a security related bug, email `security@ansible.com `_ instead of filing a ticket or posting to the Google Group and you will receive a prompt response.
-Bugs related to the core language should be reported to `github.com/ansible/ansible `_ after
-signing up for a free github account. Before reporting a bug, please use the bug/issue search
-to see if the issue has already been reported.
+Bugs related to the core language should be reported to `github.com/ansible/ansible `_ after
+signing up for a free github account. Before reporting a bug, please use the bug/issue search
+to see if the issue has already been reported.
-MODULE related bugs however should go to `ansible-modules-core `_ or `ansible-modules-extras `_ based on the classification of the module. This is listed on the bottom of the docs page for any module.
+MODULE related bugs however should go to `ansible-modules-core `_ or `ansible-modules-extras `_ based on the classification of the module. This is listed on the bottom of the docs page for any module.
When filing a bug, please use the `issue template `_ to provide all relevant information, regardless of what repo you are filing a ticket against.
@@ -132,9 +132,9 @@ Modules are some of the easiest places to get started.
Contributing Code (Features or Bugfixes)
----------------------------------------
-The Ansible project keeps its source on github at `github.com/ansible/ansible `_ for
-the core application, and two sub repos `github.com/ansible/ansible-modules-core `_
-and `ansible/ansible-modules-extras `_ for module related items.
+The Ansible project keeps its source on github at `github.com/ansible/ansible `_ for
+the core application, and two sub repos `github.com/ansible/ansible-modules-core `_
+and `ansible/ansible-modules-extras `_ for module related items.
If you need to know if a module is in 'core' or 'extras', consult the web documentation page for that module.
The project takes contributions through `github pull requests `_.
diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst
index ddd4e90c82a..d3a6562a945 100644
--- a/docsite/rst/developing_modules.rst
+++ b/docsite/rst/developing_modules.rst
@@ -18,7 +18,7 @@ The directory "./library", alongside your top level playbooks, is also automatic
added as a search directory.
Should you develop an interesting Ansible module, consider sending a pull request to the
-`modules-extras project `_. There's also a core
+`modules-extras project `_. There's also a core
repo for more established and widely used modules. "Extras" modules may be promoted to core periodically,
but there's no fundamental difference in the end - both ship with ansible, all in one package, regardless
of how you acquire ansible.
@@ -238,7 +238,8 @@ The 'group' and 'user' modules are reasonably non-trivial and showcase what this
Key parts include always ending the module file with::
from ansible.module_utils.basic import *
- main()
+ if __name__ == '__main__':
+ main()
And instantiating the module class like::
@@ -291,7 +292,7 @@ will evaluate to True when check mode is enabled. For example::
)
if module.check_mode:
- # Check if any changes would be made by don't actually make those changes
+ # Check if any changes would be made but don't actually make those changes
module.exit_json(changed=check_if_system_state_would_be_changed())
Remember that, as module developer, you are responsible for ensuring that no
@@ -342,7 +343,7 @@ and guidelines:
* If packaging modules in an RPM, they only need to be installed on the control machine and should be dropped into /usr/share/ansible. This is entirely optional and up to you.
-* Modules should output valid JSON only. All return types must be hashes (dictionaries) although they can be nested. Lists or simple scalar values are not supported, though they can be trivially contained inside a dictionary.
+* Modules must output valid JSON only. The toplevel return type must be a hash (dictionary) although they can be nested. Lists or simple scalar values are not supported, though they can be trivially contained inside a dictionary.
* In the event of failure, a key of 'failed' should be included, along with a string explanation in 'msg'. Modules that raise tracebacks (stacktraces) are generally considered 'poor' modules, though Ansible can deal with these returns and will automatically convert anything unparseable into a failed result. If you are using the AnsibleModule common Python code, the 'failed' element will be included for you automatically when you call 'fail_json'.
@@ -370,7 +371,7 @@ See an example documentation string in the checkout under `examples/DOCUMENTATIO
Include it in your module file like this::
- #!/usr/bin/env python
+ #!/usr/bin/python
# Copyright header....
DOCUMENTATION = '''
@@ -464,7 +465,7 @@ Module checklist
* Requirements should be documented, using the `requirements=[]` field
* Author should be set, name and github id at least
* Made use of U() for urls, C() for files and options, I() for params, M() for modules?
- * GPL License header
+ * GPL 3 License header
* Does module use check_mode? Could it be modified to use it? Document it
* Examples: make sure they are reproducible
* Return: document the return structure of the module
@@ -483,11 +484,72 @@ Module checklist
* The return structure should be consistent, even if NA/None are used for keys normally returned under other options.
* Are module actions idempotent? If not document in the descriptions or the notes
* Import module snippets `from ansible.module_utils.basic import *` at the bottom, conserves line numbers for debugging.
+* Call your :func:`main` from a conditional so that it is possible to
+ import and test it in the future, for example::
+
+ if __name__ == '__main__':
+ main()
+
+* Try to normalize parameters with other modules; you can add aliases for when the user is more familiar with the underlying API name for the option
+* Being pep8 compliant is nice, but not a requirement. Specifically, the 80 column limit now hinders readability more than it improves it
* Avoid '`action`/`command`', they are imperative and not declarative, there are other ways to express the same thing
+* Sometimes you want to split the module, especially if you are adding a list/info state; in that case you want a _facts version
-* If you are asking 'how can i have a module execute other modules' ... you want to write a role
+* If you are asking 'how can I have a module execute other modules' ... you want to write a role
+* Return values must be able to be serialized as json via the python stdlib
+ json library. Basic python types (strings, ints, dicts, lists, etc) are
+ serializable. A common pitfall is to try returning an object via
+ exit_json(). Instead, convert the fields you need from the object into the
+ fields of a dictionary and return the dictionary.
+* Do not use urllib2 to handle urls. urllib2 does not natively verify TLS
+ certificates and so is insecure for https. Instead, use either fetch_url or
+ open_url from ansible.module_utils.urls.
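Pulling a few of these checklist items together, a minimal illustrative skeleton (the module and its single 'url' option are hypothetical, not an existing module)::

    #!/usr/bin/python

    def main():
        module = AnsibleModule(
            argument_spec=dict(
                url=dict(required=True),
            ),
            supports_check_mode=True,
        )

        # fetch_url (not urllib2) verifies TLS certificates for https
        response, info = fetch_url(module, module.params['url'])
        if info['status'] != 200:
            module.fail_json(msg='request failed', status=info['status'])

        # return plain serializable types, never objects
        module.exit_json(changed=False, status=info['status'], url=info['url'])

    # snippet imports at the bottom, as the checklist recommends
    from ansible.module_utils.basic import *
    from ansible.module_utils.urls import *

    if __name__ == '__main__':
        main()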
+
+Windows modules checklist
+`````````````````````````
+* Favour native powershell and .net ways of doing things over calls to COM libraries or calls to native executables which may or may not be present in all versions of windows
+* modules are in powershell (.ps1 files) but the docs reside in a python file (.py) of the same name
+* look at ansible/lib/ansible/module_utils/powershell.ps1 for common code, avoid duplication
+* start with::
+
+ #!powershell
+
+then::
+
+ # WANT_JSON
+ # POWERSHELL_COMMON
+
+* Arguments:
+ * Try and use state present and state absent like other modules
+ * You need to check that all your mandatory args are present::
+
+ If ($params.state) {
+ $state = $params.state.ToString().ToLower()
+ If (($state -ne 'started') -and ($state -ne 'stopped') -and ($state -ne 'restarted')) {
+ Fail-Json $result "state is '$state'; must be 'started', 'stopped', or 'restarted'"
+ }
+ }
+
+ * Look at existing modules for more examples of argument checking.
+
+* Results
+ * The result object should always contain an attribute called changed set to either $true or $false
+ * Create your result object like this::
+
+ $result = New-Object psobject @{
+ changed = $false
+ other_result_attribute = $some_value
+ };
+
+ If all is well, exit with a
+ Exit-Json $result
+
+ * Ensure anything you return, including errors, can be converted to json.
+ * Be aware that exception messages could contain almost anything.
+ * ConvertTo-Json will fail if it encounters a trailing \ in a string.
+ * If all is not well use Fail-Json to exit.
+
+* Have you tested for powershell 3.0 and 4.0 compliance?
Deprecating and making module aliases
diff --git a/docsite/rst/developing_plugins.rst b/docsite/rst/developing_plugins.rst
index a54e8830f21..97be451b545 100644
--- a/docsite/rst/developing_plugins.rst
+++ b/docsite/rst/developing_plugins.rst
@@ -21,7 +21,7 @@ Carrier Pigeon?) it's as simple as copying the format of one of the existing mod
directory. The value of 'smart' for a connection allows selection of paramiko or openssh based on system capabilities, and chooses
'ssh' if OpenSSH supports ControlPersist, in Ansible 1.2.1 and later. Previous versions did not support 'smart'.
-More documentation on writing connection plugins is pending, though you can jump into `lib/ansible/runner/connection_plugins `_ and figure things out pretty easily.
+More documentation on writing connection plugins is pending, though you can jump into `lib/ansible/plugins/connections `_ and figure things out pretty easily.
.. _developing_lookup_plugins:
@@ -30,7 +30,7 @@ Lookup Plugins
Language constructs like "with_fileglob" and "with_items" are implemented via lookup plugins. Just like other plugin types, you can write your own.
-More documentation on writing lookup plugins is pending, though you can jump into `lib/ansible/runner/lookup_plugins `_ and figure
+More documentation on writing lookup plugins is pending, though you can jump into `lib/ansible/plugins/lookup `_ and figure
things out pretty easily.
.. _developing_vars_plugins:
@@ -54,7 +54,7 @@ Filter Plugins
If you want more Jinja2 filters available in a Jinja2 template (filters like to_yaml and to_json are provided by default), they can be extended by writing a filter plugin. Most of the time, when someone comes up with an idea for a new filter they would like to make available in a playbook, we'll just include them in 'core.py' instead.
-Jump into `lib/ansible/runner/filter_plugins/ `_ for details.
+Jump into `lib/ansible/plugins/filter `_ for details.
.. _developing_callbacks:
@@ -68,17 +68,17 @@ Callbacks are one of the more interesting plugin types. Adding additional callb
Examples
++++++++
-Example callbacks are shown in `plugins/callbacks `_.
+Example callbacks are shown in `lib/ansible/plugins/callback `_.
The `log_plays
-`_
+`_
callback is an example of how to intercept playbook events to a log
file, and the `mail
-`_
+`_
callback sends email when playbooks complete.
The `osx_say
-`_
+`_
callback provided is particularly entertaining -- it will respond with
computer synthesized speech on OS X in relation to playbook events,
and is guaranteed to entertain and/or annoy coworkers.
diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst
index ba3ae1264ff..faac872fad7 100644
--- a/docsite/rst/faq.rst
+++ b/docsite/rst/faq.rst
@@ -11,7 +11,7 @@ How can I set the PATH or any other environment variable for a task or entire pl
Setting environment variables can be done with the `environment` keyword. It can be used at task or playbook level::
environment:
- PATH: {{ ansible_env.PATH }}:/thingy/bin
+ PATH: "{{ ansible_env.PATH }}:/thingy/bin"
SOME: value
diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst
index d7639848a61..808e3e42356 100644
--- a/docsite/rst/galaxy.rst
+++ b/docsite/rst/galaxy.rst
@@ -8,7 +8,7 @@ Ansible Galaxy
The Website
```````````
-The website `Ansible Galaxy `_, is a free site for finding, downloading, rating, and reviewing all kinds of community developed Ansible roles and can be a great way to get a jumpstart on your automation projects.
+The website `Ansible Galaxy `_ is a free site for finding, downloading, rating, and reviewing all kinds of community-developed Ansible roles and can be a great way to get a jumpstart on your automation projects.
You can sign up with social auth and use the download client 'ansible-galaxy' which is included in Ansible 1.4.2 and later.
diff --git a/docsite/rst/guide_aws.rst b/docsite/rst/guide_aws.rst
index c4e12eab497..e0d0c12630d 100644
--- a/docsite/rst/guide_aws.rst
+++ b/docsite/rst/guide_aws.rst
@@ -13,7 +13,7 @@ Requirements for the AWS modules are minimal.
All of the modules require and are tested against recent versions of boto. You'll need this Python module installed on your control machine. Boto can be installed from your OS distribution or python's "pip install boto".
-Whereas classically ansible will execute tasks in it's host loop against multiple remote machines, most cloud-control steps occur on your local machine with reference to the regions to control.
+Whereas classically ansible will execute tasks in its host loop against multiple remote machines, most cloud-control steps occur on your local machine with reference to the regions to control.
In your playbook steps we'll typically be using the following pattern for provisioning steps::
@@ -214,7 +214,7 @@ AWS Image Building With Ansible
```````````````````````````````
Many users may want to have images boot to a more complete configuration rather than configuring them entirely after instantiation. To do this,
-one of many programs can be used with Ansible playbooks to define and upload a base image, which will then get it's own AMI ID for usage with
+one of many programs can be used with Ansible playbooks to define and upload a base image, which will then get its own AMI ID for usage with
the ec2 module or other Ansible AWS modules such as ec2_asg or the cloudformation module. Possible tools include Packer, aminator, and Ansible's
ec2_ami module.
diff --git a/docsite/rst/guide_gce.rst b/docsite/rst/guide_gce.rst
index ed236544a3d..c689632818e 100644
--- a/docsite/rst/guide_gce.rst
+++ b/docsite/rst/guide_gce.rst
@@ -22,7 +22,7 @@ The GCE modules all require the apache-libcloud module, which you can install fr
Credentials
-----------
-To work with the GCE modules, you'll first need to get some credentials. You can create new one from the `console `_ by going to the "APIs and Auth" section and choosing to create a new client ID for a service account. Once you've created a new client ID and downloaded the generated private key (in the `pkcs12 format `_), you'll need to convert the key by running the following command:
+To work with the GCE modules, you'll first need to get some credentials. You can create new ones from the `console `_ by going to the "APIs and Auth" section and choosing to create a new client ID for a service account. Once you've created a new client ID and downloaded (you must click **Generate new P12 Key**) the generated private key (in the `pkcs12 format `_), you'll need to convert the key by running the following command:
.. code-block:: bash
@@ -79,6 +79,8 @@ Create a file ``secrets.py`` looking like following, and put it in some folder w
GCE_PARAMS = ('i...@project.googleusercontent.com', '/path/to/project.pem')
GCE_KEYWORD_PARAMS = {'project': 'project_id'}
+Be sure to enter the email address of the created service account, not the one from your main account.
+
Now the modules can be used as above, but the account information can be omitted.
GCE Dynamic Inventory
@@ -86,9 +88,9 @@ GCE Dynamic Inventory
The best way to interact with your hosts is to use the gce inventory plugin, which dynamically queries GCE and tells Ansible what nodes can be managed.
-Note that when using the inventory script ``gce.py``, you also need to populate the ``gce.ini`` file that you can find in the plugins/inventory directory of the ansible checkout.
+Note that when using the inventory script ``gce.py``, you also need to populate the ``gce.ini`` file that you can find in the contrib/inventory directory of the ansible checkout.
-To use the GCE dynamic inventory script, copy ``gce.py`` from ``plugins/inventory`` into your inventory directory and make it executable. You can specify credentials for ``gce.py`` using the ``GCE_INI_PATH`` environment variable -- the default is to look for gce.ini in the same directory as the inventory script.
+To use the GCE dynamic inventory script, copy ``gce.py`` from ``contrib/inventory`` into your inventory directory and make it executable. You can specify credentials for ``gce.py`` using the ``GCE_INI_PATH`` environment variable -- the default is to look for gce.ini in the same directory as the inventory script.
Let's see if inventory is working:
@@ -109,7 +111,7 @@ Now let's see if we can use the inventory script to talk to Google.
"x.x.x.x"
],
-As with all dynamic inventory plugins in Ansible, you can configure the inventory path in ansible.cfg. The recommended way to use the inventory is to create an ``inventory`` directory, and place both the ``gce.py`` script and a file containing ``localhost`` in it. This can allow for cloud inventory to be used alongside local inventory (such as a physical datacenter) or machines running in different providers.
+As with all dynamic inventory scripts in Ansible, you can configure the inventory path in ansible.cfg. The recommended way to use the inventory is to create an ``inventory`` directory, and place both the ``gce.py`` script and a file containing ``localhost`` in it. This can allow for cloud inventory to be used alongside local inventory (such as a physical datacenter) or machines running in different providers.
Executing ``ansible`` or ``ansible-playbook`` and specifying the ``inventory`` directory instead of an individual file will cause ansible to evaluate each file in that directory for inventory.
diff --git a/docsite/rst/guide_rax.rst b/docsite/rst/guide_rax.rst
index 2a2f415e698..b1cc347eb13 100644
--- a/docsite/rst/guide_rax.rst
+++ b/docsite/rst/guide_rax.rst
@@ -6,7 +6,7 @@ Rackspace Cloud Guide
Introduction
````````````
-.. note:: This section of the documentation is under construction. We are in the process of adding more examples about the Rackspace modules and how they work together. Once complete, there will also be examples for Rackspace Cloud in `ansible-examples `_.
+.. note:: This section of the documentation is under construction. We are in the process of adding more examples about the Rackspace modules and how they work together. Once complete, there will also be examples for Rackspace Cloud in `ansible-examples `_.
Ansible contains a number of core modules for interacting with Rackspace Cloud.
@@ -131,7 +131,7 @@ The rax module returns data about the nodes it creates, like IP addresses, hostn
hostname: "{{ item.name }}"
ansible_ssh_host: "{{ item.rax_accessipv4 }}"
ansible_ssh_pass: "{{ item.rax_adminpass }}"
- groupname: raxhosts
+ groups: raxhosts
with_items: rax.success
when: rax.action == 'create'
@@ -519,7 +519,7 @@ Build a complete webserver environment with servers, custom networks and load ba
ansible_ssh_host: "{{ item.rax_accessipv4 }}"
ansible_ssh_pass: "{{ item.rax_adminpass }}"
ansible_ssh_user: root
- groupname: web
+ groups: web
with_items: rax.success
when: rax.action == 'create'
diff --git a/docsite/rst/guides.rst b/docsite/rst/guides.rst
index bcaefc83da2..c9b7c03ccec 100644
--- a/docsite/rst/guides.rst
+++ b/docsite/rst/guides.rst
@@ -11,7 +11,6 @@ This section is new and evolving. The idea here is explore particular use cases
guide_gce
guide_vagrant
guide_rolling_upgrade
- test_strategies
Pending topics may include: Docker, Jenkins, Google Compute Engine, Linode/DigitalOcean, Continuous Deployment, and more.
diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst
index 1afa47db87d..936a485c9e4 100644
--- a/docsite/rst/index.rst
+++ b/docsite/rst/index.rst
@@ -9,14 +9,16 @@ Welcome to the Ansible documentation!
Ansible is an IT automation tool. It can configure systems, deploy software, and orchestrate more advanced IT tasks
such as continuous deployments or zero downtime rolling updates.
-Ansible's goals are foremost those of simplicity and maximum ease of use. It also has a strong focus on security and reliability, featuring a minimum of moving parts, usage of OpenSSH for transport (with an accelerated socket mode and pull modes as alternatives), and a language that is designed around auditability by humans -- even those not familiar with the program.
+Ansible's main goals are simplicity and ease-of-use. It also has a strong focus on security and reliability, featuring a minimum of moving parts, usage of OpenSSH for transport (with an accelerated socket mode and pull modes as alternatives), and a language that is designed around auditability by humans--even those not familiar with the program.
-We believe simplicity is relevant to all sizes of environments and design for busy users of all types -- whether this means developers, sysadmins, release engineers, IT managers, and everywhere in between. Ansible is appropriate for managing small setups with a handful of instances as well as enterprise environments with many thousands.
+We believe simplicity is relevant to all sizes of environments, so we design for busy users of all types: developers, sysadmins, release engineers, IT managers, and everyone in between. Ansible is appropriate for managing all environments, from small setups with a handful of instances to enterprise environments with many thousands of instances.
-Ansible manages machines in an agentless manner. There is never a question of how to
-upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. As OpenSSH is one of the most peer reviewed open source components, the security exposure of using the tool is greatly reduced. Ansible is decentralized -- it relies on your existing OS credentials to control access to remote machines; if needed it can easily connect with Kerberos, LDAP, and other centralized authentication management systems.
+Ansible manages machines in an agent-less manner. There is never a question of how to
+upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. Because OpenSSH is one of the most peer-reviewed open source components, security exposure is greatly reduced. Ansible is decentralized--it relies on your existing OS credentials to control access to remote machines. If needed, Ansible can easily connect with Kerberos, LDAP, and other centralized authentication management systems.
-This documentation covers the current released version of Ansible (1.8.4) and also some development version features (1.9). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release.
+This documentation covers the current released version of Ansible (1.9.1) and also some development version features (2.0). For recent features, we note in each section the version of Ansible where the feature was added.
+
+Ansible, Inc. releases a new major release of Ansible approximately every two months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup. However, the community around new modules and plugins being developed and contributed moves very quickly, typically adding 20 or so new modules in each release.
.. _an_introduction:
diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst
index 2ff53c22485..466e37fcbfe 100644
--- a/docsite/rst/intro_configuration.rst
+++ b/docsite/rst/intro_configuration.rst
@@ -1,5 +1,5 @@
-The Ansible Configuration File
-++++++++++++++++++++++++++++++
+Configuration file
+++++++++++++++++++
.. contents:: Topics
@@ -144,6 +144,27 @@ different locations::
Most users will not need to use this feature. See :doc:`developing_plugins` for more details
+.. _stdout_callback:
+
+stdout_callback
+===============
+
+.. versionadded:: 2.0
+
+This setting allows you to override the default stdout callback for ansible-playbook.
+
+.. _callback_whitelist:
+
+callback_whitelist
+==================
+
+.. versionadded:: 2.0
+
+Ansible now ships with all included callback plugins ready to use, but they are disabled by default.
+This setting lets you enable a list of additional callbacks. It cannot change or override the
+default stdout callback; use :ref:`stdout_callback` for that.
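+
+For example, a minimal sketch in ansible.cfg (the plugin names are just examples of
+callbacks that ship with Ansible)::
+
+    [defaults]
+    stdout_callback = skippy
+    callback_whitelist = timer,mail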
+
+
.. _command_warnings:
command_warnings
@@ -224,8 +245,7 @@ or ansible action line exactly as written.
executable
==========
-This indicates the command to use to spawn a shell under a sudo environment. Users may need to change this in
-rare instances to /bin/bash in rare instances when sudo is constrained, but in most cases it may be left as is::
+This indicates the command to use to spawn a shell under a sudo environment. Users may need to change this to /bin/bash in rare instances when sudo is constrained, but in most cases it may be left as is::
executable = /bin/bash
@@ -286,9 +306,10 @@ gathering
New in 1.6, the 'gathering' setting controls the default policy of facts gathering (variables discovered about remote systems).
-The value 'implicit' is the default, meaning facts will be gathered per play unless 'gather_facts: False' is set in the play. The value 'explicit' is the inverse, facts will not be gathered unless directly requested in the play.
-
-The value 'smart' means each new host that has no facts discovered will be scanned, but if the same host is addressed in multiple plays it will not be contacted again in the playbook run. This option can be useful for those wishing to save fact gathering time.
+The value 'implicit' is the default, which means that the fact cache will be ignored and facts will be gathered per play unless 'gather_facts: False' is set.
+The value 'explicit' is the inverse: facts will not be gathered unless directly requested in the play.
+The value 'smart' means each new host that has no facts discovered will be scanned, but if the same host is addressed in multiple plays it will not be contacted again in the playbook run.
+This option can be useful for those wishing to save fact gathering time. Both 'smart' and 'explicit' will use the fact cache.
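+
+For example, to prefer cached facts where available, the entry in ansible.cfg would be::
+
+    gathering = smart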
hash_behaviour
==============
@@ -309,7 +330,7 @@ The valid values are either 'replace' (the default) or 'merge'.
hostfile
========
-This is a deprecated setting since 1.9, please look at :ref:`inventory` for the new setting.
+This setting is deprecated since 1.9; please look at :ref:`inventory_file` for the new setting.
.. _host_key_checking:
@@ -321,7 +342,7 @@ implications and wish to disable it, you may do so here by setting the value to
host_key_checking=True
-.. _inventory:
+.. _inventory_file:
inventory
=========
@@ -680,7 +701,7 @@ If set, this will pass a specific set of options to Ansible rather than Ansible'
ssh_args = -o ControlMaster=auto -o ControlPersist=60s
In particular, users may wish to raise the ControlPersist time to encourage performance. A value of 30 minutes may
-be appropriate.
+be appropriate. If `ssh_args` is set, the default ``control_path`` setting is not used.
.. _control_path:
@@ -700,7 +721,7 @@ may wish to shorten the string to something like the below::
Ansible 1.4 and later will instruct users to run with "-vvvv" in situations where it hits this problem
and if so it is easy to tell there is too long of a Control Path filename. This may be frequently
-encountered on EC2.
+encountered on EC2. This setting is ignored if ``ssh_args`` is set.
.. _scp_if_ssh:
@@ -802,3 +823,19 @@ If enabled, this setting allows multiple private keys to be uploaded to the daem
New clients first connect to the target node over SSH to upload the key, which is done via a local socket file, so they must have the same access as the user that launched the daemon originally.
+.. _selinux_settings:
+
+SELinux-Specific Settings
+-------------------------
+
+These are settings that control SELinux interactions.
+
+
+special_context_filesystems
+===========================
+
+.. versionadded:: 1.9
+
+This is a list of file systems that require special treatment when dealing with security context.
+The normal behaviour is for operations to copy the existing context or use the user default; this setting changes that to use a file system dependent context.
+The default list is: nfs,vboxsf,fuse,ramfs
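+
+A sketch of overriding the list in ansible.cfg (assuming the setting lives in the
+[selinux] section; 'vfat' is just an illustrative addition)::
+
+    [selinux]
+    special_context_filesystems = nfs,vboxsf,fuse,ramfs,vfat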
diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst
index 00023a4ccae..5b634d86cd9 100644
--- a/docsite/rst/intro_dynamic_inventory.rst
+++ b/docsite/rst/intro_dynamic_inventory.rst
@@ -12,7 +12,7 @@ in a different software system. Ansible provides a basic text-based system as d
Frequent examples include pulling inventory from a cloud provider, LDAP, `Cobbler `_,
or a piece of expensive enterprisey CMDB software.
-Ansible easily supports all of these options via an external inventory system. The plugins directory contains some of these already -- including options for EC2/Eucalyptus, Rackspace Cloud, and OpenStack, examples of some of which will be detailed below.
+Ansible easily supports all of these options via an external inventory system. The contrib/inventory directory contains some of these already -- including options for EC2/Eucalyptus, Rackspace Cloud, and OpenStack, examples of some of which will be detailed below.
:doc:`tower` also provides a database to store inventory results that is both web and REST Accessible. Tower syncs with all Ansible dynamic inventory sources you might be using, and also includes a graphical inventory editor. By having a database record of all of your hosts, it's easy to correlate past event history and see which ones have had failures on their last playbook runs.
@@ -30,7 +30,7 @@ While primarily used to kickoff OS installations and manage DHCP and DNS, Cobble
layer that allows it to represent data for multiple configuration management systems (even at the same time), and has
been referred to as a 'lightweight CMDB' by some admins.
-To tie Ansible's inventory to Cobbler (optional), copy `this script `_ to /etc/ansible and `chmod +x` the file. cobblerd will now need
+To tie Ansible's inventory to Cobbler (optional), copy `this script `_ to /etc/ansible and `chmod +x` the file. cobblerd will now need
to be running when you are using Ansible and you'll need to use Ansible's ``-i`` command line option (e.g. ``-i /etc/ansible/cobbler.py``).
This particular script will communicate with Cobbler using Cobbler's XMLRPC API.
@@ -80,14 +80,14 @@ So in other words, you can use those variables in arguments/actions as well.
Example: AWS EC2 External Inventory Script
``````````````````````````````````````````
-If you use Amazon Web Services EC2, maintaining an inventory file might not be the best approach, because hosts may come and go over time, be managed by external applications, or you might even be using AWS autoscaling. For this reason, you can use the `EC2 external inventory `_ script.
+If you use Amazon Web Services EC2, maintaining an inventory file might not be the best approach, because hosts may come and go over time, be managed by external applications, or you might even be using AWS autoscaling. For this reason, you can use the `EC2 external inventory `_ script.
You can use this script in one of two ways. The easiest is to use Ansible's ``-i`` command line option and specify the path to the script after
marking it executable::
ansible -i ec2.py -u ubuntu us-east-1d -m ping
-The second option is to copy the script to `/etc/ansible/hosts` and `chmod +x` it. You will also need to copy the `ec2.ini `_ file to `/etc/ansible/ec2.ini`. Then you can run ansible as you would normally.
+The second option is to copy the script to `/etc/ansible/hosts` and `chmod +x` it. You will also need to copy the `ec2.ini `_ file to `/etc/ansible/ec2.ini`. Then you can run ansible as you would normally.
To successfully make an API call to AWS, you will need to configure Boto (the Python interface to AWS). There are a `variety of methods `_ available, but the simplest is just to export two environment variables::
@@ -96,7 +96,7 @@ To successfully make an API call to AWS, you will need to configure Boto (the Py
You can test the script by itself to make sure your config is correct::
- cd plugins/inventory
+ cd contrib/inventory
./ec2.py --list
After a few moments, you should see your entire EC2 inventory across all regions in JSON.
@@ -185,7 +185,7 @@ Both ``ec2_security_group_ids`` and ``ec2_security_group_names`` are comma-separ
To see the complete list of variables available for an instance, run the script by itself::
- cd plugins/inventory
+ cd contrib/inventory
./ec2.py --host ec2-12-12-12-12.compute-1.amazonaws.com
Note that the AWS inventory script will cache results to avoid repeated API calls, and this cache setting is configurable in ec2.ini. To
@@ -210,7 +210,7 @@ In addition to Cobbler and EC2, inventory scripts are also available for::
Vagrant (not to be confused with the provisioner in vagrant, which is preferred)
Zabbix
-Sections on how to use these in more detail will be added over time, but by looking at the "plugins/" directory of the Ansible checkout
+Sections on how to use these in more detail will be added over time, but by looking at the "contrib/inventory" directory of the Ansible checkout
it should be very obvious how to use them. The process for the AWS inventory script is the same.
If you develop an interesting inventory script that might be general purpose, please submit a pull request -- we'd likely be glad
diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst
index 6dc91c32bbc..472c158eac2 100644
--- a/docsite/rst/intro_installation.rst
+++ b/docsite/rst/intro_installation.rst
@@ -8,8 +8,8 @@ Installation
Getting Ansible
```````````````
-You may also wish to follow the `Github project `_ if
-you have a github account. This is also where we keep the issue tracker for sharing
+You may also wish to follow the `GitHub project `_ if
+you have a GitHub account. This is also where we keep the issue tracker for sharing
bugs and feature ideas.
.. _what_will_be_installed:
@@ -126,7 +126,7 @@ If you don't have pip installed in your version of Python, install pip::
Ansible also uses the following Python modules that need to be installed::
- $ sudo pip install paramiko PyYAML Jinja2 httplib2
+ $ sudo pip install paramiko PyYAML Jinja2 httplib2 six
Note when updating ansible, be sure to not only update the source tree, but also the "submodules" in git
which point at Ansible's own modules (not the same kind of modules, alas).
@@ -147,7 +147,7 @@ other than /etc/ansible/hosts:
.. note::
- ANSIBLE_INVENTORY is available starting at 1.9 and subtitutes the deprecated ANSIBLE_HOSTS
+ ANSIBLE_INVENTORY is available starting at 1.9 and substitutes the deprecated ANSIBLE_HOSTS
You can read more about the inventory file in later parts of the manual.
@@ -267,6 +267,24 @@ Ansible is available for Solaris as `SysV package from OpenCSW `_.
+
+Also see the `Ansible `_ page on the ArchWiki.
+
+.. note::
+
+ If you have Python 3 as a default Python slot on your Arch nodes (default setting), then you
+ must set ``ansible_python_interpreter = /usr/bin/python2`` in your group or inventory variables.
+
.. _from_pip:
Latest Releases Via Pip
diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst
index 5c38372e76b..3265821831f 100644
--- a/docsite/rst/intro_inventory.rst
+++ b/docsite/rst/intro_inventory.rst
@@ -46,7 +46,7 @@ To make things explicit, it is suggested that you set them if things are not run
badwolf.example.com:5309
-Suppose you have just static IPs and want to set up some aliases that don't live in your host file, or you are connecting through tunnels. You can do things like this::
+Suppose you have just static IPs and want to set up some aliases that live in your host file, or you are connecting through tunnels. You can also describe hosts like this::
jumper ansible_ssh_port=5555 ansible_ssh_host=192.168.1.50
@@ -106,9 +106,7 @@ Variables can also be applied to an entire group at once::
Groups of Groups, and Group Variables
+++++++++++++++++++++++++++++++++++++
-It is also possible to make groups of groups and assign
-variables to groups. These variables can be used by /usr/bin/ansible-playbook, but not
-/usr/bin/ansible::
+It is also possible to make groups of groups using the ``:children`` suffix. Just like above, you can apply variables using ``:vars``::
[atlanta]
host1
@@ -194,8 +192,14 @@ is an excellent way to track changes to your inventory and host variables.
List of Behavioral Inventory Parameters
+++++++++++++++++++++++++++++++++++++++
-As alluded to above, setting the following variables controls how ansible interacts with remote hosts. Some we have already
-mentioned::
+As alluded to above, setting the following variables controls how ansible interacts with remote hosts.
+
+Host connection::
+
+ ansible_connection
+ Connection type to the host. Candidates are local, smart, ssh or paramiko. The default is smart.
+
+SSH connection::
ansible_ssh_host
The name of the host to connect to, if different from the alias you wish to give to it.
@@ -205,18 +209,24 @@ mentioned::
The default ssh user name to use.
ansible_ssh_pass
The ssh password to use (this is insecure, we strongly recommend using --ask-pass or SSH keys)
- ansible_sudo
- The boolean to decide if sudo should be used for this host. Defaults to false.
- ansible_sudo_pass
- The sudo password to use (this is insecure, we strongly recommend using --ask-sudo-pass)
- ansible_sudo_exe (new in version 1.8)
- The sudo command path.
- ansible_connection
- Connection type of the host. Candidates are local, ssh or paramiko. The default is paramiko before Ansible 1.2, and 'smart' afterwards which detects whether usage of 'ssh' would be feasible based on whether ControlPersist is supported.
ansible_ssh_private_key_file
Private key file used by ssh. Useful if using multiple keys and you don't want to use SSH agent.
+
+Privilege escalation (see :doc:`Ansible Privilege Escalation` for further details)::
+
+  ansible_become
+    Equivalent to ansible_sudo or ansible_su, allows you to force privilege escalation
+  ansible_become_method
+    Allows you to set the privilege escalation method
+  ansible_become_user
+    Equivalent to ansible_sudo_user or ansible_su_user, allows you to set the user you become through privilege escalation
+  ansible_become_pass
+    Equivalent to ansible_sudo_pass or ansible_su_pass, allows you to set the privilege escalation password
+
+Remote host environment parameters::
+
ansible_shell_type
- The shell type of the target system. By default commands are formatted using 'sh'-style syntax by default. Setting this to 'csh' or 'fish' will cause commands executed on target systems to follow those shell's syntax instead.
+    The shell type of the target system. Commands are formatted using 'sh'-style syntax by default. Setting this to 'csh' or 'fish' will cause commands executed on target systems to follow that shell's syntax instead.
ansible_python_interpreter
The target host python path. This is useful for systems with more
than one Python or not located at "/usr/bin/python" such as \*BSD, or where /usr/bin/python
@@ -242,7 +252,7 @@ Examples from a host file::
:doc:`intro_adhoc`
Examples of basic commands
:doc:`playbooks`
- Learning ansible's configuration management language
+ Learning Ansible’s configuration, deployment, and orchestration language.
`Mailing List `_
Questions? Help? Ideas? Stop by the list on Google Groups
`irc.freenode.net `_
diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst
index 5dd9ad5d1d0..645248fde50 100644
--- a/docsite/rst/intro_windows.rst
+++ b/docsite/rst/intro_windows.rst
@@ -26,7 +26,7 @@ Installing on the Control Machine
On a Linux control machine::
- pip install http://github.com/diyan/pywinrm/archive/master.zip#egg=pywinrm
+ pip install https://github.com/diyan/pywinrm/archive/master.zip#egg=pywinrm
If you wish to connect to domain accounts published through Active Directory (as opposed to local accounts created on the remote host)::
diff --git a/docsite/rst/modules.rst b/docsite/rst/modules.rst
index aa9ca0f40a1..6988fac8dce 100644
--- a/docsite/rst/modules.rst
+++ b/docsite/rst/modules.rst
@@ -2,48 +2,20 @@ About Modules
=============
.. toctree::
- :maxdepth: 4
+ :maxdepth: 1
-.. _modules_intro:
+ modules_intro
+ modules_core
+ modules_extra
+ common_return_values
-Introduction
-````````````
Ansible ships with a number of modules (called the 'module library')
that can be executed directly on remote hosts or through :doc:`Playbooks `.
-Users can also write their own modules. These modules can control system resources, like services, packages, or files (anything really), or
-handle executing system commands.
+Users can also write their own modules. These modules can control system resources,
+like services, packages, or files (anything really), or handle executing system commands.
-Let's review how we execute three different modules from the command line::
-
- ansible webservers -m service -a "name=httpd state=started"
- ansible webservers -m ping
- ansible webservers -m command -a "/sbin/reboot -t now"
-
-Each module supports taking arguments. Nearly all modules take ``key=value``
-arguments, space delimited. Some modules take no arguments, and the command/shell modules simply
-take the string of the command you want to run.
-
-From playbooks, Ansible modules are executed in a very similar way::
-
- - name: reboot the servers
- action: command /sbin/reboot -t now
-
-Which can be abbreviated to::
-
- - name: reboot the servers
- command: /sbin/reboot -t now
-
-All modules technically return JSON format data, though if you are using the command line or playbooks, you don't really need to know much about
-that. If you're writing your own module, you care, and this means you do not have to write modules in any particular language -- you get to choose.
-
-Modules are `idempotent`, meaning they will seek to avoid changes to the system unless a change needs to be made. When using Ansible
-playbooks, these modules can trigger 'change events' in the form of notifying 'handlers' to run additional tasks.
-
-Documentation for each module can be accessed from the command line with the ansible-doc tool::
-
- ansible-doc yum
.. seealso::
@@ -59,4 +31,3 @@ Documentation for each module can be accessed from the command line with the ans
Questions? Help? Ideas? Stop by the list on Google Groups
`irc.freenode.net `_
#ansible IRC chat channel
-
diff --git a/docsite/rst/modules_core.rst b/docsite/rst/modules_core.rst
new file mode 100644
index 00000000000..6364a1556ff
--- /dev/null
+++ b/docsite/rst/modules_core.rst
@@ -0,0 +1,13 @@
+Core Modules
+------------
+
+These are modules that the core ansible team maintains and will always ship with ansible itself.
+They will also receive slightly higher priority for all requests than those in the "extras" repos.
+
+The source of these modules is hosted on GitHub in the `ansible-modules-core `_ repo.
+
+If you believe you have found a bug in a core module and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
+
+Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development-oriented topics should instead use the similar `ansible-devel google group `_.
+
+Documentation updates for these modules can also be edited directly in the module and submitted as a pull request to the module source code; just look for the "DOCUMENTATION" block in the source tree.
diff --git a/docsite/rst/modules_extra.rst b/docsite/rst/modules_extra.rst
new file mode 100644
index 00000000000..479013bb667
--- /dev/null
+++ b/docsite/rst/modules_extra.rst
@@ -0,0 +1,22 @@
+Extras Modules
+--------------
+
+These modules are currently shipped with Ansible, but might be shipped separately in the future. They are also mostly maintained by the community.
+Non-core modules are still fully usable, but may receive slightly lower response rates for issues and pull requests.
+
+Popular "extras" modules may be promoted to core modules over time.
+
+The source for these modules is hosted on GitHub in the `ansible-modules-extras `_ repo.
+
+If you believe you have found a bug in an extras module and are already running the latest stable or development version of Ansible,
+first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_
+to see if a bug has already been filed. If not, we would be grateful if you would file one.
+
+Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group `_
+or on Ansible's "#ansible" channel, located on irc.freenode.net.
+Development-oriented topics should instead use the similar `ansible-devel google group `_.
+
+Documentation updates for these modules can also be edited directly in the module and submitted as a pull request to the module source code; just look for the "DOCUMENTATION" block in the source tree.
+
+For help in developing on modules, should you be so inclined, please read :doc:`community`, :doc:`developing_test_pr` and :doc:`developing_modules`.
+
diff --git a/docsite/rst/modules_intro.rst b/docsite/rst/modules_intro.rst
new file mode 100644
index 00000000000..bb17bd6a19d
--- /dev/null
+++ b/docsite/rst/modules_intro.rst
@@ -0,0 +1,64 @@
+Introduction
+============
+
+Modules (also referred to as "task plugins" or "library plugins") are what do
+the actual work in ansible; they are what gets executed in each playbook task.
+You can also run a single module using the 'ansible' command.
+
+Let's review how we execute three different modules from the command line::
+
+ ansible webservers -m service -a "name=httpd state=started"
+ ansible webservers -m ping
+ ansible webservers -m command -a "/sbin/reboot -t now"
+
+Each module supports taking arguments. Nearly all modules take ``key=value``
+arguments, space delimited. Some modules take no arguments, and the command/shell modules simply
+take the string of the command you want to run.
+
+From playbooks, Ansible modules are executed in a very similar way::
+
+ - name: reboot the servers
+ action: command /sbin/reboot -t now
+
+Which can be abbreviated to::
+
+ - name: reboot the servers
+ command: /sbin/reboot -t now
+
+Another way to pass arguments to a module is using YAML syntax, also called 'complex args' ::
+
+ - name: restart webserver
+ service:
+ name: httpd
+ state: restarted
+
+All modules technically return JSON format data, though if you are using the command line or playbooks, you don't really need to know much about
+that. If you're writing your own module, you care, and this means you do not have to write modules in any particular language -- you get to choose.
+
+Modules strive to be `idempotent`, meaning they will seek to avoid changes to the system unless a change needs to be made. When using Ansible
+playbooks, these modules can trigger 'change events' in the form of notifying 'handlers' to run additional tasks.
+
+Documentation for each module can be accessed from the command line with the ansible-doc tool::
+
+ ansible-doc yum
+
+A list of all installed modules is also available::
+
+ ansible-doc -l
+
+
+.. seealso::
+
+ :doc:`intro_adhoc`
+ Examples of using modules in /usr/bin/ansible
+ :doc:`playbooks`
+ Examples of using modules with /usr/bin/ansible-playbook
+ :doc:`developing_modules`
+ How to write your own modules
+ :doc:`developing_api`
+ Examples of using modules with the Python API
+ `Mailing List `_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net `_
+ #ansible IRC chat channel
+
diff --git a/docsite/rst/playbooks.rst b/docsite/rst/playbooks.rst
index cda4e9bdb7f..5e1c1b98695 100644
--- a/docsite/rst/playbooks.rst
+++ b/docsite/rst/playbooks.rst
@@ -24,6 +24,8 @@ It is recommended to look at `Example Playbooks `_
+ Have a question? Stop by the google group!
+ `irc.freenode.net `_
+ #ansible IRC chat channel
+
+
+
+
diff --git a/docsite/rst/playbooks_conditionals.rst b/docsite/rst/playbooks_conditionals.rst
index bdadaf59715..ed818ee2860 100644
--- a/docsite/rst/playbooks_conditionals.rst
+++ b/docsite/rst/playbooks_conditionals.rst
@@ -90,7 +90,7 @@ If a required variable has not been set, you can skip or fail using Jinja2's
when: foo is defined
- fail: msg="Bailing out. this play requires 'bar'"
- when: bar is not defined
+ when: bar is undefined
This is especially useful in combination with the conditional import of vars
files (see below).
@@ -119,12 +119,13 @@ Applying 'when' to roles and includes
`````````````````````````````````````
Note that if you have several tasks that all share the same conditional statement, you can affix the conditional
-to a task include statement as below. Note this does not work with playbook includes, just task includes. All the tasks
-get evaluated, but the conditional is applied to each and every task::
+to a task include statement as below. All the tasks get evaluated, but the conditional is applied to each and every task::
- include: tasks/sometasks.yml
when: "'reticulating splines' in output"
+.. note:: In versions prior to 2.0 this worked with task includes but not playbook includes. 2.0 allows it to work with both.
+
Or with a role::
- hosts: webservers
diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst
index 8f672791add..20981503df4 100644
--- a/docsite/rst/playbooks_delegation.rst
+++ b/docsite/rst/playbooks_delegation.rst
@@ -9,7 +9,7 @@ This in particular is very applicable when setting up continuous deployment infr
Additional features allow for tuning the orders in which things complete, and assigning a batch window size for how many machines to process at once during a rolling update.
-This section covers all of these features. For examples of these items in use, `please see the ansible-examples repository `_. There are quite a few examples of zero-downtime update procedures for different kinds of applications.
+This section covers all of these features. For examples of these items in use, `please see the ansible-examples repository `_. There are quite a few examples of zero-downtime update procedures for different kinds of applications.
You should also consult the :doc:`modules` section, various modules like 'ec2_elb', 'nagios', and 'bigip_pool', and 'netscaler' dovetail neatly with the concepts mentioned here.
@@ -189,7 +189,7 @@ use the default remote connection type::
:doc:`playbooks`
An introduction to playbooks
- `Ansible Examples on GitHub `_
+ `Ansible Examples on GitHub `_
Many examples of full-stack deployments
`User Mailing List `_
Have a question? Stop by the google group!
diff --git a/docsite/rst/playbooks_filters.rst b/docsite/rst/playbooks_filters.rst
index ef6185f9514..d95f617f192 100644
--- a/docsite/rst/playbooks_filters.rst
+++ b/docsite/rst/playbooks_filters.rst
@@ -3,6 +3,7 @@ Jinja2 filters
.. contents:: Topics
+
Filters in Jinja2 are a way of transforming template expressions from one kind of data into another. Jinja2
ships with many of these. See `builtin filters`_ in the official Jinja2 template documentation.
@@ -16,9 +17,27 @@ Filters For Formatting Data
The following filters will take a data structure in a template and render it in a slightly different format. These
are occasionally useful for debugging::
+ {{ some_variable | to_json }}
+ {{ some_variable | to_yaml }}
+
+For human readable output, you can use::
+
{{ some_variable | to_nice_json }}
{{ some_variable | to_nice_yaml }}
+Alternatively, you may be reading in some already formatted data::
+
+ {{ some_variable | from_json }}
+ {{ some_variable | from_yaml }}
+
+For example::
+
+ tasks:
+ - shell: cat /some/path/to/file.json
+ register: result
+
+ - set_fact: myvar="{{ result.stdout | from_json }}"
+
.. _filters_used_with_conditionals:
Filters Often Used With Conditionals
@@ -91,6 +110,10 @@ As of Ansible 1.8, it is possible to use the default filter to omit variables an
For the first two files in the list, the default mode will be determined by the umask of the system as the `mode=`
parameter will not be sent to the file module while the final file will receive the `mode=0444` option.
+.. note:: If you are "chaining" additional filters after the `default(omit)` filter, you should instead do something like this:
+ `"{{ foo | default(None) | some_filter or omit }}"`. In this example, the default `None` (python null) value will cause the
+ later filters to fail, which will trigger the `or omit` portion of the logic. Using omit in this manner is very specific to
+ the later filters you're chaining though, so be prepared for some trial and error if you do this.
.. _list_filters:
@@ -299,7 +322,11 @@ Hash types available depend on the master system running ansible,
Other Useful Filters
--------------------
-To use one value on true and another on false (since 1.9)::
+To add quotes for shell usage::
+
+  - shell: echo {{ string_value | quote }}
+
+To use one value on true and another on false (new in version 1.9)::
{{ (name == "John") | ternary('Mr','Ms') }}
@@ -323,6 +350,15 @@ To get the real path of a link (new in version 1.8)::
{{ path | realpath }}
+To get the relative path of a link, from a start point (new in version 1.7)::
+
+ {{ path | relpath('/etc') }}
+
+To get the root and extension of a path or filename (new in version 2.0)::
+
+ # with path == 'nginx.conf' the return would be ('nginx', '.conf')
+ {{ path | splitext }}
+
To work with Base64 encoded strings::
{{ encoded | b64decode }}
@@ -352,6 +388,8 @@ To match strings against a regex, use the "match" or "search" filter::
'match' will require a complete match in the string, while 'search' will require a match inside of the string.
+.. versionadded:: 1.6
+
To replace text in a string with regex, use the "regex_replace" filter::
# convert "ansible" to "able"
@@ -363,6 +401,11 @@ To replace text in a string with regex, use the "regex_replace" filter::
.. note:: If "regex_replace" filter is used with variables inside YAML arguments (as opposed to simpler 'key=value' arguments),
then you need to escape backreferences (e.g. ``\\1``) with 4 backslashes (``\\\\``) instead of 2 (``\\``).
+To escape special characters within a regex, use the "regex_escape" filter::
+
+ # convert '^f.*o(.*)$' to '\^f\.\*o\(\.\*\)\$'
+ {{ '^f.*o(.*)$' | regex_escape() }}
+
A few useful filters are typically added with each new Ansible release. The development documentation shows
how to extend Ansible filters by writing your own as plugins, though in general, we encourage new ones
to be added to core so everyone can make use of them.
diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst
index 4fe2ab3ec3f..ad53cb9eb47 100644
--- a/docsite/rst/playbooks_intro.rst
+++ b/docsite/rst/playbooks_intro.rst
@@ -106,6 +106,33 @@ YAML dictionaries to supply the modules with their key=value arguments.::
name: httpd
state: restarted
+Playbooks can contain multiple plays. You may have a playbook that targets first
+the web servers, and then the database servers. For example::
+
+ ---
+ - hosts: webservers
+ remote_user: root
+
+ tasks:
+ - name: ensure apache is at the latest version
+ yum: pkg=httpd state=latest
+ - name: write the apache config file
+ template: src=/srv/httpd.j2 dest=/etc/httpd.conf
+
+ - hosts: databases
+ remote_user: root
+
+ tasks:
+ - name: ensure postgresql is at the latest version
+ yum: name=postgresql state=latest
+ - name: ensure that postgresql is started
+ service: name=postgresql state=running
+
+You can use this method to switch between the host group you're targeting,
+the username logging into the remote servers, whether to sudo or not, and so
+forth. Plays, like tasks, run in the order specified in the playbook: top to
+bottom.
+
Below, we'll break down what the various features of the playbook language are.
.. _playbook_basics:
@@ -338,10 +365,10 @@ The things listed in the 'notify' section of a task are called
handlers.
Handlers are lists of tasks, not really any different from regular
-tasks, that are referenced by name. Handlers are what notifiers
-notify. If nothing notifies a handler, it will not run. Regardless
-of how many things notify a handler, it will run only once, after all
-of the tasks complete in a particular play.
+tasks, that are referenced by a globally unique name. Handlers are
+what notifiers notify. If nothing notifies a handler, it will not
+run. Regardless of how many things notify a handler, it will run only
+once, after all of the tasks complete in a particular play.
Here's an example handlers section::
@@ -355,7 +382,10 @@ Handlers are best used to restart services and trigger reboots. You probably
won't need them for much else.
.. note::
- Notify handlers are always run in the order written.
+ * Notify handlers are always run in the order written.
+ * Handler names live in a global namespace.
+ * If two handler tasks have the same name, only one will run.
+ `* `_
Roles are described later on. It's worthwhile to point out that handlers are
automatically processed between 'pre_tasks', 'roles', 'tasks', and 'post_tasks'
diff --git a/docsite/rst/playbooks_lookups.rst b/docsite/rst/playbooks_lookups.rst
index 9659e86ad80..502a808e493 100644
--- a/docsite/rst/playbooks_lookups.rst
+++ b/docsite/rst/playbooks_lookups.rst
@@ -237,7 +237,7 @@ Here are some examples::
# The following lookups were added in 1.9
- debug: msg="{{item}}"
with_url:
- - 'http://github.com/gremlin.keys'
+ - 'https://github.com/gremlin.keys'
# outputs the cartesian product of the supplied lists
- debug: msg="{{item}}"
diff --git a/docsite/rst/playbooks_loops.rst b/docsite/rst/playbooks_loops.rst
index 56e43dd7d36..5acc6ae6301 100644
--- a/docsite/rst/playbooks_loops.rst
+++ b/docsite/rst/playbooks_loops.rst
@@ -23,7 +23,7 @@ To save some typing, repeated tasks can be written in short-hand like so::
If you have defined a YAML list in a variables file, or the 'vars' section, you can also do::
- with_items: somelist
+ with_items: "{{somelist}}"
The above would be the equivalent of::
@@ -58,12 +58,12 @@ Loops can be nested as well::
- [ 'alice', 'bob' ]
- [ 'clientdb', 'employeedb', 'providerdb' ]
-As with the case of 'with_items' above, you can use previously defined variables. Just specify the variable's name without templating it with '{{ }}'::
+As with the case of 'with_items' above, you can use previously defined variables::
- name: here, 'users' contains the above list of employees
mysql_user: name={{ item[0] }} priv={{ item[1] }}.*:ALL append_privs=yes password=foo
with_nested:
- - users
+ - "{{users}}"
- [ 'clientdb', 'employeedb', 'providerdb' ]
.. _looping_over_hashes:
@@ -89,7 +89,7 @@ And you want to print every user's name and phone number. You can loop through
tasks:
- name: Print phone records
debug: msg="User {{ item.key }} is {{ item.value.name }} ({{ item.value.telephone }})"
- with_dict: users
+ with_dict: "{{users}}"
.. _looping_over_fileglobs:
@@ -111,7 +111,7 @@ be used like this::
- copy: src={{ item }} dest=/etc/fooapp/ owner=root mode=600
with_fileglob:
- /playbooks/files/fooapp/*
-
+
.. note:: When using a relative path with ``with_fileglob`` in a role, Ansible resolves the path relative to the `roles//files` directory.
Looping over Parallel Sets of Data
@@ -130,40 +130,71 @@ And you want the set of '(a, 1)' and '(b, 2)' and so on. Use 'with_together' t
tasks:
- debug: msg="{{ item.0 }} and {{ item.1 }}"
with_together:
- - alpha
- - numbers
+ - "{{alpha}}"
+ - "{{numbers}}"
Looping over Subelements
````````````````````````
Suppose you want to do something like loop over a list of users, creating them, and allowing them to login by a certain set of
-SSH keys.
+SSH keys.
How might that be accomplished? Let's assume you had the following defined and loaded in via "vars_files" or maybe a "group_vars/all" file::
---
users:
- name: alice
- authorized:
+ authorized:
- /tmp/alice/onekey.pub
- /tmp/alice/twokey.pub
+ mysql:
+ password: mysql-password
+ hosts:
+ - "%"
+ - "127.0.0.1"
+ - "::1"
+ - "localhost"
+ privs:
+ - "*.*:SELECT"
+ - "DB1.*:ALL"
- name: bob
authorized:
- /tmp/bob/id_rsa.pub
+ mysql:
+ password: other-mysql-password
+ hosts:
+ - "db1"
+ privs:
+ - "*.*:SELECT"
+ - "DB2.*:ALL"
It might happen like so::
- user: name={{ item.name }} state=present generate_ssh_key=yes
- with_items: users
+ with_items: "{{users}}"
- authorized_key: "user={{ item.0.name }} key='{{ lookup('file', item.1) }}'"
with_subelements:
- users
- authorized
-Subelements walks a list of hashes (aka dictionaries) and then traverses a list with a given key inside of those
+Given the mysql hosts and privs subkey lists, you can also iterate over a list in a nested subkey::
+
+    - name: Setup MySQL users
+      mysql_user: name={{ item.0.name }} password={{ item.0.mysql.password }} host={{ item.1 }} priv={{ item.0.mysql.privs | join('/') }}
+ with_subelements:
+ - users
+ - mysql.hosts
+
+Subelements walks a list of hashes (aka dictionaries) and then traverses a list with a given (nested sub-)key inside of those
records.
+Optionally, you can add a third element to the subelements list that holds a
+dictionary of flags. Currently you can add the 'skip_missing' flag. If set to
+True, the lookup plugin will skip the list items that do not contain the given
+subkey. Without this flag, or if that flag is set to False, the plugin will
+yield an error and complain about the missing subkey.
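+
+A sketch of the flag in use (if an entry in 'users' had no mysql subkey, it would
+simply be skipped rather than raising an error)::
+
+    - name: Setup MySQL users, tolerating entries without a mysql subkey
+      mysql_user: name={{ item.0.name }} password={{ item.0.mysql.password }} host={{ item.1 }}
+      with_subelements:
+        - users
+        - mysql.hosts
+        - skip_missing: True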
+
The authorized_key pattern is exactly where it comes up most.
.. _looping_over_integer_sequences:
@@ -298,7 +329,7 @@ Should you ever need to execute a command remotely, you would not use the above
- name: Do something with each result
shell: /usr/bin/something_else --param {{ item }}
- with_items: command_result.stdout_lines
+ with_items: "{{command_result.stdout_lines}}"
.. _indexed_lists:
@@ -314,7 +345,7 @@ It's uncommonly used::
- name: indexed loop demo
debug: msg="at array position {{ item.0 }} there is a value {{ item.1 }}"
- with_indexed_items: some_list
+ with_indexed_items: "{{some_list}}"
.. _using_ini_with_a_loop:
@@ -387,8 +418,8 @@ As you can see the formatting of packages in these lists is all over the place.
- name: flattened loop demo
yum: name={{ item }} state=installed
with_flattened:
- - packages_base
- - packages_apps
+ - "{{packages_base}}"
+ - "{{packages_apps}}"
That's how!
@@ -452,7 +483,7 @@ Subsequent loops over the registered variable to inspect the results may look li
fail:
msg: "The command ({{ item.cmd }}) did not have a 0 return code"
when: item.rc != 0
- with_items: echo.results
+ with_items: "{{echo.results}}"
.. _writing_your_own_iterators:
diff --git a/docsite/rst/playbooks_prompts.rst b/docsite/rst/playbooks_prompts.rst
index 29fc218fe86..a18ed1ae726 100644
--- a/docsite/rst/playbooks_prompts.rst
+++ b/docsite/rst/playbooks_prompts.rst
@@ -20,9 +20,12 @@ Here is a most basic example::
from: "camelot"
vars_prompt:
- name: "what is your name?"
- quest: "what is your quest?"
- favcolor: "what is your favorite color?"
+ - name: "name"
+ prompt: "what is your name?"
+ - name: "quest"
+ prompt: "what is your quest?"
+ - name: "favcolor"
+ prompt: "what is your favorite color?"
If you have a variable that changes infrequently, it might make sense to
provide a default value that can be overridden. This can be accomplished using
diff --git a/docsite/rst/playbooks_special_topics.rst b/docsite/rst/playbooks_special_topics.rst
index c57f5796c96..74974cad108 100644
--- a/docsite/rst/playbooks_special_topics.rst
+++ b/docsite/rst/playbooks_special_topics.rst
@@ -7,6 +7,7 @@ and adopt these only if they seem relevant or useful to your environment.
.. toctree::
:maxdepth: 1
+ become
playbooks_acceleration
playbooks_async
playbooks_checkmode
diff --git a/docsite/rst/playbooks_strategies.rst b/docsite/rst/playbooks_strategies.rst
new file mode 100644
index 00000000000..91e90eb344b
--- /dev/null
+++ b/docsite/rst/playbooks_strategies.rst
@@ -0,0 +1,39 @@
+Strategies
+===========
+
+In 2.0 we added a new way to control play execution: ``strategy``. By default, plays will
+still run as they used to, with what we call the ``linear`` strategy: all hosts will run each
+task before any host starts the next task, using the number of forks (default 5) to parallelize.
+
+The ``serial`` directive can 'batch' this behaviour to a subset of the hosts, which then run to
+completion of the play before the next 'batch' starts.
+
+A second strategy, ``free``, also ships with ansible; it allows each host to run until the end of
+the play as fast as it can::
+
+ - hosts: all
+ strategy: free
+ tasks:
+ ...
+
+
+.. _strategy_plugins:
+
+Strategy Plugins
+`````````````````
+
+Strategies are implemented via a new type of plugin; this means that in the future new
+execution types can be added, either locally by users or to Ansible itself by
+a code contribution.
+
+.. seealso::
+
+ :doc:`playbooks`
+ An introduction to playbooks
+ :doc:`playbooks_roles`
+ Playbook organization by roles
+ `User Mailing List `_
+ Have a question? Stop by the google group!
+ `irc.freenode.net `_
+ #ansible IRC chat channel
+
diff --git a/docsite/rst/playbooks_tags.rst b/docsite/rst/playbooks_tags.rst
index a03b975a4eb..76101717b7a 100644
--- a/docsite/rst/playbooks_tags.rst
+++ b/docsite/rst/playbooks_tags.rst
@@ -38,8 +38,9 @@ And you may also tag basic include statements::
- include: foo.yml tags=web,foo
-Both of these have the function of tagging every single task inside the include statement.
-
+Both of these apply the specified tags to every task inside the included
+file or role, so that these tasks can be selectively run when the playbook
+is invoked with the corresponding tags.
Special Tags
````````````
diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst
index b0e2e223cdc..c47dfe0fc73 100644
--- a/docsite/rst/playbooks_variables.rst
+++ b/docsite/rst/playbooks_variables.rst
@@ -308,7 +308,7 @@ This will return a ginormous amount of variable data, which may look like this,
"type": "ether"
},
"ansible_form_factor": "Other",
- "ansible_fqdn": "ubuntu2",
+ "ansible_fqdn": "ubuntu2.example.com",
"ansible_hostname": "ubuntu2",
"ansible_interfaces": [
"lo",
@@ -353,6 +353,7 @@ This will return a ginormous amount of variable data, which may look like this,
"size_total": 20079898624
}
],
+ "ansible_nodename": "ubuntu2.example.com",
"ansible_os_family": "Debian",
"ansible_pkg_mgr": "apt",
"ansible_processor": [
@@ -387,8 +388,11 @@ In the above the model of the first harddrive may be referenced in a template or
Similarly, the hostname as the system reports it is::
- {{ ansible_hostname }}
+ {{ ansible_nodename }}
+
+and the unqualified hostname is the string before the first period (.)::
+ {{ ansible_hostname }}
Facts are frequently used in conditionals (see :doc:`playbooks_conditionals`) and also in templates.
@@ -494,7 +498,11 @@ not be necessary to "hit" all servers to reference variables and information abo
With fact caching enabled, it is possible for machine in one group to reference variables about machines in the other group, despite
the fact that they have not been communicated with in the current execution of /usr/bin/ansible-playbook.
-To configure fact caching, enable it in ansible.cfg as follows::
+To benefit from cached facts, you will want to change the 'gathering' setting to 'smart' or 'explicit' or set 'gather_facts' to False in most plays.
+
+Currently, Ansible ships with two persistent cache plugins: redis and jsonfile.
+
+To configure fact caching using redis, enable it in ansible.cfg as follows::
[defaults]
gathering = smart
@@ -502,9 +510,6 @@ To configure fact caching, enable it in ansible.cfg as follows::
fact_caching_timeout = 86400
# seconds
-You might also want to change the 'gathering' setting to 'smart' or 'explicit' or set gather_facts to False in most plays.
-
-At the time of writing, Redis is the only supported fact caching engine.
To get redis up and running, perform the equivalent OS commands::
yum install redis
@@ -515,6 +520,18 @@ Note that the Python redis library should be installed from pip, the version pac
In current embodiments, this feature is in beta-level state and the Redis plugin does not support port or password configuration, this is expected to change in the near future.
+To configure fact caching using jsonfile, enable it in ansible.cfg as follows::
+
+ [defaults]
+ gathering = smart
+ fact_caching = jsonfile
+ fact_caching_connection = /path/to/cachedir
+ fact_caching_timeout = 86400
+ # seconds
+
+`fact_caching_connection` is a local filesystem path to a writable
+directory (Ansible will attempt to create the directory if it does not exist).
+
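As a sketch of what cached facts enable, a play that gathers nothing itself can still reference facts collected from another host on an earlier run (the hostname and fact are illustrative)::

    - hosts: webservers
      gather_facts: no
      tasks:
        - debug:
            msg: "db1 runs {{ hostvars['db1']['ansible_distribution'] }}"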
.. _registered_variables:
Registered Variables
@@ -614,6 +631,8 @@ Don't worry about any of this unless you think you need it. You'll know when yo
Also available, *inventory_dir* is the pathname of the directory holding Ansible's inventory host file, *inventory_file* is the pathname and the filename pointing to the Ansible's inventory host file.
+And finally, *role_path* will return the current role's pathname (since 1.8). This will only work inside a role.
+
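A one-task sketch of *role_path* in use from inside a role (the role name is illustrative)::

    # roles/myrole/tasks/main.yml
    - debug:
        msg: "this task file lives under {{ role_path }}"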
.. _variable_file_separation_details:
Variable File Separation
@@ -782,7 +801,7 @@ Parameterized roles are useful.
If you are using a role and want to override a default, pass it as a parameter to the role like so::
roles:
- - { name: apache, http_port: 8080 }
+ - { role: apache, http_port: 8080 }
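The same override can also be written in the equivalent multi-line form, which reads more easily once several parameters are involved::

    roles:
      - role: apache
        http_port: 8080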
This makes it clear to the playbook reader that you've made a conscious choice to override some default in the role, or pass in some
configuration that the role can't assume by itself. It also allows you to pass something site-specific that isn't really part of the
diff --git a/docsite/rst/playbooks_vault.rst b/docsite/rst/playbooks_vault.rst
index 921a05c50ed..5cb1eb90c9c 100644
--- a/docsite/rst/playbooks_vault.rst
+++ b/docsite/rst/playbooks_vault.rst
@@ -5,7 +5,7 @@ Vault
New in Ansible 1.5, "Vault" is a feature of ansible that allows keeping sensitive data such as passwords or keys in encrypted files, rather than as plaintext in your playbooks or roles. These vault files can then be distributed or placed in source control.
-To enable this feature, a command line tool, `ansible-vault` is used to edit files, and a command line flag `--ask-vault-pass` or `--vault-password-file` is used.
+To enable this feature, a command line tool, `ansible-vault`, is used to edit files, and a command line flag, `--ask-vault-pass` or `--vault-password-file`, is used. Alternatively, you may specify the location of a password file in your ansible.cfg file; this option requires no command line flag.
.. _what_can_be_encrypted_with_vault:
@@ -14,7 +14,7 @@ What Can Be Encrypted With Vault
The vault feature can encrypt any structured data file used by Ansible. This can include "group_vars/" or "host_vars/" inventory variables, variables loaded by "include_vars" or "vars_files", or variable files passed on the ansible-playbook command line with "-e @file.yml" or "-e @file.json". Role variables and defaults are also included!
-Because Ansible tasks, handlers, and so on are also data, these can also be encrypted with vault. If you'd like to not betray what variables you are even using, you can go as far to keep an individual task file entirely encrypted. However, that might be a little much and could annoy your coworkers :)
+Ansible tasks, handlers, and so on are also data, so these can be encrypted with vault as well. To hide the names of the variables that you're using, you can encrypt the task files in their entirety. However, that might be a little too much and could annoy your coworkers :)
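As a sketch of the typical workflow, a hypothetical play that consumes a vault-encrypted variables file; the file and variable names are illustrative, and the playbook would be run with ``--ask-vault-pass`` or ``--vault-password-file``::

    - hosts: webservers
      vars_files:
        - vars/secrets.yml    # created with: ansible-vault create vars/secrets.yml
      tasks:
        - debug:
            msg: "the database password is {{ vault_db_password }}"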
.. _creating_files:
diff --git a/docsite/rst/quickstart.rst b/docsite/rst/quickstart.rst
index 161748d9f02..055e4aecabb 100644
--- a/docsite/rst/quickstart.rst
+++ b/docsite/rst/quickstart.rst
@@ -3,7 +3,7 @@ Quickstart Video
We've recorded a short video that shows how to get started with Ansible that you may like to use alongside the documentation.
-The `quickstart video `_ is about 30 minutes long and will show you some of the basics about your
+The `quickstart video `_ is about 30 minutes long and will show you some of the basics about your
first steps with Ansible.
Enjoy, and be sure to visit the rest of the documentation to learn more.
diff --git a/docsite/rst/test_strategies.rst b/docsite/rst/test_strategies.rst
index a3abf160906..e26e9251831 100644
--- a/docsite/rst/test_strategies.rst
+++ b/docsite/rst/test_strategies.rst
@@ -19,16 +19,16 @@ also very easy to run the steps on the localhost or testing servers. Ansible let
The Right Level of Testing
``````````````````````````
-Ansible resources are models of desired-state. As such, it should not be necessary to test that services are running, packages are
+Ansible resources are models of desired-state. As such, it should not be necessary to test that services are started, packages are
installed, or other such things. Ansible is the system that will ensure these things are declaratively true. Instead, assert these
things in your playbooks.
.. code-block:: yaml
tasks:
- - service: name=foo state=running enabled=yes
+ - service: name=foo state=started enabled=yes
-If you think the service may not be running, the best thing to do is request it to be running. If the service fails to start, Ansible
+If you think the service may not be started, the best thing to do is request it to be started. If the service fails to start, Ansible
will yell appropriately. (This should not be confused with whether the service is doing something functional, which we'll show more about how to
do later).
@@ -114,14 +114,14 @@ Testing Lifecycle
If writing some degree of basic validation of your application into your playbooks, they will run every time you deploy.
-As such, deploying into a local development VM and a stage environment will both validate that things are according to plan
+As such, deploying into a local development VM and a staging environment will both validate that things are according to plan
ahead of your production deploy.
Your workflow may be something like this::
- Use the same playbook all the time with embedded tests in development
- - Use the playbook to deploy to a stage environment (with the same playbooks) that simulates production
- - Run an integration test battery written by your QA team against stage
+ - Use the playbook to deploy to a staging environment (with the same playbooks) that simulates production
+ - Run an integration test battery written by your QA team against staging
- Deploy to production, with the same integrated tests.
Something like an integration test battery should be written by your QA team if you are a production webservice. This would include
@@ -213,7 +213,7 @@ If desired, the above techniques may be extended to enable continuous deployment
The workflow may look like this::
- Write and use automation to deploy local development VMs
- - Have a CI system like Jenkins deploy to a stage environment on every code change
+ - Have a CI system like Jenkins deploy to a staging environment on every code change
- The deploy job calls testing scripts to pass/fail a build on every deploy
- If the deploy job succeeds, it runs the same deploy playbook against production inventory
@@ -241,7 +241,7 @@ as part of a Continuous Integration/Continuous Delivery pipeline, as is covered
The focus should not be on infrastructure testing, but on application testing, so we strongly encourage getting together with your
QA team and ask what sort of tests would make sense to run every time you deploy development VMs, and which sort of tests they would like
-to run against the stage environment on every deploy. Obviously at the development stage, unit tests are great too. But don't unit
+to run against the staging environment on every deploy. Obviously at the development stage, unit tests are great too. But don't unit
test your playbook. Ansible describes states of resources declaratively, so you don't have to. If there are cases where you want
to be sure of something though, that's great, and things like stat/assert are great go-to modules for that purpose.
diff --git a/examples/ansible.cfg b/examples/ansible.cfg
index 85eada17cc8..b6845c07033 100644
--- a/examples/ansible.cfg
+++ b/examples/ansible.cfg
@@ -38,6 +38,11 @@ gathering = implicit
# uncomment this to disable SSH key host checking
#host_key_checking = False
+# change the default callback
+#callback_stdout = skippy
+# enable additional callbacks
+#callback_whitelist = timer, mail
+
# change this for alternative sudo implementations
sudo_exe = sudo
@@ -68,6 +73,11 @@ timeout = 10
# this can also be set to 'merge'.
#hash_behaviour = replace
+# by default, variables from roles will be visible in the global variable
+# scope. To prevent this, the following option can be enabled, and only
+# tasks and handlers within the role will see the variables there
+#private_role_vars = yes
+
# list any Jinja2 extensions to enable here:
#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
@@ -75,10 +85,12 @@ timeout = 10
# if passing --private-key to ansible or ansible-playbook
#private_key_file = /path/to/file
-# format of string {{ ansible_managed }} available within Jinja2
+# format of string {{ ansible_managed }} available within Jinja2
# templates indicates to users editing templates files will be replaced.
# replacing {file}, {host} and {uid} and strftime codes with proper values.
-ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
+#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
+# This short version is better used in templates as it won't flag the file as changed every run.
+ansible_managed = Ansible managed: {file} on {host}
# by default, ansible-playbook will display "Skipping [host]" if it determines a task
# should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
@@ -161,8 +173,8 @@ fact_caching = memory
[privilege_escalation]
#become=True
-#become_method='sudo'
-#become_user='root'
+#become_method=sudo
+#become_user=root
#become_ask_pass=False
[paramiko_connection]
@@ -208,6 +220,11 @@ fact_caching = memory
# (default is sftp)
#scp_if_ssh = True
+# if False, sftp will not use batch mode to transfer files. This may make some
+# types of file transfer failures impossible to catch, however, and should
+# only be disabled if your sftp version has problems with batch mode
+#sftp_batch_mode = False
+
[accelerate]
accelerate_port = 5099
accelerate_timeout = 30
@@ -226,5 +243,5 @@ accelerate_daemon_timeout = 30
[selinux]
# file systems that require special treatment when dealing with security context
# the default behaviour that copies the existing context or uses the user default
-# needs to be changed to use the file system dependant context.
-#special_context_filesystems=nfs,vboxsf,fuse
+# needs to be changed to use the file system dependent context.
+#special_context_filesystems=nfs,vboxsf,fuse,ramfs
diff --git a/hacking/README.md b/hacking/README.md
index ae8db7e3a9b..be192495192 100644
--- a/hacking/README.md
+++ b/hacking/README.md
@@ -33,6 +33,22 @@ Example:
This is a good way to insert a breakpoint into a module, for instance.
+For more complex arguments, such as the following YAML:
+
+```yaml
+parent:
+ child:
+ - item: first
+ val: foo
+ - item: second
+ val: boo
+```
+
+Use:
+
+    $ ./hacking/test-module -m module \
+        -a '{"parent": {"child": [{"item": "first", "val": "foo"}, {"item": "second", "val": "boo"}]}}'
+
Module-formatter
----------------
diff --git a/hacking/authors.sh b/hacking/authors.sh
index 7c97840b2fb..633240fe159 100755
--- a/hacking/authors.sh
+++ b/hacking/authors.sh
@@ -4,7 +4,7 @@ set -e
# Get a list of authors ordered by number of commits
# and remove the commit count column
-AUTHORS=$(git --no-pager shortlog -nse | cut -f 2- | sort -f)
+AUTHORS=$(git --no-pager shortlog -nse --no-merges | cut -f 2- )
if [ -z "$AUTHORS" ] ; then
echo "Authors list was empty"
exit 1
diff --git a/hacking/env-setup b/hacking/env-setup
index 49390dfe5e0..7b48db28dbf 100644
--- a/hacking/env-setup
+++ b/hacking/env-setup
@@ -53,13 +53,15 @@ if [ "$ANSIBLE_HOME" != "$PWD" ] ; then
else
current_dir="$ANSIBLE_HOME"
fi
-cd "$ANSIBLE_HOME"
-if [ "$verbosity" = silent ] ; then
- gen_egg_info > /dev/null 2>&1
-else
- gen_egg_info
-fi
-cd "$current_dir"
+(
+ cd "$ANSIBLE_HOME"
+ if [ "$verbosity" = silent ] ; then
+ gen_egg_info > /dev/null 2>&1
+ else
+ gen_egg_info
+ fi
+ cd "$current_dir"
+)
if [ "$verbosity" != silent ] ; then
cat <<- EOF
diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py
index 32df84deb9b..30b8d6a1035 100755
--- a/hacking/module_formatter.py
+++ b/hacking/module_formatter.py
@@ -31,17 +31,19 @@ import time
import datetime
import subprocess
import cgi
+import warnings
from jinja2 import Environment, FileSystemLoader
-import ansible.utils
-import ansible.utils.module_docs as module_docs
+from ansible.utils import module_docs
+from ansible.utils.vars import merge_hash
+from ansible.errors import AnsibleError
#####################################################################################
# constants and paths
# if a module is added in a version of Ansible older than this, don't print the version added information
# in the module documentation because everyone is assumed to be running something newer than this already.
-TO_OLD_TO_BE_NOTABLE = 1.0
+TO_OLD_TO_BE_NOTABLE = 1.3
# Get parent directory of the directory this script lives in
MODULEDIR=os.path.abspath(os.path.join(
@@ -66,11 +68,14 @@ NOTCORE = " (E)"
def rst_ify(text):
''' convert symbols like I(this is in italics) to valid restructured text '''
- t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
- t = _BOLD.sub(r'**' + r"\1" + r"**", t)
- t = _MODULE.sub(r':ref:`' + r"\1 <\1>" + r"`", t)
- t = _URL.sub(r"\1", t)
- t = _CONST.sub(r'``' + r"\1" + r"``", t)
+ try:
+ t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
+ t = _BOLD.sub(r'**' + r"\1" + r"**", t)
+ t = _MODULE.sub(r':ref:`' + r"\1 <\1>" + r"`", t)
+ t = _URL.sub(r"\1", t)
+ t = _CONST.sub(r'``' + r"\1" + r"``", t)
+ except Exception as e:
+ raise AnsibleError("Could not process (%s) : %s" % (str(text), str(e)))
return t
@@ -135,7 +140,7 @@ def list_modules(module_dir, depth=0):
res = list_modules(d, depth + 1)
for key in res.keys():
if key in categories:
- categories[key] = ansible.utils.merge_hash(categories[key], res[key])
+ categories[key] = merge_hash(categories[key], res[key])
res.pop(key, None)
if depth < 2:
@@ -214,6 +219,17 @@ def jinja2_environment(template_dir, typ):
return env, template, outputname
#####################################################################################
+def too_old(added):
+ if not added:
+ return False
+ try:
+ added_tokens = str(added).split(".")
+ readded = added_tokens[0] + "." + added_tokens[1]
+ added_float = float(readded)
+ except ValueError as e:
+ warnings.warn("Could not parse %s: %s" % (added, str(e)))
+ return False
+ return (added_float < TO_OLD_TO_BE_NOTABLE)
def process_module(module, options, env, template, outputname, module_map, aliases):
@@ -236,11 +252,11 @@ def process_module(module, options, env, template, outputname, module_map, alias
print "rendering: %s" % module
# use ansible core library to parse out doc metadata YAML and plaintext examples
- doc, examples, returndocs= ansible.utils.module_docs.get_docstring(fname, verbose=options.verbose)
+ doc, examples, returndocs = module_docs.get_docstring(fname, verbose=options.verbose)
# crash if module is missing documentation and not explicitly hidden from docs index
if doc is None:
- if module in ansible.utils.module_docs.BLACKLIST_MODULES:
+ if module in module_docs.BLACKLIST_MODULES:
return "SKIPPED"
else:
sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module))
@@ -271,15 +287,15 @@ def process_module(module, options, env, template, outputname, module_map, alias
added = doc['version_added']
# don't show version added information if it's too old to be called out
- if added:
- added_tokens = str(added).split(".")
- added = added_tokens[0] + "." + added_tokens[1]
- added_float = float(added)
- if added and added_float < TO_OLD_TO_BE_NOTABLE:
- del doc['version_added']
+ if too_old(added):
+ del doc['version_added']
- for (k,v) in doc['options'].iteritems():
- all_keys.append(k)
+ if 'options' in doc and doc['options']:
+ for (k,v) in doc['options'].iteritems():
+ # don't show version added information if it's too old to be called out
+ if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']):
+ del doc['options'][k]['version_added']
+ all_keys.append(k)
all_keys = sorted(all_keys)
@@ -296,7 +312,10 @@ def process_module(module, options, env, template, outputname, module_map, alias
# here is where we build the table of contents...
- text = template.render(doc)
+ try:
+ text = template.render(doc)
+ except Exception as e:
+ raise AnsibleError("Failed to render doc for %s: %s" % (fname, str(e)))
write_data(text, options, outputname, module)
return doc['short_description']
@@ -314,7 +333,7 @@ def print_modules(module, category_file, deprecated, core, options, env, templat
result = process_module(modname, options, env, template, outputname, module_map, aliases)
if result != "SKIPPED":
- category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module))
+ category_file.write(" %s - %s <%s_module>\n" % (modstring, rst_ify(result), module))
def process_category(category, categories, options, env, template, outputname):
@@ -328,7 +347,7 @@ def process_category(category, categories, options, env, template, outputname):
category_file = open(category_file_path, "w")
print "*** recording category %s in %s ***" % (category, category_file_path)
- # TODO: start a new category file
+ # start a new category file
category = category.replace("_"," ")
category = category.title()
@@ -351,7 +370,6 @@ def process_category(category, categories, options, env, template, outputname):
deprecated.append(module)
elif '/core/' in module_map[module]:
core.append(module)
-
modules.append(module)
modules.sort()
diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2
index f6f38e59101..eccae4cb777 100644
--- a/hacking/templates/rst.j2
+++ b/hacking/templates/rst.j2
@@ -10,6 +10,11 @@
@{ title }@
@{ '+' * title_len }@
+{% if version_added is defined -%}
+.. versionadded:: @{ version_added }@
+{% endif %}
+
+
.. contents::
:local:
:depth: 1
@@ -21,10 +26,6 @@
#
--------------------------------------------#}
-{% if aliases is defined -%}
-Aliases: @{ ','.join(aliases) }@
-{% endif %}
-
{% if deprecated is defined -%}
DEPRECATED
----------
@@ -35,14 +36,24 @@ DEPRECATED
Synopsis
--------
-{% if version_added is defined -%}
-.. versionadded:: @{ version_added }@
-{% endif %}
-
{% for desc in description -%}
@{ desc | convert_symbols_to_format }@
{% endfor %}
+{% if aliases is defined -%}
+Aliases: @{ ','.join(aliases) }@
+{% endif %}
+
+{% if requirements %}
+Requirements
+------------
+
+{% for req in requirements %}
+ * @{ req | convert_symbols_to_format }@
+{% endfor %}
+{% endif %}
+
+
{% if options -%}
Options
-------
@@ -60,7 +71,7 @@ Options
{% for k in option_keys %}
{% set v = options[k] %}
- @{ k }@
+ @{ k }@{% if v['version_added'] %} (added in @{v['version_added']}@){% endif %}
{% if v.get('required', False) %}yes{% else %}no{% endif %}
{% if v['default'] %}@{ v['default'] }@{% endif %}
{% if v.get('type', 'not_bool') == 'bool' %}
@@ -68,46 +79,30 @@ Options
{% else %}
{% for choice in v.get('choices',[]) -%}@{ choice }@ {% endfor -%}
{% endif %}
- {% for desc in v.description -%}@{ desc | html_ify }@{% endfor -%}{% if v['version_added'] %} (added in Ansible @{v['version_added']}@){% endif %}
-
+ {% for desc in v.description -%}@{ desc | html_ify }@
{% endfor -%} {% if 'aliases' in v and v.aliases -%}
+ aliases: @{ v.aliases|join(', ') }@
{%- endif %}
{% endfor %}
-{% endif %}
-
-{% if requirements %}
-{% for req in requirements %}
+
-.. note:: Requires @{ req | convert_symbols_to_format }@
-
-{% endfor %}
{% endif %}
-{% if examples or plainexamples %}
+
+{% if examples or plainexamples -%}
Examples
--------
-.. raw:: html
+ ::
{% for example in examples %}
- {% if example['description'] %}@{ example['description'] | html_ify }@
{% endif %}
-
-
+{% if example['description'] %}@{ example['description'] | indent(4, True) }@{% endif %}
@{ example['code'] | escape | indent(4, True) }@
-
-
{% endfor %}
-
-
-{% if plainexamples %}
-
-::
-
-@{ plainexamples | indent(4, True) }@
-{% endif %}
+{% if plainexamples %}@{ plainexamples | indent(4, True) }@{% endif %}
{% endif %}
-{% if returndocs %}
+{% if returndocs -%}
Return Values
-------------
@@ -164,7 +159,10 @@ Common return values are documented here :doc:`common_return_values`, the follow
{% endif %}
-{% if notes %}
+{% if notes -%}
+Notes
+-----
+
{% for note in notes %}
.. note:: @{ note | convert_symbols_to_format }@
{% endfor %}
@@ -177,31 +175,14 @@ Common return values are documented here :doc:`common_return_values`, the follow
This is a Core Module
---------------------
-The source of this module is hosted on GitHub in the `ansible-modules-core `_ repo.
-
-If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
-
-Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_.
-
-Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree.
-
-This is a "core" ansible module, which means it will receive slightly higher priority for all requests than those in the "extras" repos.
+For more information on what this means please read :doc:`modules_core`
{% else %}
This is an Extras Module
------------------------
-This source of this module is hosted on GitHub in the `ansible-modules-extras `_ repo.
-
-If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
-
-Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_.
-
-Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree.
-
-Note that this module is designated a "extras" module. Non-core modules are still fully usable, but may receive slightly lower response rates for issues and pull requests.
-Popular "extras" modules may be promoted to core modules over time.
+For more information on what this means please read :doc:`modules_extra`
{% endif %}
{% endif %}
diff --git a/hacking/test-module b/hacking/test-module
index c226f32e889..bdb91d0d5b0 100755
--- a/hacking/test-module
+++ b/hacking/test-module
@@ -26,7 +26,8 @@
# test-module -m ../library/commands/command -a "/bin/sleep 3"
# test-module -m ../library/system/service -a "name=httpd ensure=restarted"
# test-module -m ../library/system/service -a "name=httpd ensure=restarted" --debugger /usr/bin/pdb
-# test-modulr -m ../library/file/lineinfile -a "dest=/etc/exports line='/srv/home hostname1(rw,sync)'" --check
+# test-module -m ../library/file/lineinfile -a "dest=/etc/exports line='/srv/home hostname1(rw,sync)'" --check
+# test-module -m ../library/commands/command -a "echo hello" -n -o "test_hello"
import sys
import base64
@@ -34,8 +35,11 @@ import os
import subprocess
import traceback
import optparse
-import ansible.utils as utils
-import ansible.module_common as module_common
+import ansible.utils.vars as utils_vars
+from ansible.parsing import DataLoader
+from ansible.parsing.utils.jsonify import jsonify
+from ansible.parsing.splitter import parse_kv
+import ansible.executor.module_common as module_common
import ansible.constants as C
try:
@@ -59,9 +63,15 @@ def parse():
help="path to python debugger (e.g. /usr/bin/pdb)")
parser.add_option('-I', '--interpreter', dest='interpreter',
help="path to interpreter to use for this module (e.g. ansible_python_interpreter=/usr/bin/python)",
- metavar='INTERPRETER_TYPE=INTERPRETER_PATH')
+ metavar='INTERPRETER_TYPE=INTERPRETER_PATH',
+ default='python={}'.format(sys.executable))
parser.add_option('-c', '--check', dest='check', action='store_true',
help="run the module in check mode")
+ parser.add_option('-n', '--noexecute', dest='execute', action='store_false',
+ default=True, help="do not run the resulting module")
+ parser.add_option('-o', '--output', dest='filename',
+ help="Filename for resulting module",
+ default="~/.ansible_module_generated")
options, args = parser.parse_args()
if not options.module_path:
parser.print_help()
@@ -74,34 +84,39 @@ def write_argsfile(argstring, json=False):
argspath = os.path.expanduser("~/.ansible_test_module_arguments")
argsfile = open(argspath, 'w')
if json:
- args = utils.parse_kv(argstring)
- argstring = utils.jsonify(args)
+ args = parse_kv(argstring)
+ argstring = jsonify(args)
argsfile.write(argstring)
argsfile.close()
return argspath
-def boilerplate_module(modfile, args, interpreter, check):
+def boilerplate_module(modfile, args, interpreter, check, destfile):
""" simulate what ansible does with new style modules """
#module_fh = open(modfile)
#module_data = module_fh.read()
#module_fh.close()
- replacer = module_common.ModuleReplacer()
+ #replacer = module_common.ModuleReplacer()
+ loader = DataLoader()
#included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1
complex_args = {}
if args.startswith("@"):
# Argument is a YAML file (JSON is a subset of YAML)
- complex_args = utils.combine_vars(complex_args, utils.parse_yaml_from_file(args[1:]))
+ complex_args = utils_vars.combine_vars(complex_args, loader.load_from_file(args[1:]))
args=''
elif args.startswith("{"):
# Argument is a YAML document (not a file)
- complex_args = utils.combine_vars(complex_args, utils.parse_yaml(args))
+ complex_args = utils_vars.combine_vars(complex_args, loader.load(args))
args=''
- inject = {}
+ if args:
+ parsed_args = parse_kv(args)
+ complex_args = utils_vars.combine_vars(complex_args, parsed_args)
+
+ task_vars = {}
if interpreter:
if '=' not in interpreter:
print 'interpreter must by in the form of ansible_python_interpreter=/usr/bin/python'
@@ -111,19 +126,18 @@ def boilerplate_module(modfile, args, interpreter, check):
interpreter_type = 'ansible_%s' % interpreter_type
if not interpreter_type.endswith('_interpreter'):
interpreter_type = '%s_interpreter' % interpreter_type
- inject[interpreter_type] = interpreter_path
+ task_vars[interpreter_type] = interpreter_path
if check:
complex_args['CHECKMODE'] = True
- (module_data, module_style, shebang) = replacer.modify_module(
- modfile,
+ (module_data, module_style, shebang) = module_common.modify_module(
+ modfile,
complex_args,
- args,
- inject
+ task_vars=task_vars
)
- modfile2_path = os.path.expanduser("~/.ansible_module_generated")
+ modfile2_path = os.path.expanduser(destfile)
print "* including generated source, if any, saving to: %s" % modfile2_path
print "* this may offset any line numbers in tracebacks/debuggers!"
modfile2 = open(modfile2_path, 'w')
@@ -150,7 +164,7 @@ def runtest( modfile, argspath):
print "RAW OUTPUT"
print out
print err
- results = utils.parse_json(out)
+ results = json.loads(out)
except:
print "***********************************"
print "INVALID OUTPUT FORMAT"
@@ -160,7 +174,7 @@ def runtest( modfile, argspath):
print "***********************************"
print "PARSED OUTPUT"
- print utils.jsonify(results,format=True)
+ print jsonify(results,format=True)
def rundebug(debugger, modfile, argspath):
"""Run interactively with console debugger."""
@@ -173,9 +187,9 @@ def rundebug(debugger, modfile, argspath):
def main():
options, args = parse()
- (modfile, module_style) = boilerplate_module(options.module_path, options.module_args, options.interpreter, options.check)
+ (modfile, module_style) = boilerplate_module(options.module_path, options.module_args, options.interpreter, options.check, options.filename)
- argspath=None
+ argspath = None
if module_style != 'new':
if module_style == 'non_native_want_json':
argspath = write_argsfile(options.module_args, json=True)
@@ -183,10 +197,11 @@ def main():
argspath = write_argsfile(options.module_args, json=False)
else:
raise Exception("internal error, unexpected module style: %s" % module_style)
- if options.debugger:
- rundebug(options.debugger, modfile, argspath)
- else:
- runtest(modfile, argspath)
+ if options.execute:
+ if options.debugger:
+ rundebug(options.debugger, modfile, argspath)
+ else:
+ runtest(modfile, argspath)
if __name__ == "__main__":
main()
diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py
index ba5ca83b723..704b6456f74 100644
--- a/lib/ansible/__init__.py
+++ b/lib/ansible/__init__.py
@@ -15,4 +15,4 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
__version__ = '2.0.0'
-__author__ = 'Michael DeHaan'
+__author__ = 'Ansible, Inc.'
diff --git a/v2/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py
similarity index 75%
rename from v2/ansible/cli/__init__.py
rename to lib/ansible/cli/__init__.py
index 4a7f5bbacc1..a9599da1a47 100644
--- a/v2/ansible/cli/__init__.py
+++ b/lib/ansible/cli/__init__.py
@@ -31,8 +31,10 @@ import subprocess
from ansible import __version__
from ansible import constants as C
-from ansible.errors import AnsibleError
+from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.utils.unicode import to_bytes
+from ansible.utils.display import Display
+from ansible.utils.path import is_executable
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
@@ -98,7 +100,12 @@ class CLI(object):
raise Exception("Need to implement!")
def run(self):
- raise Exception("Need to implement!")
+
+ if self.options.verbosity > 0:
+ if C.CONFIG_FILE:
+ self.display.display("Using %s as config file" % C.CONFIG_FILE)
+ else:
+ self.display.display("No config file found; using defaults")
@staticmethod
def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
@@ -107,21 +114,24 @@ class CLI(object):
vault_pass = None
new_vault_pass = None
- if ask_vault_pass:
- vault_pass = getpass.getpass(prompt="Vault password: ")
-
- if ask_vault_pass and confirm_vault:
- vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
- if vault_pass != vault_pass2:
- raise errors.AnsibleError("Passwords do not match")
-
- if ask_new_vault_pass:
- new_vault_pass = getpass.getpass(prompt="New Vault password: ")
-
- if ask_new_vault_pass and confirm_new:
- new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
- if new_vault_pass != new_vault_pass2:
- raise errors.AnsibleError("Passwords do not match")
+ try:
+ if ask_vault_pass:
+ vault_pass = getpass.getpass(prompt="Vault password: ")
+
+ if ask_vault_pass and confirm_vault:
+ vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
+ if vault_pass != vault_pass2:
+ raise errors.AnsibleError("Passwords do not match")
+
+ if ask_new_vault_pass:
+ new_vault_pass = getpass.getpass(prompt="New Vault password: ")
+
+ if ask_new_vault_pass and confirm_new:
+ new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
+ if new_vault_pass != new_vault_pass2:
+ raise errors.AnsibleError("Passwords do not match")
+ except EOFError:
+ pass
# enforce no newline chars at the end of passwords
if vault_pass:
@@ -140,20 +150,23 @@ class CLI(object):
becomepass = None
become_prompt = ''
- if op.ask_pass:
- sshpass = getpass.getpass(prompt="SSH password: ")
- become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper()
- if sshpass:
- sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
- else:
- become_prompt = "%s password: " % op.become_method.upper()
-
- if op.become_ask_pass:
- becomepass = getpass.getpass(prompt=become_prompt)
- if op.ask_pass and becomepass == '':
- becomepass = sshpass
- if becomepass:
- becomepass = to_bytes(becomepass)
+ try:
+ if op.ask_pass:
+ sshpass = getpass.getpass(prompt="SSH password: ")
+ become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper()
+ if sshpass:
+ sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
+ else:
+ become_prompt = "%s password: " % op.become_method.upper()
+
+ if op.become_ask_pass:
+ becomepass = getpass.getpass(prompt=become_prompt)
+ if op.ask_pass and becomepass == '':
+ becomepass = sshpass
+ if becomepass:
+ becomepass = to_bytes(becomepass)
+ except EOFError:
+ pass
return (sshpass, becomepass)
@@ -170,35 +183,45 @@ class CLI(object):
self.options.become_method = 'sudo'
elif self.options.su:
self.options.become = True
- options.become_method = 'su'
+ self.options.become_method = 'su'
- def validate_conflicts(self):
+ def validate_conflicts(self, vault_opts=False, runas_opts=False, fork_opts=False):
''' check for conflicting options '''
op = self.options
- # Check for vault related conflicts
- if (op.ask_vault_pass and op.vault_password_file):
- self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
+ if vault_opts:
+ # Check for vault related conflicts
+ if (op.ask_vault_pass and op.vault_password_file):
+ self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
- # Check for privilege escalation conflicts
- if (op.su or op.su_user or op.ask_su_pass) and \
- (op.sudo or op.sudo_user or op.ask_sudo_pass) or \
- (op.su or op.su_user or op.ask_su_pass) and \
- (op.become or op.become_user or op.become_ask_pass) or \
- (op.sudo or op.sudo_user or op.ask_sudo_pass) and \
- (op.become or op.become_user or op.become_ask_pass):
+ if runas_opts:
+ # Check for privilege escalation conflicts
+ if (op.su or op.su_user or op.ask_su_pass) and \
+ (op.sudo or op.sudo_user or op.ask_sudo_pass) or \
+ (op.su or op.su_user or op.ask_su_pass) and \
+ (op.become or op.become_user or op.become_ask_pass) or \
+ (op.sudo or op.sudo_user or op.ask_sudo_pass) and \
+ (op.become or op.become_user or op.become_ask_pass):
+
+ self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
+ "and su arguments ('-su', '--su-user', and '--ask-su-pass') "
+ "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
+ " are exclusive of each other")
+
+ if fork_opts:
+ if op.forks < 1:
+ self.parser.error("The number of processes (--forks) must be >= 1")
- self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
- "and su arguments ('-su', '--su-user', and '--ask-su-pass') "
- "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
- " are exclusive of each other")
+ @staticmethod
+ def expand_tilde(option, opt, value, parser):
+ setattr(parser.values, option.dest, os.path.expanduser(value))
@staticmethod
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False,
- async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False, epilog=None):
+ async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False, epilog=None, fork_opts=False):
''' create an options parser for most ansible scripts '''
#FIXME: implemente epilog parsing
@@ -210,28 +233,31 @@ class CLI(object):
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
if runtask_opts:
- parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
- help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
parser.add_option('-i', '--inventory-file', dest='inventory',
help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST,
- default=C.DEFAULT_HOST_LIST)
+ default=C.DEFAULT_HOST_LIST, action="callback", callback=CLI.expand_tilde, type=str)
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-M', '--module-path', dest='module_path',
- help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, default=None)
+ help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, default=None,
+ action="callback", callback=CLI.expand_tilde, type=str)
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON", default=[])
+ if fork_opts:
+ parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
+ help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
+ parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
+ help='further limit selected hosts to an additional pattern')
+
if vault_opts:
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE,
- dest='vault_password_file', help="vault password file")
-
+ dest='vault_password_file', help="vault password file", action="callback",
+ callback=CLI.expand_tilde, type=str)
if subset_opts:
- parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
- help='further limit selected hosts to an additional pattern')
parser.add_option('-t', '--tags', dest='tags', default='all',
help="only run plays and tasks tagged with these values")
parser.add_option('--skip-tags', dest='skip_tags',
@@ -272,7 +298,7 @@ class CLI(object):
if connect_opts:
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
help='ask for connection password')
- parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
+ parser.add_option('--private-key','--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
@@ -281,7 +307,6 @@ class CLI(object):
parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
-
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int',
dest='poll_interval',
@@ -301,7 +326,7 @@ class CLI(object):
)
if meta_opts:
- parser.add_option('--force-handlers', dest='force_handlers', action='store_true',
+ parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
help="run handlers even if a task fails")
parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
help="clear the fact cache")
@@ -315,6 +340,7 @@ class CLI(object):
gitinfo = CLI._gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
+ result += "\n config file = %s" % C.CONFIG_FILE
result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
return result
@@ -411,22 +437,22 @@ class CLI(object):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
- pager_print(text)
+ print(text)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
- pager_print(text)
+ print(text)
else:
CLI.pager_pipe(text, os.environ['PAGER'])
elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0:
CLI.pager_pipe(text, 'less')
else:
- pager_print(text)
+ print(text)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
- os.environ['LESS'] = LESS_OPTS
+ os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=text)
@@ -445,3 +471,33 @@ class CLI(object):
t = self._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
return t
+
+ @staticmethod
+ def read_vault_password_file(vault_password_file):
+ """
+ Read a vault password from a file or if executable, execute the script and
+ retrieve password from STDOUT
+ """
+
+ this_path = os.path.realpath(os.path.expanduser(vault_password_file))
+ if not os.path.exists(this_path):
+ raise AnsibleError("The vault password file %s was not found" % this_path)
+
+ if is_executable(this_path):
+ try:
+ # STDERR not captured to make it easier for users to prompt for input in their scripts
+ p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
+ except OSError as e:
+                raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (this_path, e))
+ stdout, stderr = p.communicate()
+ vault_pass = stdout.strip('\r\n')
+ else:
+ try:
+ f = open(this_path, "rb")
+ vault_pass=f.read().strip()
+ f.close()
+ except (OSError, IOError) as e:
+ raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))
+
+ return vault_pass
+
diff --git a/v2/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py
similarity index 81%
rename from v2/ansible/cli/adhoc.py
rename to lib/ansible/cli/adhoc.py
index 16c2dc9e421..a92869fafe7 100644
--- a/v2/ansible/cli/adhoc.py
+++ b/lib/ansible/cli/adhoc.py
@@ -17,15 +17,14 @@
########################################################
from ansible import constants as C
-from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.cli import CLI
+from ansible.errors import AnsibleOptionsError
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.inventory import Inventory
from ansible.parsing import DataLoader
from ansible.parsing.splitter import parse_kv
from ansible.playbook.play import Play
-from ansible.cli import CLI
-from ansible.utils.display import Display
-from ansible.utils.vault import read_vault_file
+from ansible.utils.vars import load_extra_vars
from ansible.vars import VariableManager
########################################################
@@ -45,6 +44,7 @@ class AdHocCLI(CLI):
check_opts=True,
runtask_opts=True,
vault_opts=True,
+ fork_opts=True,
)
# options unique to ansible ad-hoc
@@ -60,14 +60,24 @@ class AdHocCLI(CLI):
raise AnsibleOptionsError("Missing target hosts")
self.display.verbosity = self.options.verbosity
- self.validate_conflicts()
+ self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)
return True
+ def _play_ds(self, pattern):
+ return dict(
+ name = "Ansible Ad-Hoc",
+ hosts = pattern,
+ gather_facts = 'no',
+ tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args))), ]
+ )
def run(self):
''' use Runner lib to do SSH things '''
+ super(AdHocCLI, self).run()
+
+
# only thing left should be host pattern
pattern = self.args[0]
@@ -85,26 +95,32 @@ class AdHocCLI(CLI):
if self.options.vault_password_file:
# read vault_pass from a file
- vault_pass = read_vault_file(self.options.vault_password_file)
+ vault_pass = CLI.read_vault_password_file(self.options.vault_password_file)
elif self.options.ask_vault_pass:
vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0]
loader = DataLoader(vault_password=vault_pass)
variable_manager = VariableManager()
+ variable_manager.extra_vars = load_extra_vars(loader=loader, options=self.options)
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
+ variable_manager.set_inventory(inventory)
hosts = inventory.list_hosts(pattern)
if len(hosts) == 0:
self.display.warning("provided hosts list is empty, only localhost is available")
if self.options.listhosts:
+ self.display.display(' hosts (%d):' % len(hosts))
for host in hosts:
self.display.display(' %s' % host)
return 0
if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args:
- raise AnsibleError("No argument passed to %s module" % self.options.module_name)
+ err = "No argument passed to %s module" % self.options.module_name
+ if pattern.endswith(".yml"):
+ err = err + ' (did you mean to run ansible-playbook?)'
+ raise AnsibleOptionsError(err)
#TODO: implement async support
#if self.options.seconds:
@@ -115,31 +131,30 @@ class AdHocCLI(CLI):
# results = runner.run()
# create a pseudo-play to execute the specified module via a single task
- play_ds = dict(
- name = "Ansible Ad-Hoc",
- hosts = pattern,
- gather_facts = 'no',
- tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args))), ]
- )
-
+ play_ds = self._play_ds(pattern)
play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)
+ if self.options.one_line:
+ cb = 'oneline'
+ else:
+ cb = 'minimal'
+
# now create a task queue manager to execute the play
- tqm = None
+ self._tqm = None
try:
- tqm = TaskQueueManager(
+ self._tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
display=self.display,
options=self.options,
passwords=passwords,
- stdout_callback='minimal',
+ stdout_callback=cb,
)
- result = tqm.run(play)
+ result = self._tqm.run(play)
finally:
- if tqm:
- tqm.cleanup()
+ if self._tqm:
+ self._tqm.cleanup()
return result
diff --git a/v2/ansible/cli/doc.py b/lib/ansible/cli/doc.py
similarity index 78%
rename from v2/ansible/cli/doc.py
rename to lib/ansible/cli/doc.py
index 797a59f0381..910255cda77 100644
--- a/v2/ansible/cli/doc.py
+++ b/lib/ansible/cli/doc.py
@@ -61,6 +61,8 @@ class DocCLI(CLI):
def run(self):
+ super(DocCLI, self).run()
+
if self.options.module_path is not None:
for i in self.options.module_path.split(os.pathsep):
module_loader.add_directory(i)
@@ -81,43 +83,47 @@ class DocCLI(CLI):
text = ''
for module in self.args:
- filename = module_loader.find_plugin(module)
- if filename is None:
- self.display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
- continue
+ try:
+ filename = module_loader.find_plugin(module)
+ if filename is None:
+ self.display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
+ continue
- if any(filename.endswith(x) for x in self.BLACKLIST_EXTS):
- continue
+ if any(filename.endswith(x) for x in self.BLACKLIST_EXTS):
+ continue
- try:
- doc, plainexamples, returndocs = module_docs.get_docstring(filename)
- except:
- self.display.vvv(traceback.print_exc())
- self.display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
- continue
+ try:
+ doc, plainexamples, returndocs = module_docs.get_docstring(filename)
+ except:
+ self.display.vvv(traceback.print_exc())
+ self.display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
+ continue
- if doc is not None:
+ if doc is not None:
- all_keys = []
- for (k,v) in doc['options'].iteritems():
- all_keys.append(k)
- all_keys = sorted(all_keys)
- doc['option_keys'] = all_keys
+ all_keys = []
+ for (k,v) in doc['options'].iteritems():
+ all_keys.append(k)
+ all_keys = sorted(all_keys)
+ doc['option_keys'] = all_keys
- doc['filename'] = filename
- doc['docuri'] = doc['module'].replace('_', '-')
- doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
- doc['plainexamples'] = plainexamples
- doc['returndocs'] = returndocs
+ doc['filename'] = filename
+ doc['docuri'] = doc['module'].replace('_', '-')
+ doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
+ doc['plainexamples'] = plainexamples
+ doc['returndocs'] = returndocs
- if self.options.show_snippet:
- text += DocCLI.get_snippet_text(doc)
+ if self.options.show_snippet:
+ text += DocCLI.get_snippet_text(doc)
+ else:
+ text += DocCLI.get_man_text(doc)
else:
- text += DocCLI.get_man_text(doc)
- else:
- # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
- # probably a quoting issue.
- self.display.warning("module %s missing documentation (or could not parse documentation)\n" % module)
+ # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
+ # probably a quoting issue.
+ raise AnsibleError("Parsing produced an empty object.")
+ except Exception, e:
+ self.display.vvv(traceback.print_exc())
+ raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e)))
CLI.pager(text)
return 0
@@ -280,4 +286,20 @@ class DocCLI(CLI):
text.append(doc['returndocs'])
text.append('')
+ maintainers = set()
+ if 'author' in doc:
+ if isinstance(doc['author'], basestring):
+ maintainers.add(doc['author'])
+ else:
+ maintainers.update(doc['author'])
+
+ if 'maintainers' in doc:
+ if isinstance(doc['maintainers'], basestring):
+            maintainers.add(doc['maintainers'])
+        else:
+            maintainers.update(doc['maintainers'])
+
+ text.append('MAINTAINERS: ' + ', '.join(maintainers))
+ text.append('')
+
return "\n".join(text)
diff --git a/v2/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py
similarity index 78%
rename from v2/ansible/cli/galaxy.py
rename to lib/ansible/cli/galaxy.py
index abe85e0af8e..ddc10794c09 100644
--- a/v2/ansible/cli/galaxy.py
+++ b/lib/ansible/cli/galaxy.py
@@ -124,6 +124,8 @@ class GalaxyCLI(CLI):
def run(self):
+ super(GalaxyCLI, self).run()
+
# if not offline, get connect to galaxy api
if self.action in ("info","install") or (self.action == 'init' and not self.options.offline):
api_server = self.options.api_server
@@ -152,8 +154,7 @@ class GalaxyCLI(CLI):
option --ignore-errors was specified
"""
if not self.get_opt("ignore_errors", False):
- self.display.error('- you can use --ignore-errors to skip failed roles and finish processing the list.')
- return rc
+ raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
def execute_init(self):
"""
@@ -318,53 +319,59 @@ class GalaxyCLI(CLI):
roles_done = []
roles_left = []
- role_name = self.args.pop(0).strip()
-
- gr = GalaxyRole(self.galaxy, role_name)
if role_file:
- f = open(role_file, 'r')
- if role_file.endswith('.yaml') or role_file.endswith('.yml'):
- roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f))
- else:
- # roles listed in a file, one per line
- for rname in f.readlines():
- roles_left.append(GalaxyRole(self.galaxy, rname))
- f.close()
+ self.display.debug('Getting roles from %s' % role_file)
+ try:
+ f = open(role_file, 'r')
+ if role_file.endswith('.yaml') or role_file.endswith('.yml'):
+ try:
+ rolesparsed = map(self.parse_requirements_files, yaml.safe_load(f))
+ except Exception as e:
+ raise AnsibleError("%s does not seem like a valid yaml file: %s" % (role_file, str(e)))
+ roles_left = [GalaxyRole(self.galaxy, **r) for r in rolesparsed]
+ else:
+ # roles listed in a file, one per line
+ self.display.deprecated("Non yaml files for role requirements")
+ for rname in f.readlines():
+ if rname.startswith("#") or rname.strip() == '':
+ continue
+ roles_left.append(GalaxyRole(self.galaxy, rname.strip()))
+ f.close()
+ except (IOError,OSError) as e:
+ raise AnsibleError("Unable to read requirements file (%s): %s" % (role_file, str(e)))
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in self.args:
- roles_left.append(GalaxyRole(self.galaxy, rname))
+ roles_left.append(GalaxyRole(self.galaxy, rname.strip()))
while len(roles_left) > 0:
# query the galaxy API for the role data
role_data = None
role = roles_left.pop(0)
- role_src = role.src
- role_scm = role.scm
role_path = role.path
+ self.display.debug('Installing role %s' % role_path)
+
if role_path:
self.options.roles_path = role_path
else:
self.options.roles_path = roles_path
tmp_file = None
- if role_src and os.path.isfile(role_src):
+ installed = False
+ if role.src and os.path.isfile(role.src):
# installing a local tar.gz
- tmp_file = role_src
+ tmp_file = role.src
else:
- if role_scm:
+ if role.scm:
# create tar file from scm url
- tmp_file = scm_archive_role(role_scm, role_src, role.version, role.name)
- if role_src:
- if '://' in role_src:
- # just download a URL - version will probably be in the URL
- tmp_file = gr.fetch()
- else:
- role_data = self.api.lookup_role_by_name(role_src)
+ tmp_file = scm_archive_role(role.scm, role.src, role.version, role.name)
+ if role.src:
+ if '://' not in role.src:
+ role_data = self.api.lookup_role_by_name(role.src)
if not role_data:
- self.display.warning("- sorry, %s was not found on %s." % (role_src, self.options.api_server))
+ self.display.warning("- sorry, %s was not found on %s." % (role.src, self.options.api_server))
self.exit_without_ignore()
continue
@@ -377,47 +384,45 @@ class GalaxyCLI(CLI):
if len(role_versions) > 0:
loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
loose_versions.sort()
- role["version"] = str(loose_versions[-1])
+ role.version = str(loose_versions[-1])
else:
- role["version"] = 'master'
- elif role['version'] != 'master':
+ role.version = 'master'
+ elif role.version != 'master':
if role_versions and role.version not in [a.get('name', None) for a in role_versions]:
self.display.warning('role is %s' % role)
self.display.warning("- the specified version (%s) was not found in the list of available versions (%s)." % (role.version, role_versions))
self.exit_without_ignore()
continue
- # download the role. if --no-deps was specified, we stop here,
- # otherwise we recursively grab roles and all of their deps.
- tmp_file = gr.fetch(role_data)
- installed = False
+ # download the role. if --no-deps was specified, we stop here,
+ # otherwise we recursively grab roles and all of their deps.
+ tmp_file = role.fetch(role_data)
if tmp_file:
- installed = install_role(role.name, role.version, tmp_file, options)
+ installed = role.install(tmp_file)
# we're done with the temp file, clean it up
- if tmp_file != role_src:
+ if tmp_file != role.src:
os.unlink(tmp_file)
# install dependencies, if we want them
-
- # this should use new roledepenencies code
- #if not no_deps and installed:
- # if not role_data:
- # role_data = gr.get_metadata(role.get("name"), options)
- # role_dependencies = role_data['dependencies']
- # else:
- # role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id'])
- # for dep in role_dependencies:
- # if isinstance(dep, basestring):
- # dep = ansible.utils.role_spec_parse(dep)
- # else:
- # dep = ansible.utils.role_yaml_parse(dep)
- # if not get_role_metadata(dep["name"], options):
- # if dep not in roles_left:
- # print '- adding dependency: %s' % dep["name"]
- # roles_left.append(dep)
- # else:
- # print '- dependency %s already pending installation.' % dep["name"]
- # else:
- # print '- dependency %s is already installed, skipping.' % dep["name"]
+ if not no_deps and installed:
+ if not role_data:
+ role_data = gr.get_metadata(role.get("name"), options)
+ role_dependencies = role_data['dependencies']
+ else:
+ role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id'])
+ for dep in role_dependencies:
+ self.display.debug('Installing dep %s' % dep)
+ if isinstance(dep, basestring):
+ dep = ansible.utils.role_spec_parse(dep)
+ else:
+ dep = ansible.utils.role_yaml_parse(dep)
+ if not get_role_metadata(dep["name"], options):
+ if dep not in roles_left:
+ self.display.display('- adding dependency: %s' % dep["name"])
+ roles_left.append(dep)
+ else:
+ self.display.display('- dependency %s already pending installation.' % dep["name"])
+ else:
+ self.display.display('- dependency %s is already installed, skipping.' % dep["name"])
if not tmp_file or not installed:
self.display.warning("- %s was NOT installed successfully." % role.name)
@@ -458,7 +463,8 @@ class GalaxyCLI(CLI):
if len(self.args) == 1:
# show only the request role, if it exists
- gr = GalaxyRole(self.galaxy, self.name)
+ name = self.args.pop()
+ gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
install_info = gr.install_info
version = None
@@ -467,9 +473,9 @@ class GalaxyCLI(CLI):
if not version:
version = "(unknown version)"
# show some more info about single roles here
- self.display.display("- %s, %s" % (self.name, version))
+ self.display.display("- %s, %s" % (name, version))
else:
- self.display.display("- the role %s was not found" % self.name)
+ self.display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = self.get_opt('roles_path')
@@ -480,6 +486,7 @@ class GalaxyCLI(CLI):
raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path)
path_files = os.listdir(roles_path)
for path_file in path_files:
+ gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
install_info = gr.metadata
version = None
@@ -489,3 +496,36 @@ class GalaxyCLI(CLI):
version = "(unknown version)"
self.display.display("- %s, %s" % (path_file, version))
return 0
+
+ def parse_requirements_files(self, role):
+ if 'role' in role:
+ # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
+ role_info = role_spec_parse(role['role'])
+ if isinstance(role_info, dict):
+ # Warning: slight change in behaviour here. The name may now be
+ # overloaded: previously, name was only a parameter to the role,
+ # but now it is both a parameter to the role and the name that
+ # ansible-galaxy will install the role under on the local system.
+ if 'name' in role and 'name' in role_info:
+ del role_info['name']
+ role.update(role_info)
+ else:
+ # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
+ if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
+ role["src"] = "git+" + role["src"]
+
+ if '+' in role["src"]:
+ (scm, src) = role["src"].split('+')
+ role["scm"] = scm
+ role["src"] = src
+
+ if 'name' not in role:
+ role["name"] = GalaxyRole.url_to_spec(role["src"])
+
+ if 'version' not in role:
+ role['version'] = ''
+
+ if 'scm' not in role:
+ role['scm'] = None
+
+ return role
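
To make the normalization in parse_requirements_files() above easier to follow, here is a self-contained sketch of the same rules applied to plain dicts. The comma-split spec parsing and the URL-to-name fallback are deliberately simplified stand-ins for role_spec_parse() and GalaxyRole.url_to_spec(), and the example entries are hypothetical.

def normalize_requirement(entry):
    # Sketch only: normalize one requirements-file entry into the
    # {src, scm, name, version, ...} shape used when building roles.
    role = dict(entry)
    if 'role' in role:
        # old style: {role: "src,version,name", other_vars: ...}
        parts = [p.strip() for p in role.pop('role').split(',')]
        info = dict(zip(('src', 'version', 'name'), parts))
        if 'name' in role:
            info.pop('name', None)  # an explicit top-level name wins
        role.update(info)
    else:
        # new style: {src: "...", other_vars: ...}
        src = role['src']
        if 'github.com' in src and 'http' in src and '+' not in src and not src.endswith('.tar.gz'):
            role['src'] = 'git+' + src
        if '+' in role['src']:
            role['scm'], role['src'] = role['src'].split('+', 1)
    # crude stand-in for GalaxyRole.url_to_spec()
    role.setdefault('name', role['src'].rstrip('/').rsplit('/', 1)[-1].replace('.git', ''))
    role.setdefault('version', '')
    role.setdefault('scm', None)
    return role

print(normalize_requirement({'role': 'geerlingguy.nginx,2.0.0'}))
print(normalize_requirement({'src': 'https://github.com/acme/ansible-role-demo.git'}))
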
diff --git a/v2/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py
similarity index 72%
rename from v2/ansible/cli/playbook.py
rename to lib/ansible/cli/playbook.py
index eb60bacbd22..474c0b6b9ad 100644
--- a/v2/ansible/cli/playbook.py
+++ b/lib/ansible/cli/playbook.py
@@ -24,7 +24,7 @@ import sys
from ansible import constants as C
from ansible.cli import CLI
-from ansible.errors import AnsibleError
+from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.inventory import Inventory
from ansible.parsing import DataLoader
@@ -33,8 +33,7 @@ from ansible.playbook import Playbook
from ansible.playbook.task import Task
from ansible.utils.display import Display
from ansible.utils.unicode import to_unicode
-from ansible.utils.vars import combine_vars
-from ansible.utils.vault import read_vault_file
+from ansible.utils.vars import load_extra_vars
from ansible.vars import VariableManager
#---------------------------------------------------------------------------------------------------
@@ -55,30 +54,34 @@ class PlaybookCLI(CLI):
diff_opts=True,
runtask_opts=True,
vault_opts=True,
+ fork_opts=True,
)
# ansible playbook specific opts
parser.add_option('--list-tasks', dest='listtasks', action='store_true',
help="list all tasks that would be executed")
+ parser.add_option('--list-tags', dest='listtags', action='store_true',
+ help="list all available tags")
parser.add_option('--step', dest='step', action='store_true',
help="one-step-at-a-time: confirm each task before running")
- parser.add_option('--start-at-task', dest='start_at',
+ parser.add_option('--start-at-task', dest='start_at_task',
help="start the playbook at the task matching this name")
- parser.add_option('--list-tags', dest='listtags', action='store_true',
- help="list all available tags")
self.options, self.args = parser.parse_args()
- if len(self.args) == 0:
- raise AnsibleOptionsError("You must specify a playbook file to run")
self.parser = parser
+ if len(self.args) == 0:
+ raise AnsibleOptionsError("You must specify a playbook file to run")
+
self.display.verbosity = self.options.verbosity
- self.validate_conflicts()
+ self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)
def run(self):
+ super(PlaybookCLI, self).run()
+
# Note: slightly wrong, this is written so that implicit localhost
# Manage passwords
sshpass = None
@@ -87,39 +90,19 @@ class PlaybookCLI(CLI):
passwords = {}
# don't deal with privilege escalation or passwords when we don't need to
- if not self.options.listhosts and not self.options.listtasks and not self.options.listtags:
+ if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax:
self.normalize_become_options()
(sshpass, becomepass) = self.ask_passwords()
passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
if self.options.vault_password_file:
# read vault_pass from a file
- vault_pass = read_vault_file(self.options.vault_password_file)
+ vault_pass = CLI.read_vault_password_file(self.options.vault_password_file)
elif self.options.ask_vault_pass:
vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0]
loader = DataLoader(vault_password=vault_pass)
- extra_vars = {}
- for extra_vars_opt in self.options.extra_vars:
- extra_vars_opt = to_unicode(extra_vars_opt, errors='strict')
- if extra_vars_opt.startswith(u"@"):
- # Argument is a YAML file (JSON is a subset of YAML)
- data = loader.load_from_file(extra_vars_opt[1:])
- elif extra_vars_opt and extra_vars_opt[0] in u'[{':
- # Arguments as YAML
- data = loader.load(extra_vars_opt)
- else:
- # Arguments as Key-value
- data = parse_kv(extra_vars_opt)
- extra_vars = combine_vars(extra_vars, data)
-
- # FIXME: this should be moved inside the playbook executor code
- only_tags = self.options.tags.split(",")
- skip_tags = self.options.skip_tags
- if self.options.skip_tags is not None:
- skip_tags = self.options.skip_tags.split(",")
-
# initial error check, to make sure all specified playbooks are accessible
# before we start running anything through the playbook executor
for playbook in self.args:
@@ -131,7 +114,7 @@ class PlaybookCLI(CLI):
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager()
- variable_manager.set_extra_vars(extra_vars)
+ variable_manager.extra_vars = load_extra_vars(loader=loader, options=self.options)
# create the inventory, and filter it based on the subset specified (if any)
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
@@ -161,20 +144,45 @@ class PlaybookCLI(CLI):
if isinstance(results, list):
for p in results:
- self.display.display('\nplaybook: %s\n' % p['playbook'])
+ self.display.display('\nplaybook: %s' % p['playbook'])
+ i = 1
for play in p['plays']:
+ if play.name:
+ playname = play.name
+ else:
+ playname = '#' + str(i)
+
+ msg = "\n PLAY: %s" % (playname)
+ mytags = set()
+ if self.options.listtags and play.tags:
+ mytags = mytags.union(set(play.tags))
+ msg += ' TAGS: [%s]' % (','.join(mytags))
+
if self.options.listhosts:
- self.display.display("\n %s (%s): host count=%d" % (play['name'], play['pattern'], len(play['hosts'])))
- for host in play['hosts']:
- self.display.display(" %s" % host)
- if self.options.listtasks: #TODO: do we want to display block info?
- self.display.display("\n %s" % (play['name']))
- for task in play['tasks']:
- self.display.display(" %s" % task)
- if self.options.listtags: #TODO: fix once we figure out block handling above
- self.display.display("\n %s: tags count=%d" % (play['name'], len(play['tags'])))
- for tag in play['tags']:
- self.display.display(" %s" % tag)
+ playhosts = set(inventory.get_hosts(play.hosts))
+ msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
+ for host in playhosts:
+ msg += "\n %s" % host
+
+ self.display.display(msg)
+
+ if self.options.listtags or self.options.listtasks:
+ taskmsg = ' tasks:'
+
+ for block in play.compile():
+ if not block.has_tasks():
+ continue
+
+ j = 1
+ for task in block.block:
+ taskmsg += "\n %s" % task
+ if self.options.listtags and task.tags:
+ taskmsg += " TAGS: [%s]" % ','.join(mytags.union(set(task.tags)))
+ j = j + 1
+
+ self.display.display(taskmsg)
+
+ i = i + 1
return 0
else:
return results
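
As a quick illustration of how the new listing output above is assembled, the toy formatter below builds the same kind of per-play message from plain dicts. The dict keys and sample data are invented for the example; the real code walks Play and Task objects and compiled blocks.

def format_play_listing(index, play, list_hosts=False, list_tags=False):
    # play is a plain dict here: name, hosts pattern, tags, matched hosts
    name = play.get('name') or '#%d' % index
    msg = '\n  PLAY: %s' % name
    if list_tags and play.get('tags'):
        msg += ' TAGS: [%s]' % ','.join(sorted(set(play['tags'])))
    if list_hosts:
        matched = play.get('matched', [])
        msg += '\n    pattern: %s\n    hosts (%d):' % (play.get('hosts'), len(matched))
        for host in matched:
            msg += '\n      %s' % host
    return msg

print(format_play_listing(1, {'name': 'web', 'hosts': 'webservers',
                              'tags': ['deploy'], 'matched': ['web1', 'web2']},
                          list_hosts=True, list_tags=True))
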
diff --git a/v2/ansible/cli/pull.py b/lib/ansible/cli/pull.py
similarity index 86%
rename from v2/ansible/cli/pull.py
rename to lib/ansible/cli/pull.py
index 6b087d4ec06..569d5299a67 100644
--- a/v2/ansible/cli/pull.py
+++ b/lib/ansible/cli/pull.py
@@ -21,12 +21,14 @@ import os
import random
import shutil
import socket
+import sys
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.cli import CLI
+from ansible.plugins import module_loader
from ansible.utils.display import Display
-from ansible.utils.vault import read_vault_file
+from ansible.utils.cmd_functions import run_cmd
########################################################
@@ -48,6 +50,7 @@ class PullCLI(CLI):
usage='%prog [options]',
connect_opts=True,
vault_opts=True,
+ runtask_opts=True,
)
# options unique to pull
@@ -66,7 +69,9 @@ class PullCLI(CLI):
help='adds the hostkey for the repo url if not already added')
self.parser.add_option('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE,
help='Repository module name, which ansible will use to check out the repo. Default is %s.' % self.DEFAULT_REPO_TYPE)
-
+ self.parser.add_option('--verify-commit', dest='verify', default=False, action='store_true',
+ help='verify the GPG signature of the checked-out commit; if verification fails, abort running the playbook.'
+ ' This requires the corresponding VCS module to support such an operation')
self.options, self.args = self.parser.parse_args()
@@ -87,11 +92,13 @@ class PullCLI(CLI):
raise AnsibleOptionsError("Unsuported repo module %s, choices are %s" % (self.options.module_name, ','.join(self.SUPPORTED_REPO_MODULES)))
self.display.verbosity = self.options.verbosity
- self.validate_conflicts()
+ self.validate_conflicts(vault_opts=True)
def run(self):
''' use Runner lib to do SSH things '''
+ super(PullCLI, self).run()
+
# log command line
now = datetime.datetime.now()
self.display.display(now.strftime("Starting Ansible Pull at %F %T"))
@@ -100,7 +107,7 @@ class PullCLI(CLI):
# Build Checkout command
# Now construct the ansible command
limit_opts = 'localhost:%s:127.0.0.1' % socket.getfqdn()
- base_opts = '-c local --limit "%s"' % limit_opts
+ base_opts = '-c local "%s"' % limit_opts
if self.options.verbosity > 0:
base_opts += ' -%s' % ''.join([ "v" for x in range(0, self.options.verbosity) ])
@@ -120,15 +127,18 @@ class PullCLI(CLI):
if self.options.accept_host_key:
repo_opts += ' accept_hostkey=yes'
- if self.options.key_file:
- repo_opts += ' key_file=%s' % options.key_file
+ if self.options.private_key_file:
+ repo_opts += ' key_file=%s' % self.options.private_key_file
+
+ if self.options.verify:
+ repo_opts += ' verify_commit=yes'
- path = utils.plugins.module_finder.find_plugin(options.module_name)
+ path = module_loader.find_plugin(self.options.module_name)
if path is None:
- raise AnsibleOptionsError(("module '%s' not found.\n" % options.module_name))
+ raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name))
- bin_path = os.path.dirname(os.path.abspath(__file__))
- cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % (
+ bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
+ cmd = '%s/ansible -i "%s" %s -m %s -a "%s"' % (
bin_path, inv_opts, base_opts, self.options.module_name, repo_opts
)
@@ -141,7 +151,7 @@ class PullCLI(CLI):
time.sleep(self.options.sleep);
# RUN the Checkout command
- rc, out, err = cmd_functions.run_cmd(cmd, live=True)
+ rc, out, err = run_cmd(cmd, live=True)
if rc != 0:
if self.options.force:
@@ -173,14 +183,14 @@ class PullCLI(CLI):
os.chdir(self.options.dest)
# RUN THE PLAYBOOK COMMAND
- rc, out, err = cmd_functions.run_cmd(cmd, live=True)
+ rc, out, err = run_cmd(cmd, live=True)
if self.options.purge:
os.chdir('/')
try:
- shutil.rmtree(options.dest)
+ shutil.rmtree(self.options.dest)
except Exception, e:
- print >>sys.stderr, "Failed to remove %s: %s" % (options.dest, str(e))
+ self.display.error("Failed to remove %s: %s" % (self.options.dest, str(e)))
return rc
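
For reference, the checkout command that ansible-pull shells out to is plain string assembly of module options; the standalone sketch below mirrors that assembly. The bin path, inventory, repo URL and the name=/dest= shape of the repo arguments are assumptions made for the example.

import socket

def build_checkout_cmd(bin_path, inv_opts, module_name, repo_url, dest,
                       verbosity=0, accept_hostkey=False, key_file=None, verify=False):
    # Sketch: mirror the option-string assembly shown above.
    limit_opts = 'localhost:%s:127.0.0.1' % socket.getfqdn()
    base_opts = '-c local "%s"' % limit_opts
    if verbosity > 0:
        base_opts += ' -' + 'v' * verbosity
    repo_opts = 'name=%s dest=%s' % (repo_url, dest)  # assumed shape of the repo args
    if accept_hostkey:
        repo_opts += ' accept_hostkey=yes'
    if key_file:
        repo_opts += ' key_file=%s' % key_file
    if verify:
        repo_opts += ' verify_commit=yes'
    return '%s/ansible -i "%s" %s -m %s -a "%s"' % (bin_path, inv_opts, base_opts, module_name, repo_opts)

print(build_checkout_cmd('/usr/bin', '/etc/ansible/hosts', 'git',
                         'https://github.com/acme/site.git', '/var/lib/ansible/local',
                         verbosity=1, verify=True))
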
diff --git a/v2/ansible/cli/vault.py b/lib/ansible/cli/vault.py
similarity index 91%
rename from v2/ansible/cli/vault.py
rename to lib/ansible/cli/vault.py
index 6231f74332a..8cf1bf73885 100644
--- a/v2/ansible/cli/vault.py
+++ b/lib/ansible/cli/vault.py
@@ -58,7 +58,7 @@ class VaultCLI(CLI):
self.parser.set_usage("usage: %prog view [options] file_name")
elif self.action == "encrypt":
self.parser.set_usage("usage: %prog encrypt [options] file_name")
- elif action == "rekey":
+ elif self.action == "rekey":
self.parser.set_usage("usage: %prog rekey [options] file_name")
self.options, self.args = self.parser.parse_args()
@@ -69,12 +69,17 @@ class VaultCLI(CLI):
def run(self):
+ super(VaultCLI, self).run()
+
if self.options.vault_password_file:
# read vault_pass from a file
- self.vault_pass = read_vault_file(self.options.vault_password_file)
- elif self.options.ask_vault_pass:
+ self.vault_pass = CLI.read_vault_password_file(self.options.vault_password_file)
+ else:
self.vault_pass, _= self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)
+ if not self.vault_pass:
+ raise AnsibleOptionsError("A password is required to use Ansible's Vault")
+
self.execute()
def execute_create(self):
@@ -114,6 +119,9 @@ class VaultCLI(CLI):
self.display.display("Encryption successful")
def execute_rekey(self):
+ for f in self.args:
+ if not (os.path.isfile(f)):
+ raise AnsibleError(f + " does not exist")
__, new_password = self.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)
for f in self.args:
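
A minimal sketch of the password resolution the vault (and playbook) CLIs now follow: a password file takes precedence over an interactive prompt, and an empty password is rejected. The file handling below (first line, newline stripped) is an assumption made for the example, not the CLI.read_vault_password_file implementation.

import getpass

def resolve_vault_password(password_file=None, prompt=getpass.getpass):
    # Sketch: a password file beats prompting; an empty result is an error.
    if password_file:
        with open(password_file) as f:
            password = f.readline().strip()  # assumed: first line, stripped
    else:
        password = prompt("Vault password: ")
    if not password:
        raise ValueError("A password is required to use Ansible's Vault")
    return password

# e.g. resolve_vault_password('/etc/ansible/vault_pass.txt')
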
diff --git a/v2/ansible/compat/__init__.py b/lib/ansible/compat/__init__.py
similarity index 100%
rename from v2/ansible/compat/__init__.py
rename to lib/ansible/compat/__init__.py
diff --git a/v2/ansible/compat/tests/__init__.py b/lib/ansible/compat/tests/__init__.py
similarity index 100%
rename from v2/ansible/compat/tests/__init__.py
rename to lib/ansible/compat/tests/__init__.py
diff --git a/v2/ansible/compat/tests/mock.py b/lib/ansible/compat/tests/mock.py
similarity index 100%
rename from v2/ansible/compat/tests/mock.py
rename to lib/ansible/compat/tests/mock.py
diff --git a/v2/ansible/compat/tests/unittest.py b/lib/ansible/compat/tests/unittest.py
similarity index 100%
rename from v2/ansible/compat/tests/unittest.py
rename to lib/ansible/compat/tests/unittest.py
diff --git a/v2/ansible/config/__init__.py b/lib/ansible/config/__init__.py
similarity index 100%
rename from v2/ansible/config/__init__.py
rename to lib/ansible/config/__init__.py
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 2cdc08d8ce8..376952821e8 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -15,12 +15,21 @@
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+# Make coding more python3-ish
+from __future__ import (absolute_import, division)
+__metaclass__ = type
+
import os
import pwd
import sys
-import ConfigParser
from string import ascii_letters, digits
+from six import string_types
+from six.moves import configparser
+
+from ansible.parsing.splitter import unquote
+from ansible.errors import AnsibleOptionsError
+
# copied from utils, avoid circular reference fun :)
def mk_boolean(value):
if value is None:
@@ -35,13 +44,17 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False,
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
- return mk_boolean(value)
- if value and integer:
- return int(value)
- if value and floating:
- return float(value)
- if value and islist:
- return [x.strip() for x in value.split(',')]
+ value = mk_boolean(value)
+ if value:
+ if integer:
+ value = int(value)
+ elif floating:
+ value = float(value)
+ elif islist:
+ if isinstance(value, string_types):
+ value = [x.strip() for x in value.split(',')]
+ elif isinstance(value, string_types):
+ value = unquote(value)
return value
def _get_config(p, section, key, env_var, default):
@@ -60,11 +73,13 @@ def _get_config(p, section, key, env_var, default):
def load_config_file():
''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
- p = ConfigParser.ConfigParser()
+ p = configparser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
path0 = os.path.expanduser(path0)
+ if os.path.isdir(path0):
+ path0 += "/ansible.cfg"
path1 = os.getcwd() + "/ansible.cfg"
path2 = os.path.expanduser("~/.ansible.cfg")
path3 = "/etc/ansible/ansible.cfg"
@@ -73,11 +88,10 @@ def load_config_file():
if path is not None and os.path.exists(path):
try:
p.read(path)
- except ConfigParser.Error as e:
- print "Error reading config file: \n%s" % e
- sys.exit(1)
- return p
- return None
+ except configparser.Error as e:
+ raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
+ return p, path
+ return None, ''
def shell_expand_path(path):
''' shell_expand_path is needed as os.path.expanduser does not work
@@ -86,7 +100,7 @@ def shell_expand_path(path):
path = os.path.expanduser(os.path.expandvars(path))
return path
-p = load_config_file()
+p, CONFIG_FILE = load_config_file()
active_user = pwd.getpwuid(os.geteuid())[0]
@@ -97,8 +111,9 @@ YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
# sections in config file
DEFAULTS='defaults'
-# configurable things
-DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts')))
+# generally configurable things
+DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
+DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts')))
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None)
DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles'))
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
@@ -112,46 +127,51 @@ DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
-DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
DEFAULT_VAULT_PASSWORD_FILE = shell_expand_path(get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None))
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True)
+DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
-DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
-DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
-DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
-DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
+DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, boolean=True)
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
-DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su')
-DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True)
-DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '')
-DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
-DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', ''))
+DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
# selinux
-DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf', islist=True)
+DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True)
+
+### PRIVILEGE ESCALATION ###
+# Backwards Compat
+DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True)
+DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
+DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su')
+DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '')
+DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
+DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
+DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
+DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
+DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
+DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
-#TODO: get rid of ternary chain mess
+# Become
+BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''} #FIXME: deal with i18n
BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas']
-BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''}
-DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
-DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',default=None)
+DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
+DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
+DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
+DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
-# need to rethink impementing these 2
-DEFAULT_BECOME_EXE = None
-#DEFAULT_BECOME_EXE = get_config(p, DEFAULTS, 'become_exe', 'ANSIBLE_BECOME_EXE','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo')
-#DEFAULT_BECOME_FLAGS = get_config(p, DEFAULTS, 'become_flags', 'ANSIBLE_BECOME_FLAGS',DEFAULT_SUDO_FLAGS if DEFAULT_SUDO else DEFAULT_SU_FLAGS if DEFAULT_SU else '-H')
+# Plugin paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins')
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins')
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins')
@@ -159,12 +179,15 @@ DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', '
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins')
+DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test_plugins:/usr/share/ansible_plugins/test_plugins')
+DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True)
+# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True)
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True)
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True)
@@ -176,9 +199,7 @@ DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings',
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
-DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
-
-
+DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True)
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
@@ -186,7 +207,9 @@ RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path'
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None)
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r")
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True)
+ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True)
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True)
+
# obsolete -- will be formally removed
ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True)
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True)
@@ -199,10 +222,17 @@ ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_fi
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
+# galaxy related
+DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com')
+# this can be configured to blacklist SCMs but cannot add new ones unless the code is also updated
+GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True)
+
# characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
# non-configurable things
+MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
+MODULE_NO_JSON = ['command', 'shell', 'raw']
DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
@@ -210,3 +240,4 @@ DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
+MAX_FILE_SIZE_FOR_DIFF = 1*1024*1024
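
To summarize the get_config() behaviour above, each setting is resolved with environment-variable precedence and then cast once. The standalone model below captures that precedence and the boolean/integer/list coercions; it is a simplified sketch under those assumptions, not the constants module itself.

import os

def mk_boolean(value):
    # accept the common truthy spellings, in the spirit of the helper above
    if value is None:
        return False
    return str(value).lower() in ('yes', 'y', 'true', 't', '1')

def get_setting(env_var, file_value, default, boolean=False, integer=False, islist=False):
    # precedence: environment variable, then config file value, then default
    value = os.environ.get(env_var)
    if value is None:
        value = file_value if file_value is not None else default
    if boolean:
        return mk_boolean(value)
    if value and integer:
        return int(value)
    if value and islist and isinstance(value, str):
        return [x.strip() for x in value.split(',')]
    return value

print(get_setting('ANSIBLE_FORCE_HANDLERS', None, False, boolean=True))  # False unless exported
print(get_setting('ANSIBLE_GALAXY_SCMS', 'git, hg', '', islist=True))    # ['git', 'hg']
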
diff --git a/v2/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py
similarity index 100%
rename from v2/ansible/errors/__init__.py
rename to lib/ansible/errors/__init__.py
diff --git a/v2/ansible/errors/yaml_strings.py b/lib/ansible/errors/yaml_strings.py
similarity index 100%
rename from v2/ansible/errors/yaml_strings.py
rename to lib/ansible/errors/yaml_strings.py
diff --git a/v2/ansible/executor/__init__.py b/lib/ansible/executor/__init__.py
similarity index 100%
rename from v2/ansible/executor/__init__.py
rename to lib/ansible/executor/__init__.py
diff --git a/v2/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py
similarity index 88%
rename from v2/ansible/executor/module_common.py
rename to lib/ansible/executor/module_common.py
index 535fbd45e33..09fdaa46d85 100644
--- a/v2/ansible/executor/module_common.py
+++ b/lib/ansible/executor/module_common.py
@@ -31,11 +31,13 @@ from ansible import __version__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.parsing.utils.jsonify import jsonify
+from ansible.utils.unicode import to_bytes
REPLACER = "#<>"
REPLACER_ARGS = "\"<>\""
REPLACER_COMPLEX = "\"<>\""
REPLACER_WINDOWS = "# POWERSHELL_COMMON"
+REPLACER_WINARGS = "<>"
REPLACER_VERSION = "\"<>\""
# We could end up writing out parameters with unicode characters so we need to
@@ -64,6 +66,8 @@ def _find_snippet_imports(module_data, module_path, strip_comments):
module_style = 'old'
if REPLACER in module_data:
module_style = 'new'
+ elif REPLACER_WINDOWS in module_data:
+ module_style = 'new'
elif 'from ansible.module_utils.' in module_data:
module_style = 'new'
elif 'WANT_JSON' in module_data:
@@ -113,7 +117,7 @@ def _find_snippet_imports(module_data, module_path, strip_comments):
# ******************************************************************************
-def modify_module(module_path, module_args, strip_comments=False):
+def modify_module(module_path, module_args, task_vars=dict(), strip_comments=False):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
@@ -158,22 +162,19 @@ def modify_module(module_path, module_args, strip_comments=False):
(module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments)
- #module_args_json = jsonify(module_args)
module_args_json = json.dumps(module_args)
encoded_args = repr(module_args_json.encode('utf-8'))
# these strings should be part of the 'basic' snippet which is required to be included
module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
module_data = module_data.replace(REPLACER_COMPLEX, encoded_args)
+ module_data = module_data.replace(REPLACER_WINARGS, module_args_json.encode('utf-8'))
- # FIXME: we're not passing around an inject dictionary anymore, so
- # this needs to be fixed with whatever method we use for vars
- # like this moving forward
- #if module_style == 'new':
- # facility = C.DEFAULT_SYSLOG_FACILITY
- # if 'ansible_syslog_facility' in inject:
- # facility = inject['ansible_syslog_facility']
- # module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
+ if module_style == 'new':
+ facility = C.DEFAULT_SYSLOG_FACILITY
+ if 'ansible_syslog_facility' in task_vars:
+ facility = task_vars['ansible_syslog_facility']
+ module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
lines = module_data.split(b"\n", 1)
shebang = None
@@ -183,11 +184,9 @@ def modify_module(module_path, module_args, strip_comments=False):
interpreter = args[0]
interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
- # FIXME: more inject stuff here...
- #from ansible.utils.unicode import to_bytes
- #if interpreter_config in inject:
- # interpreter = to_bytes(inject[interpreter_config], errors='strict')
- # lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:]))
+ if interpreter_config in task_vars:
+ interpreter = to_bytes(task_vars[interpreter_config], errors='strict')
+ lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:]))
lines.insert(1, ENCODING_STRING)
else:
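
The hunk above re-enables two per-task substitutions driven by task_vars: the syslog facility and the interpreter named in the module's shebang (e.g. ansible_python_interpreter). Below is a small self-contained sketch of the same idea, operating on plain text rather than Ansible's module payload; the sample module source is invented.

def customize_module_source(module_data, task_vars):
    # Sketch: swap the syslog facility and the shebang interpreter based
    # on inventory/task variables, as the hunk above does.
    facility = task_vars.get('ansible_syslog_facility', 'LOG_USER')
    module_data = module_data.replace('syslog.LOG_USER', 'syslog.%s' % facility)

    lines = module_data.split('\n', 1)
    if lines[0].startswith('#!'):
        args = lines[0][2:].split()
        interpreter = args[0]
        config_key = 'ansible_%s_interpreter' % interpreter.rsplit('/', 1)[-1]
        if config_key in task_vars:
            lines[0] = ('#!%s %s' % (task_vars[config_key], ' '.join(args[1:]))).rstrip()
    return '\n'.join(lines)

src = "#!/usr/bin/python\nimport syslog\nsyslog.openlog('demo', 0, syslog.LOG_USER)\n"
print(customize_module_source(src, {'ansible_python_interpreter': '/opt/py27/bin/python'}))
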
diff --git a/v2/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py
similarity index 86%
rename from v2/ansible/executor/play_iterator.py
rename to lib/ansible/executor/play_iterator.py
index dc4d4c7d5d2..45089d19d20 100644
--- a/v2/ansible/executor/play_iterator.py
+++ b/lib/ansible/executor/play_iterator.py
@@ -19,6 +19,10 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import fnmatch
+
+from ansible import constants as C
+
from ansible.errors import *
from ansible.playbook.block import Block
from ansible.playbook.task import Task
@@ -87,18 +91,32 @@ class PlayIterator:
FAILED_RESCUE = 4
FAILED_ALWAYS = 8
- def __init__(self, inventory, play, connection_info, all_vars):
+ def __init__(self, inventory, play, play_context, all_vars):
self._play = play
self._blocks = []
for block in self._play.compile():
- new_block = block.filter_tagged_tasks(connection_info, all_vars)
+ new_block = block.filter_tagged_tasks(play_context, all_vars)
if new_block.has_tasks():
self._blocks.append(new_block)
self._host_states = {}
for host in inventory.get_hosts(self._play.hosts):
self._host_states[host.name] = HostState(blocks=self._blocks)
+ # if we're looking to start at a specific task, iterate through
+ # the tasks for this host until we find the specified task
+ if play_context.start_at_task is not None:
+ while True:
+ (s, task) = self.get_next_task_for_host(host, peek=True)
+ if s.run_state == self.ITERATING_COMPLETE:
+ break
+ if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task):
+ break
+ else:
+ self.get_next_task_for_host(host)
+
+ # Extend the play handlers list to include the handlers defined in roles
+ self._play.handlers.extend(play.compile_roles_handlers())
def get_host_state(self, host):
try:
@@ -116,7 +134,18 @@ class PlayIterator:
elif s.run_state == self.ITERATING_SETUP:
s.run_state = self.ITERATING_TASKS
s.pending_setup = True
- if self._play.gather_facts == 'smart' and not host._gathered_facts or boolean(self._play.gather_facts):
+
+ # Gather facts if the default is 'smart' and we have not yet
+ # done it for this host; or if 'explicit' and the play sets
+ # gather_facts to True; or if 'implicit' and the play does
+ # NOT explicitly set gather_facts to False.
+
+ gathering = C.DEFAULT_GATHERING
+ implied = self._play.gather_facts is None or boolean(self._play.gather_facts)
+
+ if (gathering == 'implicit' and implied) or \
+ (gathering == 'explicit' and boolean(self._play.gather_facts)) or \
+ (gathering == 'smart' and implied and not host._gathered_facts):
if not peek:
# mark the host as having gathered facts
host.set_gathered_facts(True)
@@ -278,6 +307,11 @@ class PlayIterator:
if res:
return res
+ for block in self._play.handlers:
+ res = _search_block(block, task)
+ if res:
+ return res
+
return None
def add_tasks(self, host, task_list):
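
The fact-gathering decision added above is effectively a three-way policy. Here is a standalone restatement of the rule with a few illustrative checks; gathering corresponds to the configured DEFAULT_GATHERING value and already_gathered to host._gathered_facts, and plain bool() stands in for Ansible's boolean() string parsing.

def should_gather_facts(gathering, play_gather_facts, already_gathered):
    # gathering: 'implicit', 'explicit' or 'smart'
    # play_gather_facts: the play's gather_facts setting, or None if unset
    implied = play_gather_facts is None or bool(play_gather_facts)
    if gathering == 'implicit':
        return implied
    if gathering == 'explicit':
        return bool(play_gather_facts)
    if gathering == 'smart':
        return implied and not already_gathered
    return False

assert should_gather_facts('implicit', None, False) is True    # gather unless the play opts out
assert should_gather_facts('explicit', None, False) is False   # gather only if the play opts in
assert should_gather_facts('smart', None, True) is False       # skip when facts are already cached
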
diff --git a/v2/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py
similarity index 63%
rename from v2/ansible/executor/playbook_executor.py
rename to lib/ansible/executor/playbook_executor.py
index 2d5958697b3..686da5c3219 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/lib/ansible/executor/playbook_executor.py
@@ -19,16 +19,22 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import getpass
+import locale
import signal
+import sys
from ansible import constants as C
from ansible.errors import *
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.playbook import Playbook
+from ansible.plugins import module_loader
from ansible.template import Templar
from ansible.utils.color import colorize, hostcolor
from ansible.utils.debug import debug
+from ansible.utils.encrypt import do_encrypt
+from ansible.utils.unicode import to_unicode
class PlaybookExecutor:
@@ -46,7 +52,13 @@ class PlaybookExecutor:
self._options = options
self.passwords = passwords
- if options.listhosts or options.listtasks or options.listtags:
+ # make sure the module path (if specified) is parsed and
+ # added to the module_loader object
+ if options.module_path is not None:
+ for path in options.module_path.split(os.pathsep):
+ module_loader.add_directory(path)
+
+ if options.listhosts or options.listtasks or options.listtags or options.syntax:
self._tqm = None
else:
self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=self.passwords)
@@ -66,6 +78,7 @@ class PlaybookExecutor:
try:
for playbook_path in self._playbooks:
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
+ self._inventory.set_playbook_basedir(os.path.dirname(playbook_path))
if self._tqm is None: # we are doing a listing
entry = {'playbook': playbook_path}
@@ -76,56 +89,61 @@ class PlaybookExecutor:
self._display.vv('%d plays in %s' % (len(plays), playbook_path))
for play in plays:
+ # clear any filters which may have been applied to the inventory
self._inventory.remove_restriction()
+ if play.vars_prompt:
+ for var in play.vars_prompt:
+ if 'name' not in var:
+ raise AnsibleError("'vars_prompt' item is missing 'name:'", obj=play._ds)
+
+ vname = var['name']
+ prompt = var.get("prompt", vname)
+ default = var.get("default", None)
+ private = var.get("private", True)
+
+ confirm = var.get("confirm", False)
+ encrypt = var.get("encrypt", None)
+ salt_size = var.get("salt_size", None)
+ salt = var.get("salt", None)
+
+ if vname not in play.vars:
+ self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
+ play.vars[vname] = self._do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
+
# Create a temporary copy of the play here, so we can run post_validate
# on it without the templating changes affecting the original object.
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
- templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False)
+ templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
+ if self._options.syntax:
+ continue
+
if self._tqm is None:
# we are just doing a listing
-
- pname = new_play.get_name().strip()
- if pname == 'PLAY: ':
- pname = 'PLAY: #%d' % i
- p = { 'name': pname }
-
- if self._options.listhosts:
- p['pattern']=play.hosts
- p['hosts']=set(self._inventory.get_hosts(new_play.hosts))
-
- #TODO: play tasks are really blocks, need to figure out how to get task objects from them
- elif self._options.listtasks:
- p['tasks'] = []
- for task in play.get_tasks():
- p['tasks'].append(task)
- #p['tasks'].append({'name': task.get_name().strip(), 'tags': task.tags})
-
- elif self._options.listtags:
- p['tags'] = set(new_play.tags)
- for task in play.get_tasks():
- p['tags'].update(task)
- #p['tags'].update(task.tags)
- entry['plays'].append(p)
+ entry['plays'].append(new_play)
else:
+ # make sure the tqm has callbacks loaded
+ self._tqm.load_callbacks()
+
# we are actually running plays
for batch in self._get_serialized_batches(new_play):
if len(batch) == 0:
self._tqm.send_callback('v2_playbook_on_play_start', new_play)
self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
- result = 0
break
# restrict the inventory to the hosts in the serialized batch
self._inventory.restrict_to_hosts(batch)
# and run it...
result = self._tqm.run(play=play)
+ # if the last result wasn't zero, break out of the serial batch loop
if result != 0:
break
+ # if the last result wasn't zero, break out of the play loop
if result != 0:
break
@@ -134,6 +152,10 @@ class PlaybookExecutor:
if entry:
entrylist.append(entry) # per playbook
+ # if the last result wasn't zero, break out of the playbook file name loop
+ if result != 0:
+ break
+
if entrylist:
return entrylist
@@ -141,6 +163,10 @@ class PlaybookExecutor:
if self._tqm is not None:
self._cleanup()
+ if self._options.syntax:
+ self.display.display("No issues encountered")
+ return result
+
# FIXME: this stat summary stuff should be cleaned up and moved
# to a new method, if it even belongs here...
self._display.banner("PLAY RECAP")
@@ -209,3 +235,47 @@ class PlaybookExecutor:
serialized_batches.append(play_hosts)
return serialized_batches
+
+ def _do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+
+ if prompt and default is not None:
+ msg = "%s [%s]: " % (prompt, default)
+ elif prompt:
+ msg = "%s: " % prompt
+ else:
+ msg = 'input for %s: ' % varname
+
+ def do_prompt(prompt, private):
+ if sys.stdout.encoding:
+ msg = prompt.encode(sys.stdout.encoding)
+ else:
+ # when piping the output, or at other times when stdout
+ # may not be the standard file descriptor, the stdout
+ # encoding may not be set, so default to something sane
+ msg = prompt.encode(locale.getpreferredencoding())
+ if private:
+ return getpass.getpass(msg)
+ return raw_input(msg)
+
+ if confirm:
+ while True:
+ result = do_prompt(msg, private)
+ second = do_prompt("confirm " + msg, private)
+ if result == second:
+ break
+ self._display.display("***** VALUES ENTERED DO NOT MATCH ****")
+ else:
+ result = do_prompt(msg, private)
+
+ # if result is false and default is not None
+ if not result and default is not None:
+ result = default
+
+ if encrypt:
+ result = do_encrypt(result, encrypt, salt_size, salt)
+
+ # handle utf-8 chars
+ result = to_unicode(result, errors='strict')
+ return result
+
+
diff --git a/v2/ansible/executor/process/__init__.py b/lib/ansible/executor/process/__init__.py
similarity index 100%
rename from v2/ansible/executor/process/__init__.py
rename to lib/ansible/executor/process/__init__.py
diff --git a/v2/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py
similarity index 79%
rename from v2/ansible/executor/process/result.py
rename to lib/ansible/executor/process/result.py
index f0416db852d..038a68fbef7 100644
--- a/v2/ansible/executor/process/result.py
+++ b/lib/ansible/executor/process/result.py
@@ -58,7 +58,7 @@ class ResultProcess(multiprocessing.Process):
super(ResultProcess, self).__init__()
def _send_result(self, result):
- debug("sending result: %s" % (result,))
+ debug(u"sending result: %s" % ([unicode(x) for x in result],))
self._final_q.put(result, block=False)
debug("done sending result")
@@ -105,7 +105,9 @@ class ResultProcess(multiprocessing.Process):
time.sleep(0.1)
continue
- host_name = result._host.get_name()
+ # if this task is registering a result, do it now
+ if result._task.register:
+ self._send_result(('register_host_var', result._host, result._task.register, result._result))
# send callbacks, execute other options based on the result status
# FIXME: this should all be cleaned up and probably moved to a sub-function.
@@ -120,15 +122,6 @@ class ResultProcess(multiprocessing.Process):
elif result.is_skipped():
self._send_result(('host_task_skipped', result))
else:
- # if this task is notifying a handler, do it now
- if result._task.notify:
- # The shared dictionary for notified handlers is a proxy, which
- # does not detect when sub-objects within the proxy are modified.
- # So, per the docs, we reassign the list so the proxy picks up and
- # notifies all other threads
- for notify in result._task.notify:
- self._send_result(('notify_handler', result._host, notify))
-
if result._task.loop:
# this task had a loop, and has more than one result, so
# loop over all of them instead of a single result
@@ -137,33 +130,39 @@ class ResultProcess(multiprocessing.Process):
result_items = [ result._result ]
for result_item in result_items:
- #if 'include' in result_item:
- # include_variables = result_item.get('include_variables', dict())
- # if 'item' in result_item:
- # include_variables['item'] = result_item['item']
- # self._send_result(('include', result._host, result._task, result_item['include'], include_variables))
- #elif 'add_host' in result_item:
+ # if this task is notifying a handler, do it now
+ if '_ansible_notify' in result_item:
+ if result.is_changed():
+ # The shared dictionary for notified handlers is a proxy, which
+ # does not detect when sub-objects within the proxy are modified.
+ # So, per the docs, we reassign the list so the proxy picks up and
+ # notifies all other threads
+ for notify in result_item['_ansible_notify']:
+ if result._task._role:
+ role_name = result._task._role.get_name()
+ notify = "%s : %s" % (role_name, notify)
+ self._send_result(('notify_handler', result, notify))
+ # now remove the notify field from the results, as it's no longer needed
+ result_item.pop('_ansible_notify')
+
if 'add_host' in result_item:
# this task added a new host (add_host module)
self._send_result(('add_host', result_item))
elif 'add_group' in result_item:
# this task added a new group (group_by module)
- self._send_result(('add_group', result._host, result_item))
+ self._send_result(('add_group', result._task))
elif 'ansible_facts' in result_item:
# if this task is registering facts, do that now
+ item = result_item.get('item', None)
if result._task.action in ('set_fact', 'include_vars'):
for (key, value) in result_item['ansible_facts'].iteritems():
- self._send_result(('set_host_var', result._host, key, value))
+ self._send_result(('set_host_var', result._host, result._task, item, key, value))
else:
- self._send_result(('set_host_facts', result._host, result_item['ansible_facts']))
+ self._send_result(('set_host_facts', result._host, result._task, item, result_item['ansible_facts']))
# finally, send the ok for this task
self._send_result(('host_task_ok', result))
- # if this task is registering a result, do it now
- if result._task.register:
- self._send_result(('set_host_var', result._host, result._task.register, result._result))
-
except queue.Empty:
pass
except (KeyboardInterrupt, IOError, EOFError):
diff --git a/v2/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py
similarity index 94%
rename from v2/ansible/executor/process/worker.py
rename to lib/ansible/executor/process/worker.py
index d8e8960fe40..595cf872e71 100644
--- a/v2/ansible/executor/process/worker.py
+++ b/lib/ansible/executor/process/worker.py
@@ -94,7 +94,7 @@ class WorkerProcess(multiprocessing.Process):
try:
if not self._main_q.empty():
debug("there's work to be done!")
- (host, task, basedir, job_vars, connection_info, shared_loader_obj) = self._main_q.get(block=False)
+ (host, task, basedir, job_vars, play_context, shared_loader_obj) = self._main_q.get(block=False)
debug("got a task/handler to work on: %s" % task)
# because the task queue manager starts workers (forks) before the
@@ -111,11 +111,11 @@ class WorkerProcess(multiprocessing.Process):
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
- new_connection_info = connection_info.set_task_override(task)
+ new_play_context = play_context.set_task_and_variable_override(task=task, variables=job_vars)
# execute the task and build a TaskResult from the result
debug("running TaskExecutor() for %s/%s" % (host, task))
- executor_result = TaskExecutor(host, task, job_vars, new_connection_info, self._new_stdin, self._loader, shared_loader_obj).run()
+ executor_result = TaskExecutor(host, task, job_vars, new_play_context, self._new_stdin, self._loader, shared_loader_obj).run()
debug("done running TaskExecutor() for %s/%s" % (host, task))
task_result = TaskResult(host, task, executor_result)
diff --git a/v2/ansible/executor/stats.py b/lib/ansible/executor/stats.py
similarity index 100%
rename from v2/ansible/executor/stats.py
rename to lib/ansible/executor/stats.py
diff --git a/v2/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py
similarity index 68%
rename from v2/ansible/executor/task_executor.py
rename to lib/ansible/executor/task_executor.py
index 2f90b3d87eb..297d8b2526f 100644
--- a/v2/ansible/executor/task_executor.py
+++ b/lib/ansible/executor/task_executor.py
@@ -27,10 +27,9 @@ import time
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
-from ansible.executor.connection_info import ConnectionInformation
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
-from ansible.plugins import lookup_loader, connection_loader, action_loader
+from ansible.plugins import connection_loader, action_loader
from ansible.template import Templar
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.unicode import to_unicode
@@ -48,15 +47,26 @@ class TaskExecutor:
class.
'''
- def __init__(self, host, task, job_vars, connection_info, new_stdin, loader, shared_loader_obj):
+ # Modules that we optimize by squashing loop items into a single call to
+ # the module
+ SQUASH_ACTIONS = frozenset(('apt', 'yum', 'pkgng', 'zypper', 'dnf'))
+
+ def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj):
self._host = host
self._task = task
self._job_vars = job_vars
- self._connection_info = connection_info
+ self._play_context = play_context
self._new_stdin = new_stdin
self._loader = loader
self._shared_loader_obj = shared_loader_obj
+ try:
+ from __main__ import display
+ self._display = display
+ except ImportError:
+ from ansible.utils.display import Display
+ self._display = Display()
+
def run(self):
'''
The main executor entrypoint, where we determine if the specified
@@ -83,9 +93,9 @@ class TaskExecutor:
changed = False
failed = False
for item in item_results:
- if 'changed' in item:
+ if 'changed' in item and item['changed']:
changed = True
- if 'failed' in item:
+ if 'failed' in item and item['failed']:
failed = True
# create the overall result item, and set the changed/failed
@@ -117,6 +127,13 @@ class TaskExecutor:
return result
except AnsibleError, e:
return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))
+ finally:
+ try:
+ self._connection.close()
+ except AttributeError:
+ pass
+ except Exception, e:
+ debug("error closing connection: %s" % to_unicode(e))
def _get_loop_items(self):
'''
@@ -124,10 +141,22 @@ class TaskExecutor:
and returns the items result.
'''
+ # create a copy of the job vars here so that we can modify
+ # them temporarily without changing them too early for other
+ # parts of the code that might still need a pristine version
+ vars_copy = self._job_vars.copy()
+
+ # now we update them with the play context vars
+ self._play_context.update_vars(vars_copy)
+
+ templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=vars_copy)
items = None
- if self._task.loop and self._task.loop in lookup_loader:
- loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, variables=self._job_vars, loader=self._loader)
- items = lookup_loader.get(self._task.loop, loader=self._loader).run(terms=loop_terms, variables=self._job_vars)
+ if self._task.loop:
+ if self._task.loop in self._shared_loader_obj.lookup_loader:
+ loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True)
+ items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, templar=templar).run(terms=loop_terms, variables=vars_copy)
+ else:
+ raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop)
return items
@@ -165,9 +194,6 @@ class TaskExecutor:
res['item'] = item
results.append(res)
- # FIXME: we should be sending back a callback result for each item in the loop here
- print(res)
-
return results
def _squash_items(self, items, variables):
@@ -175,14 +201,21 @@ class TaskExecutor:
Squash items down to a comma-separated list for certain modules which support it
(typically package management modules).
'''
-
- if len(items) > 0 and self._task.action in ('apt', 'yum', 'pkgng', 'zypper'):
+ if len(items) > 0 and self._task.action in self.SQUASH_ACTIONS:
final_items = []
+ name = self._task.args.pop('name', None) or self._task.args.pop('pkg', None)
for item in items:
variables['item'] = item
- if self._task.evaluate_conditional(variables):
- final_items.append(item)
- return [",".join(final_items)]
+ templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
+ if self._task.evaluate_conditional(templar, variables):
+ if templar._contains_vars(name):
+ new_item = templar.template(name)
+ final_items.append(new_item)
+ else:
+ final_items.append(item)
+ joined_items = ",".join(final_items)
+ self._task.args['name'] = joined_items
+ return [joined_items]
else:
return items
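The squashing above collapses a package loop into a single module invocation: a yum task with name={{ item }} looping over [httpd, mod_ssl] ends up running once with name=httpd,mod_ssl. A simplified sketch of the same idea, with plain string replacement standing in for real templating:

    # Simplified sketch of the squash: render the name per item, then join.
    def squash_items(action, items, name_template, squash_actions=('apt', 'yum')):
        if items and action in squash_actions:
            final = [name_template.replace('{{ item }}', str(i)) for i in items]
            return [",".join(final)]
        return items

    print(squash_items('yum', ['httpd', 'mod_ssl'], '{{ item }}'))  # ['httpd,mod_ssl']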
@@ -200,26 +233,37 @@ class TaskExecutor:
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
- self._connection_info.post_validate(templar=templar)
+ self._play_context.post_validate(templar=templar)
- # now that the connection information is finalized, we can add 'magic'
+ # now that the play context is finalized, we can add 'magic'
# variables to the variable dictionary
- self._connection_info.update_vars(variables)
-
- # get the connection and the handler for this execution
- self._connection = self._get_connection(variables)
- self._handler = self._get_action_handler(connection=self._connection)
+ self._play_context.update_vars(variables)
# Evaluate the conditional (if any) for this task, which we do before running
# the final task post-validation. We do this before the post validation due to
# the fact that the conditional may specify that the task be skipped due to a
# variable not being present which would otherwise cause validation to fail
- if not self._task.evaluate_conditional(variables):
+ if not self._task.evaluate_conditional(templar, variables):
debug("when evaulation failed, skipping this task")
return dict(changed=False, skipped=True, skip_reason='Conditional check failed')
- # Now we do final validation on the task, which sets all fields to their final values
+ # Now we do final validation on the task, which sets all fields to their final values.
+ # In the case of debug tasks, we save any 'var' params and restore them after validating
+ # so that variables are not replaced too early.
+ prev_var = None
+ if self._task.action == 'debug' and 'var' in self._task.args:
+ prev_var = self._task.args.pop('var')
+
self._task.post_validate(templar=templar)
+ if '_variable_params' in self._task.args:
+ variable_params = self._task.args.pop('_variable_params')
+ if isinstance(variable_params, dict):
+ self._display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts")
+ variable_params.update(self._task.args)
+ self._task.args = variable_params
+
+ if prev_var is not None:
+ self._task.args['var'] = prev_var
# if this task is a TaskInclude, we just return now with a success code so the
# main thread can expand the task list for the given host
@@ -227,7 +271,13 @@ class TaskExecutor:
include_variables = self._task.args.copy()
include_file = include_variables.get('_raw_params')
del include_variables['_raw_params']
- return dict(changed=True, include=include_file, include_variables=include_variables)
+ return dict(include=include_file, include_variables=include_variables)
+
+ # get the connection and the handler for this execution
+ self._connection = self._get_connection(variables)
+ self._connection.set_host_overrides(host=self._host)
+
+ self._handler = self._get_action_handler(connection=self._connection, templar=templar)
# And filter out any fields which were set to default(omit), and got the omit token value
omit_token = variables.get('omit')
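The omit filter drops any module argument whose final value equals the per-run omit token, which is how default(omit) arguments disappear from the module call. A hedged sketch of that filtering step (the token value below is made up):

    # Sketch: strip args whose value equals the run-specific omit placeholder.
    def filter_omitted(args, omit_token):
        if omit_token is None:
            return args
        return dict((k, v) for k, v in args.items() if v != omit_token)

    token = '__omit_place_holder__123abc'  # hypothetical token value
    print(filter_omitted({'path': '/tmp/x', 'mode': token}, token))  # {'path': '/tmp/x'}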
@@ -252,7 +302,7 @@ class TaskExecutor:
for attempt in range(retries):
if attempt > 0:
# FIXME: this should use the callback/message passing mechanism
- print("FAILED - RETRYING: %s (%d retries left)" % (self._task, retries-attempt))
+ print("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-attempt, result))
result['attempts'] = attempt + 1
debug("running the handler")
@@ -264,11 +314,11 @@ class TaskExecutor:
# response, so we parse it here and replace the result
try:
result = json.loads(result.get('stdout'))
- except ValueError, e:
+ except (TypeError, ValueError) as e:
return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e))
if self._task.poll > 0:
- result = self._poll_async_result(result=result)
+ result = self._poll_async_result(result=result, templar=templar)
# update the local copy of vars with the registered value, if specified,
# or any facts which may have been generated by the module execution
@@ -284,21 +334,24 @@ class TaskExecutor:
# FIXME: make sure until is mutually exclusive with changed_when/failed_when
if self._task.until:
cond.when = self._task.until
- if cond.evaluate_conditional(vars_copy):
+ if cond.evaluate_conditional(templar, vars_copy):
break
elif (self._task.changed_when or self._task.failed_when) and 'skipped' not in result:
if self._task.changed_when:
cond.when = [ self._task.changed_when ]
- result['changed'] = cond.evaluate_conditional(vars_copy)
+ result['changed'] = cond.evaluate_conditional(templar, vars_copy)
if self._task.failed_when:
cond.when = [ self._task.failed_when ]
- failed_when_result = cond.evaluate_conditional(vars_copy)
+ failed_when_result = cond.evaluate_conditional(templar, vars_copy)
result['failed_when_result'] = result['failed'] = failed_when_result
if failed_when_result:
break
- elif 'failed' not in result and result.get('rc', 0) == 0:
- # if the result is not failed, stop trying
- break
+ elif 'failed' not in result:
+ if result.get('rc', 0) != 0:
+ result['failed'] = True
+ else:
+ # if the result is not failed, stop trying
+ break
if attempt < retries - 1:
time.sleep(delay)
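The retry block above registers the latest result into a copy of the vars and then evaluates until/changed_when/failed_when as ordinary conditionals against it, sleeping between attempts. A compact sketch of the retry-until shape, with plain callables standing in for templated conditionals:

    import time

    # Sketch of until/retries/delay: run, check the condition against the
    # registered result, retry while it is false.
    def run_until(run_task, condition, retries=3, delay=0):
        result = {}
        for attempt in range(retries):
            result = run_task()
            result['attempts'] = attempt + 1
            if condition(result):
                break
            if attempt < retries - 1:
                time.sleep(delay)
        return result

    state = {'n': 0}
    def flaky():
        state['n'] += 1
        return {'rc': 0 if state['n'] >= 2 else 1}

    print(run_until(flaky, lambda r: r.get('rc', 0) == 0))  # succeeds on the 2nd attempt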
@@ -311,11 +364,17 @@ class TaskExecutor:
if 'ansible_facts' in result:
variables.update(result['ansible_facts'])
+ # save the notification target in the result, if it was specified, as
+ # this task may be running in a loop in which case the notification
+ # may be item-specific, ie. "notify: service {{item}}"
+ if self._task.notify is not None:
+ result['_ansible_notify'] = self._task.notify
+
# and return
debug("attempt loop complete, returning result")
return result
- def _poll_async_result(self, result):
+ def _poll_async_result(self, result, templar):
'''
Polls for the specified JID to be complete
'''
@@ -337,8 +396,9 @@ class TaskExecutor:
'normal',
task=async_task,
connection=self._connection,
- connection_info=self._connection_info,
+ play_context=self._play_context,
loader=self._loader,
+ templar=templar,
shared_loader_obj=self._shared_loader_obj,
)
@@ -363,35 +423,39 @@ class TaskExecutor:
correct connection object from the list of connection plugins
'''
- # FIXME: delegate_to calculation should be done here
# FIXME: calculation of connection params/auth stuff should be done here
- self._connection_info.remote_addr = self._host.ipv4_address
+ if not self._play_context.remote_addr:
+ self._play_context.remote_addr = self._host.ipv4_address
+
if self._task.delegate_to is not None:
self._compute_delegate(variables)
- conn_type = self._connection_info.connection
+ conn_type = self._play_context.connection
if conn_type == 'smart':
conn_type = 'ssh'
- if sys.platform.startswith('darwin') and self._connection_info.remote_pass:
+ if sys.platform.startswith('darwin') and self._play_context.password:
# due to a current bug in sshpass on OSX, which can trigger
# a kernel panic even for non-privileged users, we revert to
# paramiko on that OS when a SSH password is specified
conn_type = "paramiko"
else:
# see if SSH can support ControlPersist if not use paramiko
- cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = cmd.communicate()
- if "Bad configuration option" in err:
+ try:
+ cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (out, err) = cmd.communicate()
+ if "Bad configuration option" in err:
+ conn_type = "paramiko"
+ except OSError:
conn_type = "paramiko"
- connection = connection_loader.get(conn_type, self._connection_info, self._new_stdin)
+ connection = connection_loader.get(conn_type, self._play_context, self._new_stdin)
if not connection:
raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
return connection
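The 'smart' transport resolution above probes the local ssh binary by running it with -o ControlPersist and inspecting stderr, and now also survives a missing ssh binary by catching OSError. Extracted on its own, the probe looks roughly like this:

    import subprocess

    # Prefer ssh; fall back to paramiko when ControlPersist is unsupported
    # or ssh is not installed at all.
    def pick_transport():
        try:
            cmd = subprocess.Popen(['ssh', '-o', 'ControlPersist'],
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            (out, err) = cmd.communicate()
            if b"Bad configuration option" in err:
                return "paramiko"
        except OSError:
            return "paramiko"
        return "ssh"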
- def _get_action_handler(self, connection):
+ def _get_action_handler(self, connection, templar):
'''
Returns the correct action plugin to handle the requested task action
'''
@@ -409,8 +473,9 @@ class TaskExecutor:
handler_name,
task=self._task,
connection=connection,
- connection_info=self._connection_info,
+ play_context=self._play_context,
loader=self._loader,
+ templar=templar,
shared_loader_obj=self._shared_loader_obj,
)
@@ -424,21 +489,21 @@ class TaskExecutor:
# get the vars for the delegate by its name
try:
this_info = variables['hostvars'][self._task.delegate_to]
+
+ # get the real ssh_address for the delegate and allow ansible_ssh_host to be templated
+ #self._play_context.remote_user = self._compute_delegate_user(self.delegate_to, delegate['inject'])
+ self._play_context.remote_addr = this_info.get('ansible_ssh_host', self._task.delegate_to)
+ self._play_context.port = this_info.get('ansible_ssh_port', self._play_context.port)
+ self._play_context.password = this_info.get('ansible_ssh_pass', self._play_context.password)
+ self._play_context.private_key_file = this_info.get('ansible_ssh_private_key_file', self._play_context.private_key_file)
+ self._play_context.connection = this_info.get('ansible_connection', C.DEFAULT_TRANSPORT)
+ self._play_context.become_pass = this_info.get('ansible_sudo_pass', self._play_context.become_pass)
except:
# make sure the inject is empty for non-inventory hosts
this_info = {}
- # get the real ssh_address for the delegate and allow ansible_ssh_host to be templated
- #self._connection_info.remote_user = self._compute_delegate_user(self.delegate_to, delegate['inject'])
- self._connection_info.remote_addr = this_info.get('ansible_ssh_host', self._task.delegate_to)
- self._connection_info.port = this_info.get('ansible_ssh_port', self._connection_info.port)
- self._connection_info.password = this_info.get('ansible_ssh_pass', self._connection_info.password)
- self._connection_info.private_key_file = this_info.get('ansible_ssh_private_key_file', self._connection_info.private_key_file)
- self._connection_info.connection = this_info.get('ansible_connection', self._connection_info.connection)
- self._connection_info.become_pass = this_info.get('ansible_sudo_pass', self._connection_info.become_pass)
-
- if self._connection_info.remote_addr in ('127.0.0.1', 'localhost'):
- self._connection_info.connection = 'local'
+ if self._play_context.remote_addr in ('127.0.0.1', 'localhost'):
+ self._play_context.connection = 'local'
# Last chance to get private_key_file from global variables.
# this is useful if delegated host is not defined in the inventory
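For delegate_to, the delegate's hostvars override the play context: ansible_ssh_host, ansible_ssh_port, ansible_ssh_pass, ansible_ssh_private_key_file, ansible_connection and ansible_sudo_pass replace the task's normal connection settings, and a loopback address forces the local connection plugin. A small illustration of the override pattern, with plain dicts standing in for the real PlayContext and hostvars objects:

    # Delegate hostvars win; missing keys fall back to existing values.
    def apply_delegate(play_context, delegate_name, hostvars):
        info = hostvars.get(delegate_name, {})
        play_context['remote_addr'] = info.get('ansible_ssh_host', delegate_name)
        play_context['port'] = info.get('ansible_ssh_port', play_context.get('port'))
        play_context['connection'] = info.get('ansible_connection', 'ssh')
        if play_context['remote_addr'] in ('127.0.0.1', 'localhost'):
            play_context['connection'] = 'local'
        return play_context

    ctx = apply_delegate({'port': 22}, 'dbserver',
                         {'dbserver': {'ansible_ssh_host': '10.0.0.5'}})
    # ctx['remote_addr'] == '10.0.0.5', ctx['connection'] == 'ssh'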
diff --git a/v2/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py
similarity index 82%
rename from v2/ansible/executor/task_queue_manager.py
rename to lib/ansible/executor/task_queue_manager.py
index a875c310d51..25546b21de6 100644
--- a/v2/ansible/executor/task_queue_manager.py
+++ b/lib/ansible/executor/task_queue_manager.py
@@ -26,16 +26,14 @@ import sys
from ansible import constants as C
from ansible.errors import AnsibleError
-from ansible.executor.connection_info import ConnectionInformation
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.process.result import ResultProcess
from ansible.executor.stats import AggregateStats
+from ansible.playbook.play_context import PlayContext
from ansible.plugins import callback_loader, strategy_loader
from ansible.template import Templar
-from ansible.utils.debug import debug
-
__all__ = ['TaskQueueManager']
class TaskQueueManager:
@@ -59,6 +57,10 @@ class TaskQueueManager:
self._options = options
self._stats = AggregateStats()
self.passwords = passwords
+ self._stdout_callback = stdout_callback
+
+ self._callbacks_loaded = False
+ self._callback_plugins = []
# a special flag to help us exit cleanly
self._terminated = False
@@ -72,9 +74,6 @@ class TaskQueueManager:
self._final_q = multiprocessing.Queue()
- # load callback plugins
- self._callback_plugins = self._load_callbacks(stdout_callback)
-
# create the pool of worker threads, based on the number of forks specified
try:
fileno = sys.stdin.fileno()
@@ -116,21 +115,22 @@ class TaskQueueManager:
for handler in handler_list:
self._notified_handlers[handler.get_name()] = []
- def _load_callbacks(self, stdout_callback):
+ def load_callbacks(self):
'''
Loads all available callbacks, with the exception of those which
utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
only one such callback plugin will be loaded.
'''
- loaded_plugins = []
+ if self._callbacks_loaded:
+ return
stdout_callback_loaded = False
- if stdout_callback is None:
- stdout_callback = C.DEFAULT_STDOUT_CALLBACK
+ if self._stdout_callback is None:
+ self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
- if stdout_callback not in callback_loader:
- raise AnsibleError("Invalid callback for stdout specified: %s" % stdout_callback)
+ if self._stdout_callback not in callback_loader:
+ raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
for callback_plugin in callback_loader.all(class_only=True):
if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
@@ -140,15 +140,17 @@ class TaskQueueManager:
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
(callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
if callback_type == 'stdout':
- if callback_name != stdout_callback or stdout_callback_loaded:
+ if callback_name != self._stdout_callback or stdout_callback_loaded:
continue
stdout_callback_loaded = True
+ elif C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST:
+ continue
- loaded_plugins.append(callback_plugin(self._display))
+ self._callback_plugins.append(callback_plugin(self._display))
else:
- loaded_plugins.append(callback_plugin())
+ self._callback_plugins.append(callback_plugin())
- return loaded_plugins
+ self._callbacks_loaded = True
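With the lazy load_callbacks() above, exactly one stdout callback (the configured one) is loaded, and every other shipped callback is skipped unless its name appears in the whitelist. The selection rule, reduced to a sketch:

    # One stdout callback, plus whatever is whitelisted.
    def select_callbacks(plugins, stdout_callback, whitelist):
        selected, stdout_loaded = [], False
        for name, cb_type in plugins:
            if cb_type == 'stdout':
                if name != stdout_callback or stdout_loaded:
                    continue
                stdout_loaded = True
            elif not whitelist or name not in whitelist:
                continue
            selected.append(name)
        return selected

    plugins = [('default', 'stdout'), ('minimal', 'stdout'), ('timer', 'notification')]
    print(select_callbacks(plugins, 'default', ['timer']))  # ['default', 'timer']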
def run(self, play):
'''
@@ -159,16 +161,19 @@ class TaskQueueManager:
are done with the current task).
'''
+ if not self._callbacks_loaded:
+ self.load_callbacks()
+
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
- templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False)
+ templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
- connection_info = ConnectionInformation(new_play, self._options, self.passwords)
+ play_context = PlayContext(new_play, self._options, self.passwords)
for callback_plugin in self._callback_plugins:
- if hasattr(callback_plugin, 'set_connection_info'):
- callback_plugin.set_connection_info(connection_info)
+ if hasattr(callback_plugin, 'set_play_context'):
+ callback_plugin.set_play_context(play_context)
self.send_callback('v2_playbook_on_play_start', new_play)
@@ -181,13 +186,13 @@ class TaskQueueManager:
raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# build the iterator
- iterator = PlayIterator(inventory=self._inventory, play=new_play, connection_info=connection_info, all_vars=all_vars)
+ iterator = PlayIterator(inventory=self._inventory, play=new_play, play_context=play_context, all_vars=all_vars)
# and run the play using the strategy
- return strategy.run(iterator, connection_info)
+ return strategy.run(iterator, play_context)
def cleanup(self):
- debug("RUNNING CLEANUP")
+ self._display.debug("RUNNING CLEANUP")
self.terminate()
@@ -225,9 +230,12 @@ class TaskQueueManager:
continue
methods = [
getattr(callback_plugin, method_name, None),
- getattr(callback_plugin, 'on_any', None)
+ getattr(callback_plugin, 'v2_on_any', None)
]
for method in methods:
if method is not None:
- method(*args, **kwargs)
+ try:
+ method(*args, **kwargs)
+ except Exception as e:
+ self._display.warning('Error when using %s: %s' % (method, str(e)))
diff --git a/v2/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py
similarity index 77%
rename from v2/ansible/executor/task_result.py
rename to lib/ansible/executor/task_result.py
index 2b760bac003..d87f9413efb 100644
--- a/v2/ansible/executor/task_result.py
+++ b/lib/ansible/executor/task_result.py
@@ -40,10 +40,18 @@ class TaskResult:
return self._check_key('changed')
def is_skipped(self):
- return self._check_key('skipped')
+ if 'results' in self._result and self._task.loop:
+ flag = True
+ for res in self._result.get('results', []):
+ if isinstance(res, dict):
+ flag &= res.get('skipped', False)
+ return flag
+ else:
+ return self._result.get('skipped', False)
def is_failed(self):
- if 'failed_when_result' in self._result:
+ if 'failed_when_result' in self._result or \
+ 'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:
return self._check_key('failed_when_result')
else:
return self._check_key('failed') or self._result.get('rc', 0) != 0
@@ -52,10 +60,11 @@ class TaskResult:
return self._check_key('unreachable')
def _check_key(self, key):
- if 'results' in self._result:
+ if 'results' in self._result and self._task.loop:
flag = False
for res in self._result.get('results', []):
if isinstance(res, dict):
flag |= res.get(key, False)
+ return flag
else:
return self._result.get(key, False)
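For looped tasks the per-item dictionaries live under 'results', so is_skipped() now reports skipped only when every item was skipped, while _check_key() ORs flags such as 'changed' or 'failed' across the items. For example:

    # AND for skipped, OR for the other flags, across the loop items.
    loop_result = {'results': [{'changed': True}, {'skipped': True}]}

    all_skipped = all(r.get('skipped', False) for r in loop_result['results'])
    any_changed = any(r.get('changed', False) for r in loop_result['results'])
    print(all_skipped, any_changed)  # False True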
diff --git a/v2/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py
similarity index 100%
rename from v2/ansible/galaxy/__init__.py
rename to lib/ansible/galaxy/__init__.py
diff --git a/v2/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py
old mode 100755
new mode 100644
similarity index 100%
rename from v2/ansible/galaxy/api.py
rename to lib/ansible/galaxy/api.py
diff --git a/lib/ansible/callback_plugins/__init__.py b/lib/ansible/galaxy/data/__init__.py
similarity index 100%
rename from lib/ansible/callback_plugins/__init__.py
rename to lib/ansible/galaxy/data/__init__.py
diff --git a/v2/ansible/galaxy/data/metadata_template.j2 b/lib/ansible/galaxy/data/metadata_template.j2
similarity index 95%
rename from v2/ansible/galaxy/data/metadata_template.j2
rename to lib/ansible/galaxy/data/metadata_template.j2
index 328e13a814c..c6b6fd9dbdf 100644
--- a/v2/ansible/galaxy/data/metadata_template.j2
+++ b/lib/ansible/galaxy/data/metadata_template.j2
@@ -40,6 +40,6 @@ dependencies: []
# List your role dependencies here, one per line.
# Be sure to remove the '[]' above if you add dependencies
# to this list.
- {% for dependency in dependencies %}
+ {%- for dependency in dependencies %}
#- {{ dependency }}
- {% endfor %}
+ {%- endfor %}
diff --git a/v2/ansible/galaxy/data/readme b/lib/ansible/galaxy/data/readme
similarity index 100%
rename from v2/ansible/galaxy/data/readme
rename to lib/ansible/galaxy/data/readme
diff --git a/v2/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
similarity index 86%
rename from v2/ansible/galaxy/role.py
rename to lib/ansible/galaxy/role.py
index b5a628726f5..3a58ccb6d1a 100644
--- a/v2/ansible/galaxy/role.py
+++ b/lib/ansible/galaxy/role.py
@@ -49,7 +49,7 @@ class GalaxyRole(object):
self.name = name
self.version = version
- self.src = src
+ self.src = src or name
self.scm = scm
self.path = (os.path.join(galaxy.roles_path, self.name))
@@ -178,33 +178,34 @@ class GalaxyRole(object):
return False
- def fetch(self, target, role_data):
+ def fetch(self, role_data):
"""
- Downloads the archived role from github to a temp location, extracts
- it, and then copies the extracted role to the role library path.
+ Downloads the archived role from github to a temp location
"""
+ if role_data:
- # first grab the file and save it to a temp location
- if self.src:
- archive_url = self.src
- else:
- archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target)
- self.display.display("- downloading role from %s" % archive_url)
+ # first grab the file and save it to a temp location
+ if "github_user" in role_data and "github_repo" in role_data:
+ archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
+ else:
+ archive_url = self.src
+ self.display.display("- downloading role from %s" % archive_url)
- try:
- url_file = urlopen(archive_url)
- temp_file = tempfile.NamedTemporaryFile(delete=False)
- data = url_file.read()
- while data:
- temp_file.write(data)
+ try:
+ url_file = urlopen(archive_url)
+ temp_file = tempfile.NamedTemporaryFile(delete=False)
data = url_file.read()
- temp_file.close()
- return temp_file.name
- except:
- # TODO: better urllib2 error handling for error
- # messages that are more exact
- self.display.error("failed to download the file.")
- return False
+ while data:
+ temp_file.write(data)
+ data = url_file.read()
+ temp_file.close()
+ return temp_file.name
+ except:
+ # TODO: better urllib2 error handling for error
+ # messages that are more exact
+ self.display.error("failed to download the file.")
+
+ return False
def install(self, role_filename):
# the file is a tar, so open it that way and extract it
@@ -293,3 +294,21 @@ class GalaxyRole(object):
}
"""
return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
+
+
+ @staticmethod
+ def url_to_spec(roleurl):
+ # gets the role name out of a repo like
+ # "http://git.example.com/repos/repo.git" => "repo"
+
+ if '://' not in roleurl and '@' not in roleurl:
+ return roleurl
+ trailing_path = roleurl.split('/')[-1]
+ if trailing_path.endswith('.git'):
+ trailing_path = trailing_path[:-4]
+ if trailing_path.endswith('.tar.gz'):
+ trailing_path = trailing_path[:-7]
+ if ',' in trailing_path:
+ trailing_path = trailing_path.split(',')[0]
+ return trailing_path
+
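url_to_spec only trims the trailing path element, so plain role names pass through untouched while git and tarball URLs reduce to the repository name. Given the checks above:

    GalaxyRole.url_to_spec('geerlingguy.nginx')                        # 'geerlingguy.nginx'
    GalaxyRole.url_to_spec('http://git.example.com/repos/repo.git')    # 'repo'
    GalaxyRole.url_to_spec('https://example.com/files/role.tar.gz')    # 'role'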
diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py
index f012246e227..2bcea0f3519 100644
--- a/lib/ansible/inventory/__init__.py
+++ b/lib/ansible/inventory/__init__.py
@@ -16,36 +16,44 @@
# along with Ansible. If not, see .
#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
import fnmatch
import os
import sys
import re
-import subprocess
+import stat
+
+from ansible import constants as C
+from ansible import errors
-import ansible.constants as C
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
from ansible.inventory.dir import InventoryDirectory
from ansible.inventory.group import Group
from ansible.inventory.host import Host
-from ansible import errors
-from ansible import utils
+from ansible.plugins import vars_loader
+from ansible.utils.path import is_executable
+from ansible.utils.vars import combine_vars
class Inventory(object):
"""
Host inventory for ansible.
"""
- __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
- 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
- '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
+ #__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
+ # 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
+ # '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
- def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None):
+ LOCALHOST_ALIASES = frozenset(('localhost', '127.0.0.1', '::1'))
+ def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):
# the host file file, or script path, or list of hosts
# if a list, inventory data will NOT be loaded
self.host_list = host_list
- self._vault_password=vault_password
+ self._loader = loader
+ self._variable_manager = variable_manager
# caching to avoid repeated calculations, particularly with
# external inventory scripts.
@@ -97,7 +105,7 @@ class Inventory(object):
if os.path.isdir(host_list):
# Ensure basedir is inside the directory
self.host_list = os.path.join(self.host_list, "")
- self.parser = InventoryDirectory(filename=host_list)
+ self.parser = InventoryDirectory(loader=self._loader, filename=host_list)
self.groups = self.parser.groups.values()
else:
# check to see if the specified file starts with a
@@ -105,19 +113,18 @@ class Inventory(object):
# class we can show a more apropos error
shebang_present = False
try:
- inv_file = open(host_list)
- first_line = inv_file.readlines()[0]
- inv_file.close()
- if first_line.startswith('#!'):
- shebang_present = True
- except:
+ with open(host_list, "r") as inv_file:
+ first_line = inv_file.readline()
+ if first_line.startswith("#!"):
+ shebang_present = True
+ except IOError:
pass
- if utils.is_executable(host_list):
+ if is_executable(host_list):
try:
- self.parser = InventoryScript(filename=host_list)
+ self.parser = InventoryScript(loader=self._loader, filename=host_list)
self.groups = self.parser.groups.values()
- except:
+ except errors.AnsibleError:
if not shebang_present:
raise errors.AnsibleError("The file %s is marked as executable, but failed to execute correctly. " % host_list + \
"If this is not supposed to be an executable script, correct this with `chmod -x %s`." % host_list)
@@ -127,26 +134,29 @@ class Inventory(object):
try:
self.parser = InventoryParser(filename=host_list)
self.groups = self.parser.groups.values()
- except:
+ except errors.AnsibleError:
if shebang_present:
raise errors.AnsibleError("The file %s looks like it should be an executable inventory script, but is not marked executable. " % host_list + \
"Perhaps you want to correct this with `chmod +x %s`?" % host_list)
else:
raise
- utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True)
+ vars_loader.add_directory(self.basedir(), with_subdir=True)
else:
- raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?")
+ raise errors.AnsibleError("Unable to find an inventory file (%s), "
+ "specify one with -i ?" % host_list)
- self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ]
+ self._vars_plugins = [ x for x in vars_loader.all(self) ]
+ # FIXME: shouldn't be required, since the group/host vars file
+ # management will be done in VariableManager
# get group vars from group_vars/ files and vars plugins
for group in self.groups:
- group.vars = utils.combine_vars(group.vars, self.get_group_variables(group.name, vault_password=self._vault_password))
+ group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
# get host vars from host_vars/ files and vars plugins
for host in self.get_hosts():
- host.vars = utils.combine_vars(host.vars, self.get_host_variables(host.name, vault_password=self._vault_password))
+ host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
def _match(self, str, pattern_str):
@@ -192,9 +202,9 @@ class Inventory(object):
# exclude hosts mentioned in any restriction (ex: failed hosts)
if self._restriction is not None:
- hosts = [ h for h in hosts if h.name in self._restriction ]
+ hosts = [ h for h in hosts if h in self._restriction ]
if self._also_restriction is not None:
- hosts = [ h for h in hosts if h.name in self._also_restriction ]
+ hosts = [ h for h in hosts if h in self._also_restriction ]
return hosts
@@ -320,6 +330,8 @@ class Inventory(object):
new_host = Host(pattern)
new_host.set_variable("ansible_python_interpreter", sys.executable)
new_host.set_variable("ansible_connection", "local")
+ new_host.ipv4_address = '127.0.0.1'
+
ungrouped = self.get_group("ungrouped")
if ungrouped is None:
self.add_group(Group('ungrouped'))
@@ -349,7 +361,7 @@ class Inventory(object):
for host in group.get_hosts():
__append_host_to_results(host)
else:
- if self._match(group.name, pattern):
+ if self._match(group.name, pattern) and group.name not in ('all', 'ungrouped'):
for host in group.get_hosts():
__append_host_to_results(host)
else:
@@ -357,7 +369,7 @@ class Inventory(object):
for host in matching_hosts:
__append_host_to_results(host)
- if pattern in ["localhost", "127.0.0.1"] and len(results) == 0:
+ if pattern in self.LOCALHOST_ALIASES and len(results) == 0:
new_host = self._create_implicit_localhost(pattern)
results.append(new_host)
return results
@@ -390,12 +402,15 @@ class Inventory(object):
def get_host(self, hostname):
if hostname not in self._hosts_cache:
self._hosts_cache[hostname] = self._get_host(hostname)
+ if hostname in self.LOCALHOST_ALIASES:
+ for host in self.LOCALHOST_ALIASES.difference((hostname,)):
+ self._hosts_cache[host] = self._hosts_cache[hostname]
return self._hosts_cache[hostname]
def _get_host(self, hostname):
- if hostname in ['localhost','127.0.0.1']:
+ if hostname in self.LOCALHOST_ALIASES:
for host in self.get_group('all').get_hosts():
- if host.name in ['localhost', '127.0.0.1']:
+ if host.name in self.LOCALHOST_ALIASES:
return host
return self._create_implicit_localhost(hostname)
else:
@@ -420,7 +435,7 @@ class Inventory(object):
group = self.get_group(groupname)
if group is None:
- raise errors.AnsibleError("group not found: %s" % groupname)
+ raise Exception("group not found: %s" % groupname)
vars = {}
@@ -428,19 +443,19 @@ class Inventory(object):
vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')]
for updated in vars_results:
if updated is not None:
- vars = utils.combine_vars(vars, updated)
+ vars = combine_vars(vars, updated)
# Read group_vars/ files
- vars = utils.combine_vars(vars, self.get_group_vars(group))
+ vars = combine_vars(vars, self.get_group_vars(group))
return vars
- def get_variables(self, hostname, update_cached=False, vault_password=None):
+ def get_vars(self, hostname, update_cached=False, vault_password=None):
host = self.get_host(hostname)
if not host:
- raise errors.AnsibleError("host not found: %s" % hostname)
- return host.get_variables()
+ raise Exception("host not found: %s" % hostname)
+ return host.get_vars()
def get_host_variables(self, hostname, update_cached=False, vault_password=None):
@@ -460,22 +475,22 @@ class Inventory(object):
vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
for updated in vars_results:
if updated is not None:
- vars = utils.combine_vars(vars, updated)
+ vars = combine_vars(vars, updated)
# plugin.get_host_vars retrieves just vars for specific host
vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
for updated in vars_results:
if updated is not None:
- vars = utils.combine_vars(vars, updated)
+ vars = combine_vars(vars, updated)
# still need to check InventoryParser per host vars
# which actually means InventoryScript per host,
# which is not performant
if self.parser is not None:
- vars = utils.combine_vars(vars, self.parser.get_host_variables(host))
+ vars = combine_vars(vars, self.parser.get_host_variables(host))
# Read host_vars/ files
- vars = utils.combine_vars(vars, self.get_host_vars(host))
+ vars = combine_vars(vars, self.get_host_vars(host))
return vars
@@ -490,19 +505,15 @@ class Inventory(object):
""" return a list of hostnames for a pattern """
- result = [ h.name for h in self.get_hosts(pattern) ]
- if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]:
+ result = [ h for h in self.get_hosts(pattern) ]
+ if len(result) == 0 and pattern in self.LOCALHOST_ALIASES:
result = [pattern]
return result
def list_groups(self):
return sorted([ g.name for g in self.groups ], key=lambda x: x)
- # TODO: remove this function
- def get_restriction(self):
- return self._restriction
-
- def restrict_to(self, restriction):
+ def restrict_to_hosts(self, restriction):
"""
Restrict list operations to the hosts given in restriction. This is used
to exclude failed hosts in main playbook code, don't use this for other
@@ -544,7 +555,7 @@ class Inventory(object):
results.append(x)
self._subset = results
- def lift_restriction(self):
+ def remove_restriction(self):
""" Do not restrict list operations """
self._restriction = None
@@ -578,20 +589,27 @@ class Inventory(object):
""" returns the directory of the current playbook """
return self._playbook_basedir
- def set_playbook_basedir(self, dir):
+ def set_playbook_basedir(self, dir_name):
"""
sets the base directory of the playbook so inventory can use it as a
basedir for host_ and group_vars, and other things.
"""
# Only update things if dir is a different playbook basedir
- if dir != self._playbook_basedir:
- self._playbook_basedir = dir
+ if dir_name != self._playbook_basedir:
+ self._playbook_basedir = dir_name
# get group vars from group_vars/ files
+ # FIXME: excluding the new_pb_basedir directory may result in group_vars
+ # files loading more than they should, however with the file caching
+ # we do this shouldn't be too much of an issue. Still, this should
+ # be fixed at some point to allow a "first load" to touch all of the
+ # directories, then later runs only touch the new basedir specified
for group in self.groups:
- group.vars = utils.combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
+ #group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
+ group.vars = combine_vars(group.vars, self.get_group_vars(group))
# get host vars from host_vars/ files
for host in self.get_hosts():
- host.vars = utils.combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
+ #host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
+ host.vars = combine_vars(host.vars, self.get_host_vars(host))
# invalidate cache
self._vars_per_host = {}
self._vars_per_group = {}
@@ -627,7 +645,7 @@ class Inventory(object):
# this can happen from particular API usages, particularly if not run
# from /usr/bin/ansible-playbook
if basedir is None:
- continue
+ basedir = './'
scan_pass = scan_pass + 1
@@ -639,15 +657,15 @@ class Inventory(object):
if _basedir == self._playbook_basedir and scan_pass != 1:
continue
+ # FIXME: these should go to VariableManager
if group and host is None:
# load vars in dir/group_vars/name_of_group
base_path = os.path.join(basedir, "group_vars/%s" % group.name)
- results = utils.load_vars(base_path, results, vault_password=self._vault_password)
-
+ results = self._variable_manager.add_group_vars_file(base_path, self._loader)
elif host and group is None:
# same for hostvars in dir/host_vars/name_of_host
base_path = os.path.join(basedir, "host_vars/%s" % host.name)
- results = utils.load_vars(base_path, results, vault_password=self._vault_password)
+ results = self._variable_manager.add_host_vars_file(base_path, self._loader)
# all done, results is a dictionary of variables for this particular host.
return results
diff --git a/lib/ansible/inventory/dir.py b/lib/ansible/inventory/dir.py
index 9ac23fff899..735f32d62c3 100644
--- a/lib/ansible/inventory/dir.py
+++ b/lib/ansible/inventory/dir.py
@@ -17,20 +17,25 @@
# along with Ansible. If not, see .
#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import os
-import ansible.constants as C
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
-from ansible import utils
-from ansible import errors
+from ansible.utils.path import is_executable
+from ansible.utils.vars import combine_vars
class InventoryDirectory(object):
''' Host inventory parser for ansible using a directory of inventories. '''
- def __init__(self, filename=C.DEFAULT_HOST_LIST):
+ def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
self.names = os.listdir(filename)
self.names.sort()
self.directory = filename
@@ -38,10 +43,12 @@ class InventoryDirectory(object):
self.hosts = {}
self.groups = {}
+ self._loader = loader
+
for i in self.names:
# Skip files that end with certain extensions or characters
- if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
+ if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo")):
continue
# Skip hidden files
if i.startswith('.') and not i.startswith('./'):
@@ -51,9 +58,9 @@ class InventoryDirectory(object):
continue
fullpath = os.path.join(self.directory, i)
if os.path.isdir(fullpath):
- parser = InventoryDirectory(filename=fullpath)
- elif utils.is_executable(fullpath):
- parser = InventoryScript(filename=fullpath)
+ parser = InventoryDirectory(loader=loader, filename=fullpath)
+ elif is_executable(fullpath):
+ parser = InventoryScript(loader=loader, filename=fullpath)
else:
parser = InventoryParser(filename=fullpath)
self.parsers.append(parser)
@@ -153,7 +160,7 @@ class InventoryDirectory(object):
# name
if group.name != newgroup.name:
- raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
+ raise AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
# depth
group.depth = max([group.depth, newgroup.depth])
@@ -196,14 +203,14 @@ class InventoryDirectory(object):
self.groups[newparent.name].add_child_group(group)
# variables
- group.vars = utils.combine_vars(group.vars, newgroup.vars)
+ group.vars = combine_vars(group.vars, newgroup.vars)
def _merge_hosts(self,host, newhost):
""" Merge all of instance newhost into host """
# name
if host.name != newhost.name:
- raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
+ raise AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
# group membership relation
for newgroup in newhost.groups:
@@ -218,7 +225,7 @@ class InventoryDirectory(object):
self.groups[newgroup.name].add_host(host)
# variables
- host.vars = utils.combine_vars(host.vars, newhost.vars)
+ host.vars = combine_vars(host.vars, newhost.vars)
def get_host_variables(self, host):
""" Gets additional host variables from all inventories """
diff --git a/lib/ansible/inventory/expand_hosts.py b/lib/ansible/inventory/expand_hosts.py
index f1297409355..b5a957c53fe 100644
--- a/lib/ansible/inventory/expand_hosts.py
+++ b/lib/ansible/inventory/expand_hosts.py
@@ -30,6 +30,9 @@ expanded into 001, 002 ...009, 010.
Note that when beg is specified with left zero padding, then the length of
end must be the same as that of beg, else an exception is raised.
'''
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
import string
from ansible import errors
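This module expands inventory ranges such as db[001:009] while preserving left zero padding. A standalone sketch of the numeric expansion it describes (not the module's actual helper):

    # '[001:003]' style expansion with left zero padding preserved.
    def expand_numeric_range(prefix, beg, end, suffix=''):
        if beg.startswith('0') and len(beg) != len(end):
            raise ValueError("zero-padded bounds must have the same length")
        width = len(beg)
        return ['%s%0*d%s' % (prefix, width, i, suffix)
                for i in range(int(beg), int(end) + 1)]

    print(expand_numeric_range('db', '001', '003'))  # ['db001', 'db002', 'db003']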
diff --git a/lib/ansible/inventory/group.py b/lib/ansible/inventory/group.py
index 262558e69c8..8dbda631560 100644
--- a/lib/ansible/inventory/group.py
+++ b/lib/ansible/inventory/group.py
@@ -14,11 +14,16 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-class Group(object):
+from ansible.errors import AnsibleError
+from ansible.utils.debug import debug
+
+class Group:
''' a group of ansible hosts '''
- __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
+ #__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
def __init__(self, name=None):
@@ -29,9 +34,47 @@ class Group(object):
self.child_groups = []
self.parent_groups = []
self._hosts_cache = None
+
#self.clear_hosts_cache()
- if self.name is None:
- raise Exception("group name is required")
+ #if self.name is None:
+ # raise Exception("group name is required")
+
+ def __repr__(self):
+ return self.get_name()
+
+ def __getstate__(self):
+ return self.serialize()
+
+ def __setstate__(self, data):
+ return self.deserialize(data)
+
+ def serialize(self):
+ parent_groups = []
+ for parent in self.parent_groups:
+ parent_groups.append(parent.serialize())
+
+ result = dict(
+ name=self.name,
+ vars=self.vars.copy(),
+ parent_groups=parent_groups,
+ depth=self.depth,
+ )
+
+ return result
+
+ def deserialize(self, data):
+ self.__init__()
+ self.name = data.get('name')
+ self.vars = data.get('vars', dict())
+
+ parent_groups = data.get('parent_groups', [])
+ for parent_data in parent_groups:
+ g = Group()
+ g.deserialize(parent_data)
+ self.parent_groups.append(g)
+
+ def get_name(self):
+ return self.name
def add_child_group(self, group):
@@ -57,9 +100,12 @@ class Group(object):
def _check_children_depth(self):
- for group in self.child_groups:
- group.depth = max([self.depth+1, group.depth])
- group._check_children_depth()
+ try:
+ for group in self.child_groups:
+ group.depth = max([self.depth+1, group.depth])
+ group._check_children_depth()
+ except RuntimeError:
+ raise AnsibleError("The group named '%s' has a recursive dependency loop." % self.name)
def add_host(self, host):
@@ -100,7 +146,7 @@ class Group(object):
hosts.append(mine)
return hosts
- def get_variables(self):
+ def get_vars(self):
return self.vars.copy()
def _get_ancestors(self):
diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py
index d4dc20fa462..43a96d54bfb 100644
--- a/lib/ansible/inventory/host.py
+++ b/lib/ansible/inventory/host.py
@@ -15,24 +15,83 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-import ansible.constants as C
-from ansible import utils
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-class Host(object):
+from ansible.inventory.group import Group
+from ansible.utils.vars import combine_vars
+
+__all__ = ['Host']
+
+class Host:
''' a single ansible host '''
- __slots__ = [ 'name', 'vars', 'groups' ]
+ #__slots__ = [ 'name', 'vars', 'groups' ]
+
+ def __getstate__(self):
+ return self.serialize()
+
+ def __setstate__(self, data):
+ return self.deserialize(data)
+
+ def __eq__(self, other):
+ return self.name == other.name
+
+ def serialize(self):
+ groups = []
+ for group in self.groups:
+ groups.append(group.serialize())
+
+ return dict(
+ name=self.name,
+ vars=self.vars.copy(),
+ ipv4_address=self.ipv4_address,
+ ipv6_address=self.ipv6_address,
+ gathered_facts=self._gathered_facts,
+ groups=groups,
+ )
+
+ def deserialize(self, data):
+ self.__init__()
+
+ self.name = data.get('name')
+ self.vars = data.get('vars', dict())
+ self.ipv4_address = data.get('ipv4_address', '')
+ self.ipv6_address = data.get('ipv6_address', '')
+
+ groups = data.get('groups', [])
+ for group_data in groups:
+ g = Group()
+ g.deserialize(group_data)
+ self.groups.append(g)
def __init__(self, name=None, port=None):
self.name = name
self.vars = {}
self.groups = []
- if port and port != C.DEFAULT_REMOTE_PORT:
+
+ self.ipv4_address = name
+ self.ipv6_address = name
+
+ if port:
self.set_variable('ansible_ssh_port', int(port))
- if self.name is None:
- raise Exception("host name is required")
+ self._gathered_facts = False
+
+ def __repr__(self):
+ return self.get_name()
+
+ def get_name(self):
+ return self.name
+
+ @property
+ def gathered_facts(self):
+ return self._gathered_facts
+
+ def set_gathered_facts(self, gathered):
+ self._gathered_facts = gathered
def add_group(self, group):
@@ -52,16 +111,15 @@ class Host(object):
groups[a.name] = a
return groups.values()
- def get_variables(self):
+ def get_vars(self):
results = {}
groups = self.get_groups()
for group in sorted(groups, key=lambda g: g.depth):
- results = utils.combine_vars(results, group.get_variables())
- results = utils.combine_vars(results, self.vars)
+ results = combine_vars(results, group.get_vars())
+ results = combine_vars(results, self.vars)
results['inventory_hostname'] = self.name
results['inventory_hostname_short'] = self.name.split('.')[0]
results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
return results
-
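get_vars() merges group variables in depth order (parents before children) and applies the host's own vars last, so a child group overrides its parents and host vars override everything. For example:

    # Precedence sketch: all < child group < host.
    def combine(*dicts):
        merged = {}
        for d in dicts:
            merged.update(d)
        return merged

    all_vars   = {'pkg': 'base', 'port': 80}
    child_vars = {'pkg': 'nginx'}
    host_vars  = {'port': 8080}
    print(combine(all_vars, child_vars, host_vars))  # pkg -> 'nginx', port -> 8080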
diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py
index bd9a98e7f86..f4762bad2fd 100644
--- a/lib/ansible/inventory/ini.py
+++ b/lib/ansible/inventory/ini.py
@@ -16,17 +16,20 @@
# along with Ansible. If not, see .
#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-import ansible.constants as C
+import ast
+import shlex
+import re
+
+from ansible import constants as C
+from ansible.errors import *
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
-from ansible import errors
-from ansible import utils
-import shlex
-import re
-import ast
+from ansible.utils.unicode import to_unicode
class InventoryParser(object):
"""
@@ -34,9 +37,8 @@ class InventoryParser(object):
"""
def __init__(self, filename=C.DEFAULT_HOST_LIST):
-
+ self.filename = filename
with open(filename) as fh:
- self.filename = filename
self.lines = fh.readlines()
self.groups = {}
self.hosts = {}
@@ -54,10 +56,7 @@ class InventoryParser(object):
def _parse_value(v):
if "#" not in v:
try:
- ret = ast.literal_eval(v)
- if not isinstance(ret, float):
- # Do not trim floats. Eg: "1.20" to 1.2
- return ret
+ v = ast.literal_eval(v)
# Using explicit exceptions.
# Likely a string that literal_eval does not like. We will then just set it.
except ValueError:
@@ -66,7 +65,7 @@ class InventoryParser(object):
except SyntaxError:
# Is this a hash with an equals at the end?
pass
- return v
+ return to_unicode(v, nonstring='passthru', errors='strict')
# [webservers]
# alpha
@@ -91,8 +90,8 @@ class InventoryParser(object):
self.groups = dict(all=all, ungrouped=ungrouped)
active_group_name = 'ungrouped'
- for lineno in range(len(self.lines)):
- line = utils.before_comment(self.lines[lineno]).strip()
+ for line in self.lines:
+ line = self._before_comment(line).strip()
if line.startswith("[") and line.endswith("]"):
active_group_name = line.replace("[","").replace("]","")
if ":vars" in line or ":children" in line:
@@ -109,7 +108,7 @@ class InventoryParser(object):
if len(tokens) == 0:
continue
hostname = tokens[0]
- port = C.DEFAULT_REMOTE_PORT
+ port = None
# Three cases to check:
# 0. A hostname that contains a range pseudo-code and a port
# 1. A hostname that contains just a port
@@ -146,8 +145,11 @@ class InventoryParser(object):
try:
(k,v) = t.split("=", 1)
except ValueError, e:
- raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e)))
- host.set_variable(k, self._parse_value(v))
+ raise AnsibleError("Invalid ini entry in %s: %s - %s" % (self.filename, t, str(e)))
+ v = self._parse_value(v)
+ if k == 'ansible_ssh_host':
+ host.ipv4_address = v
+ host.set_variable(k, v)
self.groups[active_group_name].add_host(host)
# [southeast:children]
@@ -157,8 +159,8 @@ class InventoryParser(object):
def _parse_group_children(self):
group = None
- for lineno in range(len(self.lines)):
- line = self.lines[lineno].strip()
+ for line in self.lines:
+ line = line.strip()
if line is None or line == '':
continue
if line.startswith("[") and ":children]" in line:
@@ -173,7 +175,7 @@ class InventoryParser(object):
elif group:
kid_group = self.groups.get(line, None)
if kid_group is None:
- raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line))
+ raise AnsibleError("child group is not defined: (%s)" % line)
else:
group.add_child_group(kid_group)
@@ -184,13 +186,13 @@ class InventoryParser(object):
def _parse_group_variables(self):
group = None
- for lineno in range(len(self.lines)):
- line = self.lines[lineno].strip()
+ for line in self.lines:
+ line = line.strip()
if line.startswith("[") and ":vars]" in line:
line = line.replace("[","").replace(":vars]","")
group = self.groups.get(line, None)
if group is None:
- raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line))
+ raise AnsibleError("can't add vars to undefined group: %s" % line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
@@ -199,10 +201,18 @@ class InventoryParser(object):
pass
elif group:
if "=" not in line:
- raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1))
+ raise AnsibleError("variables assigned to group must be in key=value form")
else:
(k, v) = [e.strip() for e in line.split("=", 1)]
group.set_variable(k, self._parse_value(v))
def get_host_variables(self, host):
return {}
+
+ def _before_comment(self, msg):
+ ''' what's the part of a string before a comment? '''
+ msg = msg.replace("\#","**NOT_A_COMMENT**")
+ msg = msg.split("#")[0]
+ msg = msg.replace("**NOT_A_COMMENT**","#")
+ return msg
+
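_before_comment keeps everything up to the first unescaped '#', so '\#' survives as a literal hash inside variable values. A standalone mirror of the helper with two examples:

    def before_comment(msg):
        msg = msg.replace("\\#", "**NOT_A_COMMENT**")
        msg = msg.split("#")[0]
        return msg.replace("**NOT_A_COMMENT**", "#")

    print(before_comment('web1 color=red  # prod box'))    # 'web1 color=red  '
    print(before_comment('web1 motd=hello\\#world'))        # 'web1 motd=hello#world'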
diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py
index b83cb9bcc7a..91549d78fb2 100644
--- a/lib/ansible/inventory/script.py
+++ b/lib/ansible/inventory/script.py
@@ -16,22 +16,28 @@
# along with Ansible. If not, see .
#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import os
import subprocess
-import ansible.constants as C
+import sys
+
+from collections import Mapping
+
+from ansible import constants as C
+from ansible.errors import *
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_bytes_to_unicode
-from ansible import utils
-from ansible import errors
-import sys
-class InventoryScript(object):
+class InventoryScript:
''' Host inventory parser for ansible using external inventory scripts. '''
- def __init__(self, filename=C.DEFAULT_HOST_LIST):
+ def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
+
+ self._loader = loader
# Support inventory scripts that are not prefixed with some
# path information but happen to be in the current working
@@ -41,11 +47,11 @@ class InventoryScript(object):
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
+ raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(stdout, stderr) = sp.communicate()
if sp.returncode != 0:
- raise errors.AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
+ raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
self.data = stdout
# see comment about _meta below
@@ -58,7 +64,16 @@ class InventoryScript(object):
all_hosts = {}
# not passing from_remote because data from CMDB is trusted
- self.raw = utils.parse_json(self.data)
+ try:
+ self.raw = self._loader.load(self.data)
+ except Exception as e:
+ sys.stderr.write(str(e) + "\n")
+ raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(self.filename, str(e)))
+
+ if not isinstance(self.raw, Mapping):
+ sys.stderr.write(str(self.raw) + "\n")
+ raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(self.filename))
+
self.raw = json_dict_bytes_to_unicode(self.raw)
all = Group('all')
@@ -66,10 +81,6 @@ class InventoryScript(object):
group = None
- if 'failed' in self.raw:
- sys.stderr.write(err + "\n")
- raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
-
for (group_name, data) in self.raw.items():
# in Ansible 1.3 and later, a "_meta" subelement may contain
@@ -92,12 +103,12 @@ class InventoryScript(object):
if not isinstance(data, dict):
data = {'hosts': data}
# if it has none of those subkeys, it is the simplified syntax: a host with vars
- elif not any(k in data for k in ('hosts','vars','children')):
+ elif not any(k in data for k in ('hosts','vars')):
data = {'hosts': [group_name], 'vars': data}
if 'hosts' in data:
if not isinstance(data['hosts'], list):
- raise errors.AnsibleError("You defined a group \"%s\" with bad "
+ raise AnsibleError("You defined a group \"%s\" with bad "
"data for the host list:\n %s" % (group_name, data))
for hostname in data['hosts']:
@@ -108,7 +119,7 @@ class InventoryScript(object):
if 'vars' in data:
if not isinstance(data['vars'], dict):
- raise errors.AnsibleError("You defined a group \"%s\" with bad "
+ raise AnsibleError("You defined a group \"%s\" with bad "
"data for variables:\n %s" % (group_name, data))
for k, v in data['vars'].iteritems():
@@ -143,12 +154,12 @@ class InventoryScript(object):
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
+ raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(out, err) = sp.communicate()
if out.strip() == '':
return dict()
try:
- return json_dict_bytes_to_unicode(utils.parse_json(out))
+ return json_dict_bytes_to_unicode(self._loader.load(out))
except ValueError:
- raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
+ raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
diff --git a/lib/ansible/inventory/vars_plugins/noop.py b/lib/ansible/inventory/vars_plugins/noop.py
index 5d4b4b6658c..8f0c98cad56 100644
--- a/lib/ansible/inventory/vars_plugins/noop.py
+++ b/lib/ansible/inventory/vars_plugins/noop.py
@@ -15,6 +15,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
class VarsModule(object):
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index e772a12efce..3c2def324a2 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -45,7 +45,7 @@ SELINUX_SPECIAL_FS="<>"
# can be inserted in any module source automatically by including
# #<> on a blank line by itself inside
# of an ansible module. The source of this common code lives
-# in lib/ansible/module_common.py
+# in ansible/executor/module_common.py
import locale
import os
@@ -66,18 +66,25 @@ import grp
import pwd
import platform
import errno
-import tempfile
+from itertools import imap, repeat
try:
import json
+ # Detect the python-json library which is incompatible
+ # Look for simplejson if that's the case
+ try:
+ if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
+ raise ImportError
+ except AttributeError:
+ raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
- sys.stderr.write('Error: ansible requires a json module, none found!')
+ print('{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
- sys.stderr.write('SyntaxError: probably due to json and python being for different versions')
+ print('{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
HAVE_SELINUX=False
@@ -112,7 +119,6 @@ try:
from systemd import journal
has_journal = True
except ImportError:
- import syslog
has_journal = False
try:
@@ -120,10 +126,10 @@ try:
except ImportError:
# a replacement for literal_eval that works with python 2.4. from:
# https://mail.python.org/pipermail/python-list/2009-September/551880.html
- # which is essentially a cut/past from an earlier (2.6) version of python's
+ # which is essentially a cut/paste from an earlier (2.6) version of python's
# ast.py
- from compiler import parse
- from compiler.ast import *
+ from compiler import ast, parse
+
def _literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
@@ -134,21 +140,22 @@ except ImportError:
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
- if isinstance(node_or_string, Expression):
+ if isinstance(node_or_string, ast.Expression):
node_or_string = node_or_string.node
+
def _convert(node):
- if isinstance(node, Const) and isinstance(node.value, (basestring, int, float, long, complex)):
- return node.value
- elif isinstance(node, Tuple):
+ if isinstance(node, ast.Const) and isinstance(node.value, (basestring, int, float, long, complex)):
+ return node.value
+ elif isinstance(node, ast.Tuple):
return tuple(map(_convert, node.nodes))
- elif isinstance(node, List):
+ elif isinstance(node, ast.List):
return list(map(_convert, node.nodes))
- elif isinstance(node, Dict):
+ elif isinstance(node, ast.Dict):
return dict((_convert(k), _convert(v)) for k, v in node.items)
- elif isinstance(node, Name):
+ elif isinstance(node, ast.Name):
if node.name in _safe_names:
return _safe_names[node.name]
- elif isinstance(node, UnarySub):
+ elif isinstance(node, ast.UnarySub):
return -_convert(node.expr)
raise ValueError('malformed string')
return _convert(node_or_string)
@@ -237,7 +244,7 @@ def load_platform_subclass(cls, *args, **kwargs):
return super(cls, subclass).__new__(subclass)
-def json_dict_unicode_to_bytes(d):
+def json_dict_unicode_to_bytes(d, encoding='utf-8'):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples,
@@ -245,17 +252,17 @@ def json_dict_unicode_to_bytes(d):
'''
if isinstance(d, unicode):
- return d.encode('utf-8')
+ return d.encode(encoding)
elif isinstance(d, dict):
- return dict(map(json_dict_unicode_to_bytes, d.iteritems()))
+ return dict(imap(json_dict_unicode_to_bytes, d.iteritems(), repeat(encoding)))
elif isinstance(d, list):
- return list(map(json_dict_unicode_to_bytes, d))
+ return list(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
elif isinstance(d, tuple):
- return tuple(map(json_dict_unicode_to_bytes, d))
+ return tuple(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
else:
return d
-def json_dict_bytes_to_unicode(d):
+def json_dict_bytes_to_unicode(d, encoding='utf-8'):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples,
@@ -263,13 +270,13 @@ def json_dict_bytes_to_unicode(d):
'''
if isinstance(d, str):
- return unicode(d, 'utf-8')
+ return unicode(d, encoding)
elif isinstance(d, dict):
- return dict(map(json_dict_bytes_to_unicode, d.iteritems()))
+ return dict(imap(json_dict_bytes_to_unicode, d.iteritems(), repeat(encoding)))
elif isinstance(d, list):
- return list(map(json_dict_bytes_to_unicode, d))
+ return list(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
elif isinstance(d, tuple):
- return tuple(map(json_dict_bytes_to_unicode, d))
+ return tuple(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
else:
return d
@@ -351,9 +358,9 @@ class AnsibleModule(object):
self.check_mode = False
self.no_log = no_log
self.cleanup_files = []
-
+
self.aliases = {}
-
+
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.iteritems():
if k not in self.argument_spec:
@@ -363,10 +370,10 @@ class AnsibleModule(object):
# reset to LANG=C if it's an invalid/unavailable locale
self._check_locale()
- (self.params, self.args) = self._load_params()
+ self.params = self._load_params()
+
+ self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log']
- self._legal_inputs = ['CHECKMODE', 'NO_LOG']
-
self.aliases = self._handle_aliases()
if check_invalid_arguments:
@@ -380,10 +387,20 @@ class AnsibleModule(object):
self._set_defaults(pre=True)
+
+ self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
+ 'str': self._check_type_str,
+ 'list': self._check_type_list,
+ 'dict': self._check_type_dict,
+ 'bool': self._check_type_bool,
+ 'int': self._check_type_int,
+ 'float': self._check_type_float,
+ 'path': self._check_type_path,
+ }
if not bypass_checks:
self._check_required_arguments()
- self._check_argument_values()
self._check_argument_types()
+ self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
@@ -579,7 +596,7 @@ class AnsibleModule(object):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
- if context[i] is None:
+ elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
@@ -588,8 +605,8 @@ class AnsibleModule(object):
return True
rc = selinux.lsetfilecon(self._to_filesystem_str(path),
str(':'.join(new_context)))
- except OSError:
- self.fail_json(path=path, msg='invalid selinux context', new_context=new_context, cur_context=cur_context, input_was=context)
+ except OSError, e:
+ self.fail_json(path=path, msg='invalid selinux context: %s' % str(e), new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
@@ -679,7 +696,6 @@ class AnsibleModule(object):
new_underlying_stat = os.stat(path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(path, stat.S_IMODE(underlying_stat.st_mode))
- q_stat = os.stat(path)
except OSError, e:
if os.path.islink(path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
@@ -708,7 +724,8 @@ class AnsibleModule(object):
operator = match.group('operator')
perms = match.group('perms')
- if users == 'a': users = 'ugo'
+ if users == 'a':
+ users = 'ugo'
for user in users:
mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms)
@@ -898,21 +915,21 @@ class AnsibleModule(object):
def _check_for_check_mode(self):
for (k,v) in self.params.iteritems():
- if k == 'CHECKMODE':
+ if k == '_ansible_check_mode' and v:
if not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module does not support check mode")
- if self.supports_check_mode:
- self.check_mode = True
+ self.check_mode = True
+ break
def _check_for_no_log(self):
for (k,v) in self.params.iteritems():
- if k == 'NO_LOG':
+ if k == '_ansible_no_log':
self.no_log = self.boolean(v)
def _check_invalid_arguments(self):
for (k,v) in self.params.iteritems():
# these should be in legal inputs already
- #if k in ('CHECKMODE', 'NO_LOG'):
+ #if k in ('_ansible_check_mode', '_ansible_no_log'):
# continue
if k not in self._legal_inputs:
self.fail_json(msg="unsupported parameter for module: %s" % k)
@@ -930,7 +947,7 @@ class AnsibleModule(object):
for check in spec:
count = self._count_terms(check)
if count > 1:
- self.fail_json(msg="parameters are mutually exclusive: %s" % check)
+ self.fail_json(msg="parameters are mutually exclusive: %s" % (check,))
def _check_required_one_of(self, spec):
if spec is None:
@@ -948,7 +965,7 @@ class AnsibleModule(object):
non_zero = [ c for c in counts if c > 0 ]
if len(non_zero) > 0:
if 0 in counts:
- self.fail_json(msg="parameters are required together: %s" % check)
+ self.fail_json(msg="parameters are required together: %s" % (check,))
def _check_required_arguments(self):
''' ensure all required arguments are present '''
@@ -968,7 +985,7 @@ class AnsibleModule(object):
missing = []
if key in self.params and self.params[key] == val:
for check in requirements:
- count = self._count_terms(check)
+ count = self._count_terms((check,))
if count == 0:
missing.append(check)
if len(missing) > 0:
@@ -1021,6 +1038,101 @@ class AnsibleModule(object):
return (str, e)
return str
+ def _check_type_str(self, value):
+ if isinstance(value, basestring):
+ return value
+ # Note: This could throw a unicode error if value's __str__() method
+ # returns non-ascii. Have to port utils.to_bytes() if that happens
+ return str(value)
+
+ def _check_type_list(self, value):
+ if isinstance(value, list):
+ return value
+
+ if isinstance(value, basestring):
+ return value.split(",")
+ elif isinstance(value, int) or isinstance(value, float):
+ return [ str(value) ]
+
+ raise TypeError('%s cannot be converted to a list' % type(value))
+
+ def _check_type_dict(self, value):
+ if isinstance(value, dict):
+ return value
+
+ if isinstance(value, basestring):
+ if value.startswith("{"):
+ try:
+ return json.loads(value)
+ except:
+ (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
+ if exc is not None:
+ raise TypeError('unable to evaluate string as dictionary')
+ return result
+ elif '=' in value:
+ fields = []
+ field_buffer = []
+ in_quote = False
+ in_escape = False
+ for c in value.strip():
+ if in_escape:
+ field_buffer.append(c)
+ in_escape = False
+ elif c == '\\':
+ in_escape = True
+ elif not in_quote and c in ('\'', '"'):
+ in_quote = c
+ elif in_quote and in_quote == c:
+ in_quote = False
+ elif not in_quote and c in (',', ' '):
+ field = ''.join(field_buffer)
+ if field:
+ fields.append(field)
+ field_buffer = []
+ else:
+ field_buffer.append(c)
+
+ field = ''.join(field_buffer)
+ if field:
+ fields.append(field)
+ return dict(x.split("=", 1) for x in fields)
+ else:
+ raise TypeError("dictionary requested, could not parse JSON or key=value")
+
+ raise TypeError('%s cannot be converted to a dict' % type(value))
+
+ def _check_type_bool(self, value):
+ if isinstance(value, bool):
+ return value
+
+ if isinstance(value, basestring):
+ return self.boolean(value)
+
+ raise TypeError('%s cannot be converted to a bool' % type(value))
+
+ def _check_type_int(self, value):
+ if isinstance(value, int):
+ return value
+
+ if isinstance(value, basestring):
+ return int(value)
+
+ raise TypeError('%s cannot be converted to an int' % type(value))
+
+ def _check_type_float(self, value):
+ if isinstance(value, float):
+ return value
+
+ if isinstance(value, basestring):
+ return float(value)
+
+ raise TypeError('%s cannot be converted to a float' % type(value))
+
+ def _check_type_path(self, value):
+ value = self._check_type_str(value)
+ return os.path.expanduser(os.path.expandvars(value))
+
+
def _check_argument_types(self):
''' ensure all arguments have the requested type '''
for (k, v) in self.argument_spec.iteritems():
@@ -1031,62 +1143,15 @@ class AnsibleModule(object):
continue
value = self.params[k]
- is_invalid = False
try:
- if wanted == 'str':
- if not isinstance(value, basestring):
- self.params[k] = str(value)
- elif wanted == 'list':
- if not isinstance(value, list):
- if isinstance(value, basestring):
- self.params[k] = value.split(",")
- elif isinstance(value, int) or isinstance(value, float):
- self.params[k] = [ str(value) ]
- else:
- is_invalid = True
- elif wanted == 'dict':
- if not isinstance(value, dict):
- if isinstance(value, basestring):
- if value.startswith("{"):
- try:
- self.params[k] = json.loads(value)
- except:
- (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
- if exc is not None:
- self.fail_json(msg="unable to evaluate dictionary for %s" % k)
- self.params[k] = result
- elif '=' in value:
- self.params[k] = dict([x.strip().split("=", 1) for x in value.split(",")])
- else:
- self.fail_json(msg="dictionary requested, could not parse JSON or key=value")
- else:
- is_invalid = True
- elif wanted == 'bool':
- if not isinstance(value, bool):
- if isinstance(value, basestring):
- self.params[k] = self.boolean(value)
- else:
- is_invalid = True
- elif wanted == 'int':
- if not isinstance(value, int):
- if isinstance(value, basestring):
- self.params[k] = int(value)
- else:
- is_invalid = True
- elif wanted == 'float':
- if not isinstance(value, float):
- if isinstance(value, basestring):
- self.params[k] = float(value)
- else:
- is_invalid = True
- else:
- self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
-
- if is_invalid:
- self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted))
- except ValueError, e:
- self.fail_json(msg="value of argument %s is not of type %s and we were unable to automatically convert" % (k, wanted))
+ type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
+ except KeyError:
+ self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
+ try:
+ self.params[k] = type_checker(value)
+ except (TypeError, ValueError):
+ self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s" % (k, type(value), wanted))
def _set_defaults(self, pre=True):
for (k,v) in self.argument_spec.iteritems():
@@ -1102,20 +1167,11 @@ class AnsibleModule(object):
def _load_params(self):
''' read the input and return a dictionary and the arguments string '''
- args = MODULE_ARGS
- items = shlex.split(args)
- params = {}
- for x in items:
- try:
- (k, v) = x.split("=",1)
- except Exception, e:
- self.fail_json(msg="this module requires key=value arguments (%s)" % (items))
- if k in params:
- self.fail_json(msg="duplicate parameter: %s (value=%s)" % (k, v))
- params[k] = v
- params2 = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
- params2.update(params)
- return (params2, args)
+ params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
+ if params is None:
+ params = dict()
+ return params
+
def _log_invocation(self):
''' log that ansible ran the module '''
@@ -1166,13 +1222,13 @@ class AnsibleModule(object):
journal_args.append((arg.upper(), str(log_args[arg])))
try:
journal.send("%s %s" % (module, msg), **dict(journal_args))
- except IOError, e:
+ except IOError:
# fall back to syslog since logging to journal failed
syslog.openlog(str(module), 0, syslog.LOG_USER)
- syslog.syslog(syslog.LOG_NOTICE, msg) #1
+ syslog.syslog(syslog.LOG_INFO, msg) #1
else:
syslog.openlog(str(module), 0, syslog.LOG_USER)
- syslog.syslog(syslog.LOG_NOTICE, msg) #2
+ syslog.syslog(syslog.LOG_INFO, msg) #2
def _set_cwd(self):
try:
@@ -1236,13 +1292,17 @@ class AnsibleModule(object):
self.fail_json(msg='Boolean %s not in either boolean list' % arg)
def jsonify(self, data):
- for encoding in ("utf-8", "latin-1", "unicode_escape"):
+ for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding)
- # Old systems using simplejson module does not support encoding keyword.
- except TypeError, e:
- return json.dumps(data)
- except UnicodeDecodeError, e:
+ # Old systems using an old simplejson module do not support the encoding keyword.
+ except TypeError:
+ try:
+ new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
+ except UnicodeDecodeError:
+ continue
+ return json.dumps(new_data)
+ except UnicodeDecodeError:
continue
self.fail_json(msg='Invalid unicode encoding encountered')
@@ -1383,8 +1443,9 @@ class AnsibleModule(object):
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(src, dest)
except (IOError,OSError), e:
- # only try workarounds for errno 18 (cross device), 1 (not permitted) and 13 (permission denied)
- if e.errno != errno.EPERM and e.errno != errno.EXDEV and e.errno != errno.EACCES:
+ # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied)
+ # and 26 (text file busy) which happens on vagrant synced folders
+ if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY]:
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
dest_dir = os.path.dirname(dest)
@@ -1479,7 +1540,7 @@ class AnsibleModule(object):
msg = None
st_in = None
- # Set a temporart env path if a prefix is passed
+ # Set a temporary env path if a prefix is passed
env=os.environ
if path_prefix:
env['PATH']="%s:%s" % (path_prefix, env['PATH'])
@@ -1572,7 +1633,7 @@ class AnsibleModule(object):
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
- return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
+ return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
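
The basic.py hunk above replaces the long if/elif chain in _check_argument_types() with a dispatch table of per-type checker methods. A minimal standalone sketch of the same pattern follows; the argument_spec and params values are invented for illustration and only two checkers are shown:

    def check_type_int(value):
        # ints pass through, strings are converted, anything else is rejected
        if isinstance(value, int):
            return value
        if isinstance(value, str):
            return int(value)
        raise TypeError('%s cannot be converted to an int' % type(value))

    def check_type_list(value):
        # lists pass through, comma-separated strings become lists
        if isinstance(value, list):
            return value
        if isinstance(value, str):
            return value.split(',')
        raise TypeError('%s cannot be converted to a list' % type(value))

    DISPATCHER = {'int': check_type_int, 'list': check_type_list}

    def check_argument_types(argument_spec, params):
        for name, spec in argument_spec.items():
            wanted = spec.get('type')
            if wanted is None or name not in params:
                continue
            try:
                checker = DISPATCHER[wanted]
            except KeyError:
                raise ValueError('unknown type %s requested for %s' % (wanted, name))
            # a TypeError/ValueError here is what AnsibleModule turns into fail_json()
            params[name] = checker(params[name])

    params = {'count': '3', 'tags': 'a,b,c'}
    check_argument_types({'count': {'type': 'int'}, 'tags': {'type': 'list'}}, params)
    print(params)   # {'count': 3, 'tags': ['a', 'b', 'c']}
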
diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py
index e887367c2fd..752defec2b6 100644
--- a/lib/ansible/module_utils/cloudstack.py
+++ b/lib/ansible/module_utils/cloudstack.py
@@ -64,19 +64,33 @@ class AnsibleCloudStack:
api_secret = self.module.params.get('secret_key')
api_url = self.module.params.get('api_url')
api_http_method = self.module.params.get('api_http_method')
+ api_timeout = self.module.params.get('api_timeout')
if api_key and api_secret and api_url:
self.cs = CloudStack(
endpoint=api_url,
key=api_key,
secret=api_secret,
+ timeout=api_timeout,
method=api_http_method
)
else:
self.cs = CloudStack(**read_config())
- # TODO: rename to has_changed()
+
+ def get_or_fallback(self, key=None, fallback_key=None):
+ value = self.module.params.get(key)
+ if not value:
+ value = self.module.params.get(fallback_key)
+ return value
+
+
+ # TODO: for backward compatibility only, remove if not used anymore
def _has_changed(self, want_dict, current_dict, only_keys=None):
+ return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys)
+
+
+ def has_changed(self, want_dict, current_dict, only_keys=None):
for key, value in want_dict.iteritems():
# Optionally limit by a list of keys
@@ -109,11 +123,6 @@ class AnsibleCloudStack:
return my_dict
- # TODO: for backward compatibility only, remove if not used anymore
- def get_project_id(self):
- return self.get_project(key='id')
-
-
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
@@ -122,23 +131,17 @@ class AnsibleCloudStack:
if not project:
return None
args = {}
- args['listall'] = True
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
projects = self.cs.listProjects(**args)
if projects:
for p in projects['project']:
- if project in [ p['name'], p['displaytext'], p['id'] ]:
+ if project.lower() in [ p['name'].lower(), p['id'] ]:
self.project = p
return self._get_by_key(key, self.project)
self.module.fail_json(msg="project '%s' not found" % project)
- # TODO: for backward compatibility only, remove if not used anymore
- def get_ip_address_id(self):
- return self.get_ip_address(key='id')
-
-
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
@@ -161,11 +164,6 @@ class AnsibleCloudStack:
return self._get_by_key(key, self.ip_address)
- # TODO: for backward compatibility only, remove if not used anymore
- def get_vm_id(self):
- return self.get_vm(key='id')
-
-
def get_vm(self, key=None):
if self.vm:
return self._get_by_key(key, self.vm)
@@ -188,11 +186,6 @@ class AnsibleCloudStack:
self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
- # TODO: for backward compatibility only, remove if not used anymore
- def get_zone_id(self):
- return self.get_zone(key='id')
-
-
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
@@ -213,11 +206,6 @@ class AnsibleCloudStack:
self.module.fail_json(msg="zone '%s' not found" % zone)
- # TODO: for backward compatibility only, remove if not used anymore
- def get_os_type_id(self):
- return self.get_os_type(key='id')
-
-
def get_os_type(self, key=None):
if self.os_type:
return self._get_by_key(key, self.zone)
@@ -286,12 +274,13 @@ class AnsibleCloudStack:
return None
args = {}
- args['name'] = domain
args['listall'] = True
domains = self.cs.listDomains(**args)
if domains:
- self.domain = domains['domain'][0]
- return self._get_by_key(key, self.domain)
+ for d in domains['domain']:
+ if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]:
+ self.domain = d
+ return self._get_by_key(key, self.domain)
self.module.fail_json(msg="Domain '%s' not found" % domain)
@@ -359,8 +348,13 @@ class AnsibleCloudStack:
self.capabilities = capabilities['capability']
return self._get_by_key(key, self.capabilities)
- # TODO: rename to poll_job()
+
+ # TODO: for backward compatibility only, remove if not used anymore
def _poll_job(self, job=None, key=None):
+ return self.poll_job(job=job, key=key)
+
+
+ def poll_job(self, job=None, key=None):
if 'jobid' in job:
while True:
res = self.cs.queryAsyncJobResult(jobid=job['jobid'])
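
The new get_or_fallback() helper in the cloudstack hunk above prefers one module parameter and falls back to a second one when the first is unset. A short sketch of the same idea outside of AnsibleCloudStack (the parameter names are invented):

    def get_or_fallback(params, key, fallback_key):
        # return params[key] when it is set and truthy, otherwise params[fallback_key]
        value = params.get(key)
        if not value:
            value = params.get(fallback_key)
        return value

    params = {'display_text': None, 'name': 'web-01'}
    print(get_or_fallback(params, 'display_text', 'name'))   # -> web-01
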
diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py
index d02c3476f2e..4ba172cebe4 100644
--- a/lib/ansible/module_utils/ec2.py
+++ b/lib/ansible/module_utils/ec2.py
@@ -33,6 +33,19 @@ except:
HAS_LOOSE_VERSION = False
+def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
+ if conn_type not in ['both', 'resource', 'client']:
+ module.fail_json(msg='There is an issue in the code of the module. You must specify either both, resource or client to the conn_type parameter in the boto3_conn function call')
+
+ resource = boto3.session.Session().resource(resource, region_name=region, endpoint_url=endpoint, **params)
+ client = resource.meta.client
+
+ if conn_type == 'resource':
+ return resource
+ elif conn_type == 'client':
+ return client
+ else:
+ return client, resource
def aws_common_argument_spec():
return dict(
@@ -59,7 +72,7 @@ def boto_supports_profile_name():
return hasattr(boto.ec2.EC2Connection, 'profile_name')
-def get_aws_connection_info(module):
+def get_aws_connection_info(module, boto3=False):
# Check module args for credentials, then check environment vars
# access_key
@@ -120,19 +133,31 @@ def get_aws_connection_info(module):
# in case security_token came in as empty string
security_token = None
- boto_params = dict(aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- security_token=security_token)
+ if boto3:
+ boto_params = dict(aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ aws_session_token=security_token)
+ if validate_certs:
+ boto_params['verify'] = validate_certs
- # profile_name only works as a key in boto >= 2.24
- # so only set profile_name if passed as an argument
- if profile_name:
- if not boto_supports_profile_name():
- module.fail_json("boto does not support profile_name before 2.24")
- boto_params['profile_name'] = profile_name
+ if profile_name:
+ boto_params['profile_name'] = profile_name
- if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"):
- boto_params['validate_certs'] = validate_certs
+
+ else:
+ boto_params = dict(aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ security_token=security_token)
+
+ # profile_name only works as a key in boto >= 2.24
+ # so only set profile_name if passed as an argument
+ if profile_name:
+ if not boto_supports_profile_name():
+ module.fail_json("boto does not support profile_name before 2.24")
+ boto_params['profile_name'] = profile_name
+
+ if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"):
+ boto_params['validate_certs'] = validate_certs
return region, ec2_url, boto_params
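
For reference, the boto3_conn() helper added above builds a boto3 resource and derives the client from its meta object. A hedged standalone sketch of that flow, assuming boto3 is installed; the 'ec2' service name and region in the usage comment are examples only:

    import boto3

    def boto3_conn_sketch(conn_type, resource, region, endpoint=None, **params):
        # mirror of the helper above: build the resource, take the client from its meta
        session_resource = boto3.session.Session().resource(
            resource, region_name=region, endpoint_url=endpoint, **params)
        client = session_resource.meta.client
        if conn_type == 'resource':
            return session_resource
        if conn_type == 'client':
            return client
        return client, session_resource

    # e.g. ec2_client = boto3_conn_sketch('client', 'ec2', 'us-east-1')
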
diff --git a/lib/ansible/module_utils/f5.py b/lib/ansible/module_utils/f5.py
new file mode 100644
index 00000000000..097a6370afe
--- /dev/null
+++ b/lib/ansible/module_utils/f5.py
@@ -0,0 +1,77 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Etienne Carrière ,2015
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+try:
+ import bigsuds
+except ImportError:
+ bigsuds_found = False
+else:
+ bigsuds_found = True
+
+
+def f5_argument_spec():
+ return dict(
+ server=dict(type='str', required=True),
+ user=dict(type='str', required=True),
+ password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
+ validate_certs = dict(default='yes', type='bool'),
+ state = dict(type='str', default='present', choices=['present', 'absent']),
+ partition = dict(type='str', default='Common')
+ )
+
+
+def f5_parse_arguments(module):
+ if not bigsuds_found:
+ module.fail_json(msg="the python bigsuds module is required")
+ if not module.params['validate_certs']:
+ disable_ssl_cert_validation()
+ return (module.params['server'],module.params['user'],module.params['password'],module.params['state'],module.params['partition'],module.params['validate_certs'])
+
+def bigip_api(bigip, user, password):
+ api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
+ return api
+
+def disable_ssl_cert_validation():
+ # You probably only want to do this for testing and never in production.
+ # From https://www.python.org/dev/peps/pep-0476/#id29
+ import ssl
+ ssl._create_default_https_context = ssl._create_unverified_context
+
+# Fully Qualified name (with the partition)
+def fq_name(partition,name):
+ if name is not None and not name.startswith('/'):
+ return '/%s/%s' % (partition,name)
+ return name
+
+# Fully Qualified name (with partition) for a list
+def fq_list_names(partition,list_names):
+ if list_names is None:
+ return None
+ return map(lambda x: fq_name(partition,x),list_names)
+
+
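
The fq_name()/fq_list_names() helpers in the new f5.py snippet prefix BIG-IP object names with a partition unless they are already fully qualified. A short usage sketch with invented pool names (fq_list_names is written as a list comprehension here, which is equivalent to the map() call above):

    def fq_name(partition, name):
        # prefix with /<partition>/ unless the name already starts with a slash
        if name is not None and not name.startswith('/'):
            return '/%s/%s' % (partition, name)
        return name

    def fq_list_names(partition, list_names):
        if list_names is None:
            return None
        return [fq_name(partition, name) for name in list_names]

    print(fq_name('Common', 'web_pool'))            # -> /Common/web_pool
    print(fq_name('Common', '/Dev/web_pool'))       # -> /Dev/web_pool (unchanged)
    print(fq_list_names('Common', ['a', '/Dev/b'])) # -> ['/Common/a', '/Dev/b']
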
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index 3485690b83f..a3f8c05d651 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -16,6 +16,7 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
+import sys
import stat
import array
import errno
@@ -43,9 +44,17 @@ except ImportError:
try:
import json
+ # Detect python-json which is incompatible and fallback to simplejson in
+ # that case
+ try:
+ json.loads
+ json.dumps
+ except AttributeError:
+ raise ImportError
except ImportError:
import simplejson as json
+
# --------------------------------------------------------------
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
@@ -99,8 +108,9 @@ class Facts(object):
('/etc/os-release', 'SuSE'),
('/etc/gentoo-release', 'Gentoo'),
('/etc/os-release', 'Debian'),
+ ('/etc/lsb-release', 'Mandriva'),
('/etc/os-release', 'NA'),
- ('/etc/lsb-release', 'Mandriva'))
+ )
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
@@ -115,6 +125,7 @@ class Facts(object):
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
+ { 'path' : '/usr/local/bin/brew', 'name' : 'homebrew' },
{ 'path' : '/sbin/apk', 'name' : 'apk' },
{ 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
@@ -140,6 +151,7 @@ class Facts(object):
self.get_user_facts()
self.get_local_facts()
self.get_env_facts()
+ self.get_dns_facts()
def populate(self):
return self.facts
@@ -420,7 +432,9 @@ class Facts(object):
release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
self.facts['distribution_release'] = release.groups()[0]
- break
+ break
+ elif 'Ubuntu' in data:
+ break # Ubuntu gets correct info from python functions
elif name == 'Mandriva':
data = get_file_content(path)
if 'Mandriva' in data:
@@ -435,12 +449,15 @@ class Facts(object):
elif name == 'NA':
data = get_file_content(path)
for line in data.splitlines():
- distribution = re.search("^NAME=(.*)", line)
- if distribution:
- self.facts['distribution'] = distribution.group(1).strip('"')
- version = re.search("^VERSION=(.*)", line)
- if version:
- self.facts['distribution_version'] = version.group(1).strip('"')
+ if self.facts['distribution'] == 'NA':
+ distribution = re.search("^NAME=(.*)", line)
+ if distribution:
+ self.facts['distribution'] = distribution.group(1).strip('"')
+ if self.facts['distribution_version'] == 'NA':
+ version = re.search("^VERSION=(.*)", line)
+ if version:
+ self.facts['distribution_version'] = version.group(1).strip('"')
+
if self.facts['distribution'].lower() == 'coreos':
data = get_file_content('/etc/coreos/update.conf')
release = re.search("^GROUP=(.*)", data)
@@ -471,29 +488,19 @@ class Facts(object):
pass
def get_public_ssh_host_keys(self):
- dsa_filename = '/etc/ssh/ssh_host_dsa_key.pub'
- rsa_filename = '/etc/ssh/ssh_host_rsa_key.pub'
- ecdsa_filename = '/etc/ssh/ssh_host_ecdsa_key.pub'
+ keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519')
if self.facts['system'] == 'Darwin':
- dsa_filename = '/etc/ssh_host_dsa_key.pub'
- rsa_filename = '/etc/ssh_host_rsa_key.pub'
- ecdsa_filename = '/etc/ssh_host_ecdsa_key.pub'
- dsa = get_file_content(dsa_filename)
- rsa = get_file_content(rsa_filename)
- ecdsa = get_file_content(ecdsa_filename)
- if dsa is None:
- dsa = 'NA'
+ keydir = '/etc'
else:
- self.facts['ssh_host_key_dsa_public'] = dsa.split()[1]
- if rsa is None:
- rsa = 'NA'
- else:
- self.facts['ssh_host_key_rsa_public'] = rsa.split()[1]
- if ecdsa is None:
- ecdsa = 'NA'
- else:
- self.facts['ssh_host_key_ecdsa_public'] = ecdsa.split()[1]
+ keydir = '/etc/ssh'
+
+ for type_ in keytypes:
+ key_filename = '%s/ssh_host_%s_key.pub' % (keydir, type_)
+ keydata = get_file_content(key_filename)
+ if keydata is not None:
+ factname = 'ssh_host_key_%s_public' % type_
+ self.facts[factname] = keydata.split()[1]
def get_pkg_mgr_facts(self):
self.facts['pkg_mgr'] = 'unknown'
@@ -605,6 +612,8 @@ class Facts(object):
self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
+ self.facts['date_time']['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
+ self.facts['date_time']['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
self.facts['date_time']['tz'] = time.strftime("%Z")
self.facts['date_time']['tz_offset'] = time.strftime("%z")
@@ -624,6 +633,37 @@ class Facts(object):
for k,v in os.environ.iteritems():
self.facts['env'][k] = v
+ def get_dns_facts(self):
+ self.facts['dns'] = {}
+ for line in get_file_lines('/etc/resolv.conf'):
+ if line.startswith('#') or line.startswith(';') or line.strip() == '':
+ continue
+ tokens = line.split()
+ if len(tokens) == 0:
+ continue
+ if tokens[0] == 'nameserver':
+ self.facts['dns']['nameservers'] = []
+ for nameserver in tokens[1:]:
+ self.facts['dns']['nameservers'].append(nameserver)
+ elif tokens[0] == 'domain':
+ self.facts['dns']['domain'] = tokens[1]
+ elif tokens[0] == 'search':
+ self.facts['dns']['search'] = []
+ for suffix in tokens[1:]:
+ self.facts['dns']['search'].append(suffix)
+ elif tokens[0] == 'sortlist':
+ self.facts['dns']['sortlist'] = []
+ for address in tokens[1:]:
+ self.facts['dns']['sortlist'].append(address)
+ elif tokens[0] == 'options':
+ self.facts['dns']['options'] = {}
+ for option in tokens[1:]:
+ option_tokens = option.split(':', 1)
+ if len(option_tokens) == 0:
+ continue
+ val = len(option_tokens) == 2 and option_tokens[1] or True
+ self.facts['dns']['options'][option_tokens[0]] = val
+
class Hardware(Facts):
"""
This is a generic Hardware subclass of Facts. This should be further
@@ -774,7 +814,7 @@ class LinuxHardware(Hardware):
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
- if key == 'model name' or key == 'Processor' or key == 'vendor_id':
+ if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor']:
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
@@ -978,7 +1018,7 @@ class LinuxHardware(Hardware):
part['start'] = get_file_content(part_sysdir + "/start",0)
part['sectors'] = get_file_content(part_sysdir + "/size",0)
- part['sectorsize'] = get_file_content(part_sysdir + "/queue/physical_block_size")
+ part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
@@ -995,7 +1035,7 @@ class LinuxHardware(Hardware):
d['sectors'] = get_file_content(sysdir + "/size")
if not d['sectors']:
d['sectors'] = 0
- d['sectorsize'] = get_file_content(sysdir + "/queue/physical_block_size")
+ d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
@@ -1268,13 +1308,14 @@ class FreeBSDHardware(Hardware):
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
- rc, out, err = module.run_command("/usr/sbin/swapinfo -m")
+ rc, out, err = module.run_command("/usr/sbin/swapinfo -k")
lines = out.split('\n')
if len(lines[-1]) == 0:
lines.pop()
data = lines[-1].split()
- self.facts['swaptotal_mb'] = data[1]
- self.facts['swapfree_mb'] = data[3]
+ if data[0] != 'Device':
+ self.facts['swaptotal_mb'] = int(data[1]) / 1024
+ self.facts['swapfree_mb'] = int(data[3]) / 1024
@timeout(10)
def get_mount_facts(self):
@@ -1817,6 +1858,8 @@ class LinuxNetwork(Network):
path = os.path.join(path, 'bonding', 'all_slaves_active')
if os.path.exists(path):
interfaces[device]['all_slaves_active'] = get_file_content(path) == '1'
+ if os.path.exists(os.path.join(path,'device')):
+ interfaces[device]['pciid'] = os.path.basename(os.readlink(os.path.join(path,'device')))
# Check whether an interface is in promiscuous mode
if os.path.exists(os.path.join(path,'flags')):
@@ -2000,7 +2043,7 @@ class GenericBsdIfconfigNetwork(Network):
return interface['v4'], interface['v6']
- def get_interfaces_info(self, ifconfig_path):
+ def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
interfaces = {}
current_if = {}
ips = dict(
@@ -2010,7 +2053,7 @@ class GenericBsdIfconfigNetwork(Network):
# FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
# when running the command 'ifconfig'.
# Solaris must explicitly run the command 'ifconfig -a'.
- rc, out, err = module.run_command([ifconfig_path, '-a'])
+ rc, out, err = module.run_command([ifconfig_path, ifconfig_options])
for line in out.split('\n'):
@@ -2147,6 +2190,57 @@ class GenericBsdIfconfigNetwork(Network):
for item in ifinfo[ip_type][0].keys():
defaults[item] = ifinfo[ip_type][0][item]
+class HPUXNetwork(Network):
+ """
+ HP-UX-specific subclass of Network. Defines networking facts:
+ - default_interface
+ - interfaces (a list of interface names)
+ - interface_<name> dictionary of ipv4 address information.
+ """
+ platform = 'HP-UX'
+
+ def __init__(self, module):
+ Network.__init__(self, module)
+
+ def populate(self):
+ netstat_path = self.module.get_bin_path('netstat')
+ if netstat_path is None:
+ return self.facts
+ self.get_default_interfaces()
+ interfaces = self.get_interfaces_info()
+ self.facts['interfaces'] = interfaces.keys()
+ for iface in interfaces:
+ self.facts[iface] = interfaces[iface]
+ return self.facts
+
+ def get_default_interfaces(self):
+ rc, out, err = module.run_command("/usr/bin/netstat -nr")
+ lines = out.split('\n')
+ for line in lines:
+ words = line.split()
+ if len(words) > 1:
+ if words[0] == 'default':
+ self.facts['default_interface'] = words[4]
+ self.facts['default_gateway'] = words[1]
+
+ def get_interfaces_info(self):
+ interfaces = {}
+ rc, out, err = module.run_command("/usr/bin/netstat -ni")
+ lines = out.split('\n')
+ for line in lines:
+ words = line.split()
+ for i in range(len(words) - 1):
+ if words[i][:3] == 'lan':
+ device = words[i]
+ interfaces[device] = { 'device': device }
+ address = words[i+3]
+ interfaces[device]['ipv4'] = { 'address': address }
+ network = words[i+2]
+ interfaces[device]['ipv4'] = { 'network': network,
+ 'interface': device,
+ 'address': address }
+ return interfaces
+
class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the Mac OS X/Darwin Network Class.
@@ -2160,7 +2254,7 @@ class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
current_if['media'] = 'Unknown' # Mac does not give us this
current_if['media_select'] = words[1]
if len(words) > 2:
- current_if['media_type'] = words[2][1:]
+ current_if['media_type'] = words[2][1:-1]
if len(words) > 3:
current_if['media_options'] = self.get_options(words[3])
@@ -2180,14 +2274,14 @@ class AIXNetwork(GenericBsdIfconfigNetwork, Network):
platform = 'AIX'
# AIX 'ifconfig -a' does not have three words in the interface line
- def get_interfaces_info(self, ifconfig_path):
+ def get_interfaces_info(self, ifconfig_path, ifconfig_options):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
- rc, out, err = module.run_command([ifconfig_path, '-a'])
+ rc, out, err = module.run_command([ifconfig_path, ifconfig_options])
for line in out.split('\n'):
@@ -2221,7 +2315,7 @@ class AIXNetwork(GenericBsdIfconfigNetwork, Network):
rc, out, err = module.run_command([uname_path, '-W'])
# don't bother with wpars it does not work
# zero means not in wpar
- if out.split()[0] == '0':
+ if not rc and out.split()[0] == '0':
if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
entstat_path = module.get_bin_path('entstat')
if entstat_path:
@@ -2267,6 +2361,10 @@ class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network):
"""
platform = 'OpenBSD'
+ # OpenBSD 'ifconfig -a' does not have information about aliases
+ def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
+ return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options)
+
# Return macaddress instead of lladdr
def parse_lladdr_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
@@ -2418,6 +2516,12 @@ class LinuxVirtual(Virtual):
self.facts['virtualization_role'] = 'guest'
return
+ systemd_container = get_file_content('/run/systemd/container')
+ if systemd_container:
+ self.facts['virtualization_type'] = systemd_container
+ self.facts['virtualization_role'] = 'guest'
+ return
+
if os.path.exists('/proc/1/cgroup'):
for line in get_file_lines('/proc/1/cgroup'):
if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
@@ -2737,12 +2841,16 @@ def get_all_facts(module):
for (k, v) in facts.items():
setup_options["ansible_%s" % k.replace('-', '_')] = v
- # Look for the path to the facter and ohai binary and set
+ # Look for the path to the facter, cfacter, and ohai binaries and set
# the variable to that path.
facter_path = module.get_bin_path('facter')
+ cfacter_path = module.get_bin_path('cfacter')
ohai_path = module.get_bin_path('ohai')
+ # Prefer to use cfacter if available
+ if cfacter_path is not None:
+ facter_path = cfacter_path
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
@@ -2778,6 +2886,6 @@ def get_all_facts(module):
setup_result['ansible_facts'][k] = v
# hack to keep --verbose from showing all the setup module results
- setup_result['verbose_override'] = True
+ setup_result['_ansible_verbose_override'] = True
return setup_result
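
The new get_dns_facts() above walks /etc/resolv.conf line by line and builds an ansible_dns fact. A simplified standalone sketch of that parsing against made-up file content (it accumulates nameservers rather than reproducing the exact upstream list handling):

    sample = [
        '# generated by NetworkManager',
        'nameserver 192.0.2.1',
        'nameserver 192.0.2.2',
        'search example.com corp.example.com',
        'options timeout:2 rotate',
    ]

    dns = {}
    for line in sample:
        line = line.strip()
        if not line or line.startswith('#') or line.startswith(';'):
            continue
        tokens = line.split()
        if tokens[0] == 'nameserver':
            dns.setdefault('nameservers', []).extend(tokens[1:])
        elif tokens[0] == 'domain':
            dns['domain'] = tokens[1]
        elif tokens[0] == 'search':
            dns['search'] = tokens[1:]
        elif tokens[0] == 'options':
            opts = {}
            for option in tokens[1:]:
                key, _, val = option.partition(':')
                opts[key] = val if val else True
            dns['options'] = opts
    print(dns)
    # {'nameservers': ['192.0.2.1', '192.0.2.2'],
    #  'search': ['example.com', 'corp.example.com'],
    #  'options': {'timeout': '2', 'rotate': True}}
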
diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py
index b58cc534287..40694491443 100644
--- a/lib/ansible/module_utils/openstack.py
+++ b/lib/ansible/module_utils/openstack.py
@@ -93,11 +93,7 @@ def openstack_full_argument_spec(**kwargs):
def openstack_module_kwargs(**kwargs):
- ret = dict(
- required_one_of=[
- ['cloud', 'auth'],
- ],
- )
+ ret = {}
for key in ('mutually_exclusive', 'required_together', 'required_one_of'):
if key in kwargs:
if key in ret:
diff --git a/lib/ansible/module_utils/powershell.ps1 b/lib/ansible/module_utils/powershell.ps1
index ee7d3ddeca4..ee659162162 100644
--- a/lib/ansible/module_utils/powershell.ps1
+++ b/lib/ansible/module_utils/powershell.ps1
@@ -26,18 +26,14 @@
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
-# Helper function to parse Ansible JSON arguments from a file passed as
-# the single argument to the module
-# Example: $params = Parse-Args $args
-Function Parse-Args($arguments)
-{
- $parameters = New-Object psobject;
- If ($arguments.Length -gt 0)
- {
- $parameters = Get-Content $arguments[0] | ConvertFrom-Json;
- }
- $parameters;
-}
+# Ansible v2 will insert the module arguments below as a string containing
+# JSON; assign them to an environment variable and redefine $args so existing
+# modules will continue to work.
+$complex_args = @'
+<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>
+'@
+Set-Content env:MODULE_COMPLEX_ARGS -Value $complex_args
+$args = @('env:MODULE_COMPLEX_ARGS')
# Helper function to set an "attribute" on a psobject instance in powershell.
# This is a convenience to make adding Members to the object easier and
@@ -65,7 +61,7 @@ Function Exit-Json($obj)
$obj = New-Object psobject
}
- echo $obj | ConvertTo-Json -Depth 99
+ echo $obj | ConvertTo-Json -Compress -Depth 99
Exit
}
@@ -89,7 +85,7 @@ Function Fail-Json($obj, $message = $null)
Set-Attr $obj "msg" $message
Set-Attr $obj "failed" $true
- echo $obj | ConvertTo-Json -Depth 99
+ echo $obj | ConvertTo-Json -Compress -Depth 99
Exit 1
}
@@ -142,6 +138,28 @@ Function ConvertTo-Bool
return
}
+# Helper function to parse Ansible JSON arguments from a "file" passed as
+# the single argument to the module.
+# Example: $params = Parse-Args $args
+Function Parse-Args($arguments, $supports_check_mode = $false)
+{
+ $parameters = New-Object psobject
+ If ($arguments.Length -gt 0)
+ {
+ $parameters = Get-Content $arguments[0] | ConvertFrom-Json
+ }
+ $check_mode = Get-Attr $parameters "_ansible_check_mode" $false | ConvertTo-Bool
+ If ($check_mode -and -not $supports_check_mode)
+ {
+ $obj = New-Object psobject
+ Set-Attr $obj "skipped" $true
+ Set-Attr $obj "changed" $false
+ Set-Attr $obj "msg" "remote module does not support check mode"
+ Exit-Json $obj
+ }
+ $parameters
+}
+
# Helper function to calculate a hash of a file in a way which powershell 3
# and above can handle:
Function Get-FileChecksum($path)
@@ -151,7 +169,7 @@ Function Get-FileChecksum($path)
{
$sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
$fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
- [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
+ $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
}
ElseIf (Test-Path -PathType Container $path)
diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py
index d56cc89395e..84f78f8d530 100644
--- a/lib/ansible/module_utils/urls.py
+++ b/lib/ansible/module_utils/urls.py
@@ -5,6 +5,7 @@
# to the complete work.
#
# Copyright (c), Michael DeHaan , 2012-2013
+# Copyright (c), Toshio Kuratomi , 2015
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
@@ -25,12 +26,60 @@
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-try:
- import urllib
- HAS_URLLIB = True
-except:
- HAS_URLLIB = False
+#
+# The match_hostname function and supporting code is under the terms and
+# conditions of the Python Software Foundation License. They were taken from
+# the Python3 standard library and adapted for use in Python2. See comments in the
+# source for which code precisely is under this License. PSF License text
+# follows:
+#
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
+# retained in Python alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
try:
import urllib2
@@ -46,15 +95,176 @@ except:
try:
import ssl
- HAS_SSL=True
+ HAS_SSL = True
except:
- HAS_SSL=False
+ HAS_SSL = False
+
+try:
+ # SNI Handling needs python2.7.9's SSLContext
+ from ssl import create_default_context, SSLContext
+ HAS_SSLCONTEXT = True
+except ImportError:
+ HAS_SSLCONTEXT = False
+
+# Select a protocol that includes all secure tls protocols
+# Exclude insecure ssl protocols if possible
+
+if HAS_SSL:
+ # If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient
+ PROTOCOL = ssl.PROTOCOL_TLSv1
+if not HAS_SSLCONTEXT and HAS_SSL:
+ try:
+ import ctypes, ctypes.util
+ except ImportError:
+ # python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl)
+ pass
+ else:
+ libssl_name = ctypes.util.find_library('ssl')
+ libssl = ctypes.CDLL(libssl_name)
+ for method in ('TLSv1_1_method', 'TLSv1_2_method'):
+ try:
+ libssl[method]
+ # Found something - we'll let openssl autonegotiate and hope
+ # the server has disabled sslv2 and 3. best we can do.
+ PROTOCOL = ssl.PROTOCOL_SSLv23
+ break
+ except AttributeError:
+ pass
+ del libssl
+
+
+
+HAS_MATCH_HOSTNAME = True
+try:
+ from ssl import match_hostname, CertificateError
+except ImportError:
+ try:
+ from backports.ssl_match_hostname import match_hostname, CertificateError
+ except ImportError:
+ HAS_MATCH_HOSTNAME = False
+
+if not HAS_MATCH_HOSTNAME:
+ ###
+ ### The following block of code is under the terms and conditions of the
+ ### Python Software Foundation License
+ ###
+
+ """The match_hostname() function from Python 3.4, essential when using SSL."""
+
+ import re
+
+ class CertificateError(ValueError):
+ pass
+
+
+ def _dnsname_match(dn, hostname, max_wildcards=1):
+ """Matching according to RFC 6125, section 6.4.3
+
+ http://tools.ietf.org/html/rfc6125#section-6.4.3
+ """
+ pats = []
+ if not dn:
+ return False
+
+ # Ported from python3-syntax:
+ # leftmost, *remainder = dn.split(r'.')
+ parts = dn.split(r'.')
+ leftmost = parts[0]
+ remainder = parts[1:]
+
+ wildcards = leftmost.count('*')
+ if wildcards > max_wildcards:
+ # Issue #17980: avoid denials of service by refusing more
+ # than one wildcard per fragment. A survey of established
+ # policy among SSL implementations showed it to be a
+ # reasonable choice.
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: " + repr(dn))
+
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ # RFC 6125, section 6.4.3, subitem 1.
+ # The client SHOULD NOT attempt to match a presented identifier in which
+ # the wildcard character comprises a label other than the left-most label.
+ if leftmost == '*':
+ # When '*' is a fragment by itself, it matches a non-empty dotless
+ # fragment.
+ pats.append('[^.]+')
+ elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+ # RFC 6125, section 6.4.3, subitem 3.
+ # The client SHOULD NOT attempt to match a presented identifier
+ # where the wildcard character is embedded within an A-label or
+ # U-label of an internationalized domain name.
+ pats.append(re.escape(leftmost))
+ else:
+ # Otherwise, '*' matches any dotless string, e.g. www*
+ pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+ # add the remaining fragments, ignore any wildcards
+ for frag in remainder:
+ pats.append(re.escape(frag))
+
+ pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+ return pat.match(hostname)
+
+
+ def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed, but IP addresses are not accepted for *hostname*.
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError("empty or no certificate")
+ dnsnames = []
+ san = cert.get('subjectAltName', ())
+ for key, value in san:
+ if key == 'DNS':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get('subject', ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == 'commonName':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError("hostname %r "
+ "doesn't match either of %s"
+ % (hostname, ', '.join(map(repr, dnsnames))))
+ elif len(dnsnames) == 1:
+ raise CertificateError("hostname %r "
+ "doesn't match %r"
+ % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError("no appropriate commonName or "
+ "subjectAltName fields were found")
+
+ ###
+ ### End of Python Software Foundation Licensed code
+ ###
+
+ HAS_MATCH_HOSTNAME = True
+
import httplib
import os
import re
+import sys
import socket
+import platform
import tempfile
+import base64
# This is a dummy cacert provided for Mac OS since you need at least 1
@@ -80,7 +290,35 @@ zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
-----END CERTIFICATE-----
"""
+#
+# Exceptions
+#
+
+class ConnectionError(Exception):
+ """Failed to connect to the server"""
+ pass
+
+class ProxyError(ConnectionError):
+ """Failure to connect because of a proxy"""
+ pass
+
+class SSLValidationError(ConnectionError):
+ """Failure to connect due to SSL validation failing"""
+ pass
+
+class NoSSLError(SSLValidationError):
+ """Needed to connect to an HTTPS url but no ssl library available to verify the certificate"""
+ pass
+
+
class CustomHTTPSConnection(httplib.HTTPSConnection):
+ def __init__(self, *args, **kwargs):
+ httplib.HTTPSConnection.__init__(self, *args, **kwargs)
+ if HAS_SSLCONTEXT:
+ self.context = create_default_context()
+ if self.cert_file:
+ self.context.load_cert_chain(self.cert_file, self.key_file)
+
def connect(self):
"Connect to a host on a given (SSL) port."
@@ -91,7 +329,10 @@ class CustomHTTPSConnection(httplib.HTTPSConnection):
if self._tunnel_host:
self.sock = sock
self._tunnel()
- self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1)
+ if HAS_SSLCONTEXT:
+ self.sock = self.context.wrap_socket(sock, server_hostname=self.host)
+ else:
+ self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)
class CustomHTTPSHandler(urllib2.HTTPSHandler):
@@ -144,7 +385,7 @@ def generic_urlparse(parts):
username, password = auth.split(':', 1)
generic_parts['username'] = username
generic_parts['password'] = password
- generic_parts['hostname'] = hostnme
+ generic_parts['hostname'] = hostname
generic_parts['port'] = port
except:
generic_parts['username'] = None
@@ -180,8 +421,7 @@ class SSLValidationHandler(urllib2.BaseHandler):
'''
CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\nConnection: close\r\n"
- def __init__(self, module, hostname, port):
- self.module = module
+ def __init__(self, hostname, port):
self.hostname = hostname
self.port = port
@@ -191,23 +431,22 @@ class SSLValidationHandler(urllib2.BaseHandler):
ca_certs = []
paths_checked = []
- platform = get_platform()
- distribution = get_distribution()
+ system = platform.system()
# build a list of paths to check for .crt/.pem files
# based on the platform type
paths_checked.append('/etc/ssl/certs')
- if platform == 'Linux':
+ if system == 'Linux':
paths_checked.append('/etc/pki/ca-trust/extracted/pem')
paths_checked.append('/etc/pki/tls/certs')
paths_checked.append('/usr/share/ca-certificates/cacert.org')
- elif platform == 'FreeBSD':
+ elif system == 'FreeBSD':
paths_checked.append('/usr/local/share/certs')
- elif platform == 'OpenBSD':
+ elif system == 'OpenBSD':
paths_checked.append('/etc/ssl')
- elif platform == 'NetBSD':
+ elif system == 'NetBSD':
ca_certs.append('/etc/openssl/certs')
- elif platform == 'SunOS':
+ elif system == 'SunOS':
paths_checked.append('/opt/local/etc/openssl/certs')
# fall back to a user-deployed cert in a standard
@@ -217,9 +456,9 @@ class SSLValidationHandler(urllib2.BaseHandler):
tmp_fd, tmp_path = tempfile.mkstemp()
# Write the dummy ca cert if we are running on Mac OS X
- if platform == 'Darwin':
+ if system == 'Darwin':
os.write(tmp_fd, DUMMY_CA_CERT)
- # Default Homebrew path for OpenSSL certs
+ # Default Homebrew path for OpenSSL certs
paths_checked.append('/usr/local/etc/openssl')
# for all of the paths, find any .crt or .pem files
@@ -250,7 +489,7 @@ class SSLValidationHandler(urllib2.BaseHandler):
if int(resp_code) not in valid_codes:
raise Exception
except:
- self.module.fail_json(msg='Connection to proxy failed')
+ raise ProxyError('Connection to proxy failed')
def detect_no_proxy(self, url):
'''
@@ -268,9 +507,17 @@ class SSLValidationHandler(urllib2.BaseHandler):
return False
return True
+ def _make_context(self, tmp_ca_cert_path):
+ context = create_default_context()
+ context.load_verify_locations(tmp_ca_cert_path)
+ return context
+
def http_request(self, req):
tmp_ca_cert_path, paths_checked = self.get_ca_certs()
https_proxy = os.environ.get('https_proxy')
+ context = None
+ if HAS_SSLCONTEXT:
+ context = self._make_context(tmp_ca_cert_path)
# Detect if 'no_proxy' environment variable is set and if our URL is included
use_proxy = self.detect_no_proxy(req.get_full_url())
@@ -292,25 +539,40 @@ class SSLValidationHandler(urllib2.BaseHandler):
s.sendall('\r\n')
connect_result = s.recv(4096)
self.validate_proxy_response(connect_result)
- ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
+ if context:
+ ssl_s = context.wrap_socket(s, server_hostname=proxy_parts.get('hostname'))
+ else:
+ ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
+ match_hostname(ssl_s.getpeercert(), self.hostname)
else:
- self.module.fail_json(msg='Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
+ raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
else:
s.connect((self.hostname, self.port))
- ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
+ if context:
+ ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
+ else:
+ ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
+ match_hostname(ssl_s.getpeercert(), self.hostname)
# close the ssl connection
#ssl_s.unwrap()
s.close()
except (ssl.SSLError, socket.error), e:
# fail if we tried all of the certs but none worked
if 'connection refused' in str(e).lower():
- self.module.fail_json(msg='Failed to connect to %s:%s.' % (self.hostname, self.port))
+ raise ConnectionError('Failed to connect to %s:%s.' % (self.hostname, self.port))
else:
- self.module.fail_json(
- msg='Failed to validate the SSL certificate for %s:%s. ' % (self.hostname, self.port) + \
- 'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \
- 'Paths checked for this platform: %s' % ", ".join(paths_checked)
+ raise SSLValidationError('Failed to validate the SSL certificate for %s:%s.'
+ ' Make sure your managed systems have a valid CA'
+ ' certificate installed. If the website serving the url'
+ ' uses SNI, you need python >= 2.7.9 on your managed'
+ ' machine. You can use validate_certs=False if you do'
+ ' not need to confirm the server\'s identity but this is'
+ ' unsafe and not recommended.'
+ ' Paths checked for this platform: %s' % (self.hostname, self.port, ", ".join(paths_checked))
)
+ except CertificateError:
+ raise SSLValidationError("SSL Certificate does not belong to %s. Make sure the url has a certificate that belongs to it or use validate_certs=False (insecure)" % self.hostname)
+
try:
# cleanup the temp file created, don't worry
# if it fails for some reason
@@ -322,74 +584,41 @@ class SSLValidationHandler(urllib2.BaseHandler):
https_request = http_request
-
-def url_argument_spec():
- '''
- Creates an argument spec that can be used with any module
- that will be requesting content via urllib/urllib2
- '''
- return dict(
- url = dict(),
- force = dict(default='no', aliases=['thirsty'], type='bool'),
- http_agent = dict(default='ansible-httpget'),
- use_proxy = dict(default='yes', type='bool'),
- validate_certs = dict(default='yes', type='bool'),
- url_username = dict(required=False),
- url_password = dict(required=False),
- )
-
-
-def fetch_url(module, url, data=None, headers=None, method=None,
- use_proxy=True, force=False, last_mod_time=None, timeout=10):
+# Rewrite of fetch_url to not require the module environment
+def open_url(url, data=None, headers=None, method=None, use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=False):
'''
Fetches a file from an HTTP/FTP server using urllib2
'''
-
- if not HAS_URLLIB:
- module.fail_json(msg='urllib is not installed')
- if not HAS_URLLIB2:
- module.fail_json(msg='urllib2 is not installed')
- elif not HAS_URLPARSE:
- module.fail_json(msg='urlparse is not installed')
-
- r = None
handlers = []
- info = dict(url=url)
-
- distribution = get_distribution()
- # Get validate_certs from the module params
- validate_certs = module.params.get('validate_certs', True)
-
# FIXME: change the following to use the generic_urlparse function
# to remove the indexed references for 'parsed'
parsed = urlparse.urlparse(url)
- if parsed[0] == 'https':
- if not HAS_SSL and validate_certs:
- if distribution == 'Redhat':
- module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended. You can also install python-ssl from EPEL')
- else:
- module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended')
-
- elif validate_certs:
- # do the cert validation
- netloc = parsed[1]
- if '@' in netloc:
- netloc = netloc.split('@', 1)[1]
- if ':' in netloc:
- hostname, port = netloc.split(':', 1)
- port = int(port)
- else:
- hostname = netloc
- port = 443
- # create the SSL validation handler and
- # add it to the list of handlers
- ssl_handler = SSLValidationHandler(module, hostname, port)
- handlers.append(ssl_handler)
+ if parsed[0] == 'https' and validate_certs:
+ if not HAS_SSL:
+ raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False, however this is unsafe and not recommended')
+
+ # do the cert validation
+ netloc = parsed[1]
+ if '@' in netloc:
+ netloc = netloc.split('@', 1)[1]
+ if ':' in netloc:
+ hostname, port = netloc.split(':', 1)
+ port = int(port)
+ else:
+ hostname = netloc
+ port = 443
+ # create the SSL validation handler and
+ # add it to the list of handlers
+ ssl_handler = SSLValidationHandler(hostname, port)
+ handlers.append(ssl_handler)
if parsed[0] != 'ftp':
- username = module.params.get('url_username', '')
+ username = url_username
+
if username:
- password = module.params.get('url_password', '')
+ password = url_password
netloc = parsed[1]
elif '@' in parsed[1]:
credentials, netloc = parsed[1].split('@', 1)
@@ -405,7 +634,7 @@ def fetch_url(module, url, data=None, headers=None, method=None,
# reconstruct url without credentials
url = urlparse.urlunparse(parsed)
- if username:
+ if username and not force_basic_auth:
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
# this creates a password manager
@@ -419,6 +648,12 @@ def fetch_url(module, url, data=None, headers=None, method=None,
# create the AuthHandler
handlers.append(authhandler)
+ elif username and force_basic_auth:
+ if headers is None:
+ headers = {}
+
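+ # force_basic_auth sends the credentials with the first request instead of waiting for a 401 challenge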
+ headers["Authorization"] = "Basic %s" % base64.b64encode("%s:%s" % (username, password))
+
if not use_proxy:
proxyhandler = urllib2.ProxyHandler({})
handlers.append(proxyhandler)
@@ -433,16 +668,16 @@ def fetch_url(module, url, data=None, headers=None, method=None,
if method:
if method.upper() not in ('OPTIONS','GET','HEAD','POST','PUT','DELETE','TRACE','CONNECT'):
- module.fail_json(msg='invalid HTTP request method; %s' % method.upper())
+ raise ConnectionError('invalid HTTP request method; %s' % method.upper())
request = RequestWithMethod(url, method.upper(), data)
else:
request = urllib2.Request(url, data)
- # add the custom agent header, to help prevent issues
- # with sites that block the default urllib agent string
- request.add_header('User-agent', module.params.get('http_agent'))
+ # add the custom agent header, to help prevent issues
+ # with sites that block the default urllib agent string
+ request.add_header('User-agent', http_agent)
- # if we're ok with getting a 304, set the timestamp in the
+ # if we're ok with getting a 304, set the timestamp in the
# header, otherwise make sure we don't get a cached copy
if last_mod_time and not force:
tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
@@ -453,20 +688,84 @@ def fetch_url(module, url, data=None, headers=None, method=None,
# user defined headers now, which may override things we've set above
if headers:
if not isinstance(headers, dict):
- module.fail_json("headers provided to fetch_url() must be a dict")
+ raise ValueError("headers provided to fetch_url() must be a dict")
for header in headers:
request.add_header(header, headers[header])
+ urlopen_args = [request, None]
+ if sys.version_info >= (2,6,0):
+ # urlopen in python prior to 2.6.0 did not
+ # have a timeout parameter
+ urlopen_args.append(timeout)
+
+ if HAS_SSLCONTEXT and not validate_certs:
+ # In 2.7.9, the default context validates certificates
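+ # so build a deliberately permissive context that skips both certificate and hostname checks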
+ context = SSLContext(ssl.PROTOCOL_SSLv23)
+ context.options |= ssl.OP_NO_SSLv2
+ context.options |= ssl.OP_NO_SSLv3
+ context.verify_mode = ssl.CERT_NONE
+ context.check_hostname = False
+ urlopen_args += (None, None, None, context)
+
+ r = urllib2.urlopen(*urlopen_args)
+ return r
+
+#
+# Module-related functions
+#
+
+def url_argument_spec():
+ '''
+ Creates an argument spec that can be used with any module
+ that will be requesting content via urllib/urllib2
+ '''
+ return dict(
+ url = dict(),
+ force = dict(default='no', aliases=['thirsty'], type='bool'),
+ http_agent = dict(default='ansible-httpget'),
+ use_proxy = dict(default='yes', type='bool'),
+ validate_certs = dict(default='yes', type='bool'),
+ url_username = dict(required=False),
+ url_password = dict(required=False),
+ force_basic_auth = dict(required=False, type='bool', default='no'),
+
+ )
+
+def fetch_url(module, url, data=None, headers=None, method=None,
+ use_proxy=True, force=False, last_mod_time=None, timeout=10):
+ '''
+ Fetches a file from an HTTP/FTP server using urllib2. Requires the module environment
+ '''
+
+ if not HAS_URLLIB2:
+ module.fail_json(msg='urllib2 is not installed')
+ elif not HAS_URLPARSE:
+ module.fail_json(msg='urlparse is not installed')
+
+ # Get validate_certs from the module params
+ validate_certs = module.params.get('validate_certs', True)
+
+ username = module.params.get('url_username', '')
+ password = module.params.get('url_password', '')
+ http_agent = module.params.get('http_agent', None)
+ force_basic_auth = module.params.get('force_basic_auth', '')
+
+ r = None
+ info = dict(url=url)
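+ # hand the real work off to open_url and translate its exceptions into module failures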
try:
- if sys.version_info < (2,6,0):
- # urlopen in python prior to 2.6.0 did not
- # have a timeout parameter
- r = urllib2.urlopen(request, None)
- else:
- r = urllib2.urlopen(request, None, timeout)
+ r = open_url(url, data=data, headers=headers, method=method,
+ use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout,
+ validate_certs=validate_certs, url_username=username,
+ url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth)
info.update(r.info())
info['url'] = r.geturl() # The URL goes in too, because of redirects.
info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200))
+ except NoSSLError, e:
+ distribution = get_distribution()
+ if distribution is not None and distribution.lower() == 'redhat':
+ module.fail_json(msg='%s. You can also install python-ssl from EPEL' % str(e))
+ else:
+ module.fail_json(msg=str(e))
+ except (ConnectionError, ValueError), e:
+ module.fail_json(msg=str(e))
except urllib2.HTTPError, e:
info.update(dict(msg=str(e), status=e.code))
except urllib2.URLError, e:
@@ -478,4 +777,3 @@ def fetch_url(module, url, data=None, headers=None, method=None,
info.update(dict(msg="An unknown error occurred: %s" % str(e), status=-1))
return r, info
-
diff --git a/lib/ansible/module_utils/vmware.py b/lib/ansible/module_utils/vmware.py
index e2d8c18ca48..6eb612de744 100644
--- a/lib/ansible/module_utils/vmware.py
+++ b/lib/ansible/module_utils/vmware.py
@@ -122,9 +122,9 @@ def connect_to_api(module, disconnect_atexit=True):
if disconnect_atexit:
atexit.register(connect.Disconnect, service_instance)
return service_instance.RetrieveContent()
- except vim.fault.InvalidLogin as invalid_login:
+ except vim.fault.InvalidLogin, invalid_login:
module.fail_json(msg=invalid_login.msg, apierror=str(invalid_login))
- except requests.ConnectionError as connection_error:
+ except requests.ConnectionError, connection_error:
module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.", apierror=str(connection_error))
diff --git a/lib/ansible/modules/__init__.py b/lib/ansible/modules/__init__.py
index e69de29bb2d..ae8ccff5952 100644
--- a/lib/ansible/modules/__init__.py
+++ b/lib/ansible/modules/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index 44ef8b3bc66..549df99c5bf 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit 44ef8b3bc66365a0ca89411041eb0d51c541d6db
+Subproject commit 549df99c5bf1729e9034d7348b166f2bb8ffab4c
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras
index b2e4f31bebf..27bf1934839 160000
--- a/lib/ansible/modules/extras
+++ b/lib/ansible/modules/extras
@@ -1 +1 @@
-Subproject commit b2e4f31bebfec49380659b9d65b5828f1c1ed8d9
+Subproject commit 27bf1934839719ed032fc90d137453d3dc0ee99d
diff --git a/v2/ansible/new_inventory/__init__.py b/lib/ansible/new_inventory/__init__.py
similarity index 100%
rename from v2/ansible/new_inventory/__init__.py
rename to lib/ansible/new_inventory/__init__.py
diff --git a/v2/ansible/new_inventory/group.py b/lib/ansible/new_inventory/group.py
similarity index 100%
rename from v2/ansible/new_inventory/group.py
rename to lib/ansible/new_inventory/group.py
diff --git a/v2/ansible/new_inventory/host.py b/lib/ansible/new_inventory/host.py
similarity index 100%
rename from v2/ansible/new_inventory/host.py
rename to lib/ansible/new_inventory/host.py
diff --git a/v2/ansible/parsing/__init__.py b/lib/ansible/parsing/__init__.py
similarity index 77%
rename from v2/ansible/parsing/__init__.py
rename to lib/ansible/parsing/__init__.py
index 9551343fbf4..a7d414c0434 100644
--- a/v2/ansible/parsing/__init__.py
+++ b/lib/ansible/parsing/__init__.py
@@ -31,6 +31,7 @@ from ansible.parsing.splitter import unquote
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleUnicode
from ansible.utils.path import unfrackpath
+from ansible.utils.unicode import to_unicode
class DataLoader():
@@ -136,6 +137,8 @@ class DataLoader():
Reads the file contents from the given file name, and will decrypt them
if they are found to be vault-encrypted.
'''
+ if not file_name or not isinstance(file_name, basestring):
+ raise AnsibleParserError("Invalid filename: '%s'" % str(file_name))
if not self.path_exists(file_name) or not self.is_file(file_name):
raise AnsibleParserError("the file_name '%s' does not exist, or is not readable" % file_name)
@@ -175,7 +178,7 @@ class DataLoader():
''' sets the base directory, used to find files when a relative path is given '''
if basedir is not None:
- self._basedir = basedir
+ self._basedir = to_unicode(basedir)
def path_dwim(self, given):
'''
@@ -191,32 +194,47 @@ class DataLoader():
else:
return os.path.abspath(os.path.join(self._basedir, given))
- def path_dwim_relative(self, role_path, dirname, source):
- ''' find one file in a directory one level up in a dir named dirname relative to current '''
+ def path_dwim_relative(self, path, dirname, source):
+ ''' find one file in a role/playbook dirs with/without dirname subdir '''
- basedir = os.path.dirname(role_path)
- if os.path.islink(basedir):
- basedir = unfrackpath(basedir)
- template2 = os.path.join(basedir, dirname, source)
+ search = []
+ isrole = False
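+ # candidate paths are collected in priority order; the loop at the end returns the first one that exists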
+
+ # I have full path, nothing else needs to be looked at
+ if source.startswith('~') or source.startswith('/'):
+ search.append(self.path_dwim(source))
else:
- template2 = os.path.join(basedir, '..', dirname, source)
+ # base role/play path + templates/files/vars + relative filename
+ search.append(os.path.join(path, dirname, source))
+
+ basedir = unfrackpath(path)
+
+ # is it a role and if so make sure you get correct base path
+ if (path.endswith('tasks') and os.path.exists(os.path.join(path, 'main.yml'))) \
+ or os.path.exists(os.path.join(path, 'tasks/main.yml')):
+ isrole = True
+ if path.endswith('tasks'):
+ basedir = unfrackpath(os.path.dirname(path))
+
+ cur_basedir = self._basedir
+ self.set_basedir(basedir)
+ # resolved base role/play path + templates/files/vars + relative filename
+ search.append(self.path_dwim(os.path.join(basedir, dirname, source)))
+ self.set_basedir(cur_basedir)
- source1 = os.path.join(role_path, dirname, source)
- if os.path.exists(source1):
- return source1
+ if isrole and not source.endswith(dirname):
+ # look in role's tasks dir w/o dirname
+ search.append(self.path_dwim(os.path.join(basedir, 'tasks', source)))
- cur_basedir = self._basedir
- self.set_basedir(basedir)
- source2 = self.path_dwim(template2)
- if os.path.exists(source2):
- self.set_basedir(cur_basedir)
- return source2
+ # try to create absolute path for loader basedir + templates/files/vars + filename
+ search.append(self.path_dwim(os.path.join(dirname,source)))
- obvious_local_path = self.path_dwim(source)
- if os.path.exists(obvious_local_path):
- self.set_basedir(cur_basedir)
- return obvious_local_path
+ # try to create absolute path for loader basedir + filename
+ search.append(self.path_dwim(source))
+
+ for candidate in search:
+ if os.path.exists(candidate):
+ break
- self.set_basedir(cur_basedir)
- return source2 # which does not exist
+ return candidate
diff --git a/v2/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py
similarity index 90%
rename from v2/ansible/parsing/mod_args.py
rename to lib/ansible/parsing/mod_args.py
index ed527f1b08f..8a084587d0e 100644
--- a/v2/ansible/parsing/mod_args.py
+++ b/lib/ansible/parsing/mod_args.py
@@ -23,7 +23,22 @@ from six import iteritems, string_types
from ansible.errors import AnsibleParserError
from ansible.plugins import module_loader
-from ansible.parsing.splitter import parse_kv
+from ansible.parsing.splitter import parse_kv, split_args
+from ansible.template import Templar
+
+# For filtering out modules correctly below
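+# i.e. modules that accept a free-form parameter string rather than key=value pairs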
+RAW_PARAM_MODULES = ([
+ 'command',
+ 'shell',
+ 'script',
+ 'include',
+ 'include_vars',
+ 'add_host',
+ 'group_by',
+ 'set_fact',
+ 'raw',
+ 'meta',
+])
class ModuleArgsParser:
@@ -77,7 +92,7 @@ class ModuleArgsParser:
self._task_ds = task_ds
- def _split_module_string(self, str):
+ def _split_module_string(self, module_string):
'''
when module names are expressed like:
action: copy src=a dest=b
@@ -85,7 +100,7 @@ class ModuleArgsParser:
and the rest are strings pertaining to the arguments.
'''
- tokens = str.split()
+ tokens = split_args(module_string)
if len(tokens) > 1:
return (tokens[0], " ".join(tokens[1:]))
else:
@@ -166,7 +181,7 @@ class ModuleArgsParser:
args = thing
elif isinstance(thing, string_types):
# form is like: local_action: copy src=a dest=b ... pretty common
- check_raw = action in ('command', 'shell', 'script')
+ check_raw = action in ('command', 'shell', 'script', 'raw')
args = parse_kv(thing, check_raw=check_raw)
elif thing is None:
# this can happen with modules which take no params, like ping:
@@ -203,7 +218,7 @@ class ModuleArgsParser:
elif isinstance(thing, string_types):
# form is like: copy: src=a dest=b ... common shorthand throughout ansible
(action, args) = self._split_module_string(thing)
- check_raw = action in ('command', 'shell', 'script')
+ check_raw = action in ('command', 'shell', 'script', 'raw')
args = parse_kv(args, check_raw=check_raw)
else:
@@ -226,17 +241,13 @@ class ModuleArgsParser:
args = dict()
- #
- # We can have one of action, local_action, or module specified
- #
-
-
# this is the 'extra gross' scenario detailed above, so we grab
# the args and pass them in as additional arguments, which can/will
# be overwritten via dict updates from the other arg sources below
# FIXME: add test cases for this
additional_args = self._task_ds.get('args', dict())
+ # We can have one of action, local_action, or module specified
# action
if 'action' in self._task_ds:
# an old school 'action' statement
@@ -267,10 +278,13 @@ class ModuleArgsParser:
# if we didn't see any module in the task at all, it's not a task really
if action is None:
raise AnsibleParserError("no action detected in task", obj=self._task_ds)
- # FIXME: disabled for now, as there are other places besides the shell/script modules where
- # having variables as the sole param for the module is valid (include_vars, add_host, and group_by?)
- #elif args.get('_raw_params', '') != '' and action not in ('command', 'shell', 'script', 'include_vars'):
- # raise AnsibleParserError("this task has extra params, which is only allowed in the command, shell or script module.", obj=self._task_ds)
+ elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES:
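+ # raw params containing template variables are stashed for later resolution; anything else is a parse error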
+ templar = Templar(loader=None)
+ raw_params = args.pop('_raw_params')
+ if templar._contains_vars(raw_params):
+ args['_variable_params'] = raw_params
+ else:
+ raise AnsibleParserError("this task '%s' has extra params, which is only allowed in the following modules: %s" % (action, ", ".join(RAW_PARAM_MODULES)), obj=self._task_ds)
# shell modules require special handling
(action, args) = self._handle_shell_weirdness(action, args)
diff --git a/v2/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py
similarity index 98%
rename from v2/ansible/parsing/splitter.py
rename to lib/ansible/parsing/splitter.py
index a1dc051d24c..989c52e2035 100644
--- a/v2/ansible/parsing/splitter.py
+++ b/lib/ansible/parsing/splitter.py
@@ -29,7 +29,6 @@ _ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U{0} # 8-digit hex escapes
| \\u{1} # 4-digit hex escapes
| \\x{2} # 2-digit hex escapes
- | \\[0-7]{{1,3}} # Octal escapes
| \\N\{{[^}}]+\}} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)'''.format(_HEXCHAR*8, _HEXCHAR*4, _HEXCHAR*2), re.UNICODE | re.VERBOSE)
@@ -264,7 +263,7 @@ def split_args(args):
return params
def is_quoted(data):
- return len(data) > 0 and (data[0] == '"' and data[-1] == '"' or data[0] == "'" and data[-1] == "'")
+ return len(data) > 1 and data[0] == data[-1] and data[0] in ('"', "'") and data[-2] != '\\'
def unquote(data):
''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
diff --git a/v2/ansible/parsing/utils/__init__.py b/lib/ansible/parsing/utils/__init__.py
similarity index 100%
rename from v2/ansible/parsing/utils/__init__.py
rename to lib/ansible/parsing/utils/__init__.py
diff --git a/v2/ansible/parsing/utils/jsonify.py b/lib/ansible/parsing/utils/jsonify.py
similarity index 100%
rename from v2/ansible/parsing/utils/jsonify.py
rename to lib/ansible/parsing/utils/jsonify.py
diff --git a/v2/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py
similarity index 84%
rename from v2/ansible/parsing/vault/__init__.py
rename to lib/ansible/parsing/vault/__init__.py
index e45fddc1970..f3cee27ea47 100644
--- a/v2/ansible/parsing/vault/__init__.py
+++ b/lib/ansible/parsing/vault/__init__.py
@@ -29,15 +29,28 @@ import shutil
import tempfile
from io import BytesIO
from subprocess import call
-from ansible import errors
+from ansible.errors import AnsibleError
from hashlib import sha256
+from binascii import hexlify
+from binascii import unhexlify
+from six import binary_type, PY3, text_type
+
# Note: Only used for loading obsolete VaultAES files. All files are written
# using the newer VaultAES256 which does not require md5
from hashlib import md5
-from binascii import hexlify
-from binascii import unhexlify
-from six import binary_type, byte2int, PY2, text_type
-from ansible import constants as C
+
+
+try:
+ from six import byte2int
+except ImportError:
+ # byte2int added in six-1.4.0
+ if PY3:
+ import operator
+ byte2int = operator.itemgetter(0)
+ else:
+ def byte2int(bs):
+ return ord(bs[0])
+
from ansible.utils.unicode import to_unicode, to_bytes
@@ -74,6 +87,11 @@ HEADER=u'$ANSIBLE_VAULT'
CIPHER_WHITELIST=['AES', 'AES256']
+def check_prereqs():
+
+ if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
+ raise AnsibleError(CRYPTO_UPGRADE)
+
class VaultLib(object):
def __init__(self, password):
@@ -92,17 +110,17 @@ class VaultLib(object):
data = to_unicode(data)
if self.is_encrypted(data):
- raise errors.AnsibleError("data is already encrypted")
+ raise AnsibleError("data is already encrypted")
if not self.cipher_name:
self.cipher_name = "AES256"
- # raise errors.AnsibleError("the cipher must be set before encrypting data")
+ # raise AnsibleError("the cipher must be set before encrypting data")
if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
cipher = globals()['Vault' + self.cipher_name]
this_cipher = cipher()
else:
- raise errors.AnsibleError("{} cipher could not be found".format(self.cipher_name))
+ raise AnsibleError("{0} cipher could not be found".format(self.cipher_name))
"""
# combine sha + data
@@ -121,10 +139,10 @@ class VaultLib(object):
data = to_bytes(data)
if self.password is None:
- raise errors.AnsibleError("A vault password must be specified to decrypt data")
+ raise AnsibleError("A vault password must be specified to decrypt data")
if not self.is_encrypted(data):
- raise errors.AnsibleError("data is not encrypted")
+ raise AnsibleError("data is not encrypted")
# clean out header
data = self._split_header(data)
@@ -135,12 +153,12 @@ class VaultLib(object):
cipher = globals()['Vault' + ciphername]
this_cipher = cipher()
else:
- raise errors.AnsibleError("{} cipher could not be found".format(ciphername))
+ raise AnsibleError("{0} cipher could not be found".format(ciphername))
# try to unencrypt data
data = this_cipher.decrypt(data, self.password)
if data is None:
- raise errors.AnsibleError("Decryption failed")
+ raise AnsibleError("Decryption failed")
return data
@@ -150,7 +168,7 @@ class VaultLib(object):
#tmpdata = hexlify(data)
tmpdata = [to_bytes(data[i:i+80]) for i in range(0, len(data), 80)]
if not self.cipher_name:
- raise errors.AnsibleError("the cipher must be set before adding a header")
+ raise AnsibleError("the cipher must be set before adding a header")
dirty_data = to_bytes(HEADER + ";" + self.version + ";" + self.cipher_name + "\n")
for l in tmpdata:
@@ -209,6 +227,10 @@ class VaultEditor(object):
call(self._editor_shell_command(tmp_path))
tmpdata = self.read_data(tmp_path)
+ # Do nothing if the content has not changed
+ if existing_data == tmpdata:
+ return
+
# create new vault
this_vault = VaultLib(self.password)
if cipher:
@@ -227,38 +249,35 @@ class VaultEditor(object):
def create_file(self):
""" create a new encrypted file """
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
+ check_prereqs()
if os.path.isfile(self.filename):
- raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename)
+ raise AnsibleError("%s exists, please use 'edit' instead" % self.filename)
# Let the user specify contents and save file
self._edit_file_helper(cipher=self.cipher_name)
def decrypt_file(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
+ check_prereqs()
if not os.path.isfile(self.filename):
- raise errors.AnsibleError("%s does not exist" % self.filename)
+ raise AnsibleError("%s does not exist" % self.filename)
tmpdata = self.read_data(self.filename)
this_vault = VaultLib(self.password)
if this_vault.is_encrypted(tmpdata):
dec_data = this_vault.decrypt(tmpdata)
if dec_data is None:
- raise errors.AnsibleError("Decryption failed")
+ raise AnsibleError("Decryption failed")
else:
self.write_data(dec_data, self.filename)
else:
- raise errors.AnsibleError("%s is not encrypted" % self.filename)
+ raise AnsibleError("%s is not encrypted" % self.filename)
def edit_file(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
+ check_prereqs()
# decrypt to tmpfile
tmpdata = self.read_data(self.filename)
@@ -274,8 +293,7 @@ class VaultEditor(object):
def view_file(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
+ check_prereqs()
# decrypt to tmpfile
tmpdata = self.read_data(self.filename)
@@ -290,11 +308,10 @@ class VaultEditor(object):
def encrypt_file(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
+ check_prereqs()
if not os.path.isfile(self.filename):
- raise errors.AnsibleError("%s does not exist" % self.filename)
+ raise AnsibleError("%s does not exist" % self.filename)
tmpdata = self.read_data(self.filename)
this_vault = VaultLib(self.password)
@@ -303,12 +320,11 @@ class VaultEditor(object):
enc_data = this_vault.encrypt(tmpdata)
self.write_data(enc_data, self.filename)
else:
- raise errors.AnsibleError("%s is already encrypted" % self.filename)
+ raise AnsibleError("%s is already encrypted" % self.filename)
def rekey_file(self, new_password):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
+ check_prereqs()
# decrypt
tmpdata = self.read_data(self.filename)
@@ -358,6 +374,48 @@ class VaultEditor(object):
return pager
+class VaultFile(object):
+
+ def __init__(self, password, filename):
+ self.password = password
+
+ self.filename = filename
+ if not os.path.isfile(self.filename):
+ raise AnsibleError("%s does not exist" % self.filename)
+ try:
+ self.filehandle = open(filename, "rb")
+ except Exception as e:
+ raise AnsibleError("Could not open %s: %s" % (self.filename, str(e)))
+
+ _, self.tmpfile = tempfile.mkstemp()
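+ # temporary file used to hand back decrypted contents from get_decrypted()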
+
+ def __del__(self):
+ self.filehandle.close()
+ os.unlink(self.tmpfile)
+
+ def is_encrypted(self):
+ peek = self.filehandle.readline()
+ if peek.startswith(HEADER):
+ return True
+ else:
+ return False
+
+ def get_decrypted(self):
+
+ check_prereqs()
+
+ if self.is_encrypted():
+ tmpdata = self.filehandle.read()
+ this_vault = VaultLib(self.password)
+ dec_data = this_vault.decrypt(tmpdata)
+ if dec_data is None:
+ raise AnsibleError("Decryption failed")
+ else:
+ fh = open(self.tmpfile, 'wb')
+ fh.write(dec_data)
+ fh.close()
+ return self.tmpfile
+ else:
+ return self.filename
+
########################################
# CIPHERS #
########################################
@@ -371,7 +429,7 @@ class VaultAES(object):
def __init__(self):
if not HAS_AES:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
+ raise AnsibleError(CRYPTO_UPGRADE)
def aes_derive_key_and_iv(self, password, salt, key_length, iv_length):
@@ -379,7 +437,7 @@ class VaultAES(object):
d = d_i = b''
while len(d) < key_length + iv_length:
- text = "{}{}{}".format(d_i, password, salt)
+ text = "{0}{1}{2}".format(d_i, password, salt)
d_i = md5(to_bytes(text)).digest()
d += d_i
@@ -451,10 +509,10 @@ class VaultAES(object):
while not finished:
chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs))
if len(next_chunk) == 0:
- if PY2:
- padding_length = ord(chunk[-1])
- else:
+ if PY3:
padding_length = chunk[-1]
+ else:
+ padding_length = ord(chunk[-1])
chunk = chunk[:-padding_length]
finished = True
@@ -475,7 +533,7 @@ class VaultAES(object):
test_sha = sha256(to_bytes(this_data)).hexdigest()
if this_sha != test_sha:
- raise errors.AnsibleError("Decryption failed")
+ raise AnsibleError("Decryption failed")
return this_data
@@ -491,8 +549,7 @@ class VaultAES256(object):
def __init__(self):
- if not HAS_PBKDF2 or not HAS_COUNTER or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
+ check_prereqs()
def gen_key_initctr(self, password, salt):
# 16 for AES 128, 32 for AES256
@@ -596,8 +653,9 @@ class VaultAES256(object):
result = 0
for x, y in zip(a, b):
- if PY2:
- result |= ord(x) ^ ord(y)
- else:
+ if PY3:
result |= x ^ y
+ else:
+ result |= ord(x) ^ ord(y)
return result == 0
+
diff --git a/v2/ansible/parsing/yaml/__init__.py b/lib/ansible/parsing/yaml/__init__.py
similarity index 100%
rename from v2/ansible/parsing/yaml/__init__.py
rename to lib/ansible/parsing/yaml/__init__.py
diff --git a/v2/ansible/parsing/yaml/constructor.py b/lib/ansible/parsing/yaml/constructor.py
similarity index 100%
rename from v2/ansible/parsing/yaml/constructor.py
rename to lib/ansible/parsing/yaml/constructor.py
diff --git a/lib/ansible/parsing/yaml/dumper.py b/lib/ansible/parsing/yaml/dumper.py
new file mode 100644
index 00000000000..dc498acd066
--- /dev/null
+++ b/lib/ansible/parsing/yaml/dumper.py
@@ -0,0 +1,37 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import yaml
+
+from ansible.parsing.yaml.objects import AnsibleUnicode
+
+class AnsibleDumper(yaml.SafeDumper):
+ '''
+ A simple stub class that allows us to add representers
+ for our overridden object types.
+ '''
+ pass
+
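+# register the plain unicode representer for AnsibleUnicode so it dumps like a normal string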
+AnsibleDumper.add_representer(
+ AnsibleUnicode,
+ yaml.representer.SafeRepresenter.represent_unicode
+)
+
diff --git a/v2/ansible/parsing/yaml/loader.py b/lib/ansible/parsing/yaml/loader.py
similarity index 100%
rename from v2/ansible/parsing/yaml/loader.py
rename to lib/ansible/parsing/yaml/loader.py
diff --git a/v2/ansible/parsing/yaml/objects.py b/lib/ansible/parsing/yaml/objects.py
similarity index 100%
rename from v2/ansible/parsing/yaml/objects.py
rename to lib/ansible/parsing/yaml/objects.py
diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py
index 24ba2d3c6e0..ab3732b41fe 100644
--- a/lib/ansible/playbook/__init__.py
+++ b/lib/ansible/playbook/__init__.py
@@ -15,860 +15,84 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-import ansible.inventory
-import ansible.constants as C
-import ansible.runner
-from ansible.utils.template import template
-from ansible import utils
-from ansible import errors
-from ansible.module_utils.splitter import split_args, unquote
-import ansible.callbacks
-import ansible.cache
-import os
-import shlex
-import collections
-from play import Play
-import StringIO
-import pipes
-
-# the setup cache stores all variables about a host
-# gathered during the setup step, while the vars cache
-# holds all other variables about a host
-SETUP_CACHE = ansible.cache.FactCache()
-VARS_CACHE = collections.defaultdict(dict)
-RESERVED_TAGS = ['all','tagged','untagged','always']
-
-
-class PlayBook(object):
- '''
- runs an ansible playbook, given as a datastructure or YAML filename.
- A playbook is a deployment, config management, or automation based
- set of commands to run in series.
-
- multiple plays/tasks do not execute simultaneously, but tasks in each
- pattern do execute in parallel (according to the number of forks
- requested) among the hosts they address
- '''
-
- # *****************************************************
-
- def __init__(self,
- playbook = None,
- host_list = C.DEFAULT_HOST_LIST,
- module_path = None,
- forks = C.DEFAULT_FORKS,
- timeout = C.DEFAULT_TIMEOUT,
- remote_user = C.DEFAULT_REMOTE_USER,
- remote_pass = C.DEFAULT_REMOTE_PASS,
- remote_port = None,
- transport = C.DEFAULT_TRANSPORT,
- private_key_file = C.DEFAULT_PRIVATE_KEY_FILE,
- callbacks = None,
- runner_callbacks = None,
- stats = None,
- extra_vars = None,
- only_tags = None,
- skip_tags = None,
- subset = C.DEFAULT_SUBSET,
- inventory = None,
- check = False,
- diff = False,
- any_errors_fatal = False,
- vault_password = False,
- force_handlers = False,
- # privilege escalation
- become = C.DEFAULT_BECOME,
- become_method = C.DEFAULT_BECOME_METHOD,
- become_user = C.DEFAULT_BECOME_USER,
- become_pass = None,
- ):
-
- """
- playbook: path to a playbook file
- host_list: path to a file like /etc/ansible/hosts
- module_path: path to ansible modules, like /usr/share/ansible/
- forks: desired level of parallelism
- timeout: connection timeout
- remote_user: run as this user if not specified in a particular play
- remote_pass: use this remote password (for all plays) vs using SSH keys
- remote_port: default remote port to use if not specified with the host or play
- transport: how to connect to hosts that don't specify a transport (local, paramiko, etc)
- callbacks output callbacks for the playbook
- runner_callbacks: more callbacks, this time for the runner API
- stats: holds aggregrate data about events occurring to each host
- inventory: can be specified instead of host_list to use a pre-existing inventory object
- check: don't change anything, just try to detect some potential changes
- any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed
- force_handlers: continue to notify and run handlers even if a task fails
- """
-
- self.SETUP_CACHE = SETUP_CACHE
- self.VARS_CACHE = VARS_CACHE
-
- arguments = []
- if playbook is None:
- arguments.append('playbook')
- if callbacks is None:
- arguments.append('callbacks')
- if runner_callbacks is None:
- arguments.append('runner_callbacks')
- if stats is None:
- arguments.append('stats')
- if arguments:
- raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments))
-
- if extra_vars is None:
- extra_vars = {}
- if only_tags is None:
- only_tags = [ 'all' ]
- if skip_tags is None:
- skip_tags = []
-
- self.check = check
- self.diff = diff
- self.module_path = module_path
- self.forks = forks
- self.timeout = timeout
- self.remote_user = remote_user
- self.remote_pass = remote_pass
- self.remote_port = remote_port
- self.transport = transport
- self.callbacks = callbacks
- self.runner_callbacks = runner_callbacks
- self.stats = stats
- self.extra_vars = extra_vars
- self.global_vars = {}
- self.private_key_file = private_key_file
- self.only_tags = only_tags
- self.skip_tags = skip_tags
- self.any_errors_fatal = any_errors_fatal
- self.vault_password = vault_password
- self.force_handlers = force_handlers
-
- self.become = become
- self.become_method = become_method
- self.become_user = become_user
- self.become_pass = become_pass
-
- self.callbacks.playbook = self
- self.runner_callbacks.playbook = self
-
- if inventory is None:
- self.inventory = ansible.inventory.Inventory(host_list)
- self.inventory.subset(subset)
- else:
- self.inventory = inventory
-
- if self.module_path is not None:
- utils.plugins.module_finder.add_directory(self.module_path)
-
- self.basedir = os.path.dirname(playbook) or '.'
- utils.plugins.push_basedir(self.basedir)
-
- # let inventory know the playbook basedir so it can load more vars
- self.inventory.set_playbook_basedir(self.basedir)
-
- vars = extra_vars.copy()
- vars['playbook_dir'] = os.path.abspath(self.basedir)
- if self.inventory.basedir() is not None:
- vars['inventory_dir'] = self.inventory.basedir()
-
- if self.inventory.src() is not None:
- vars['inventory_file'] = self.inventory.src()
-
- self.filename = playbook
- (self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars)
- ansible.callbacks.load_callback_plugins()
- ansible.callbacks.set_playbook(self.callbacks, self)
-
- self._ansible_version = utils.version_info(gitinfo=True)
-
- # *****************************************************
-
- def _get_playbook_vars(self, play_ds, existing_vars):
- '''
- Gets the vars specified with the play and blends them
- with any existing vars that have already been read in
- '''
- new_vars = existing_vars.copy()
- if 'vars' in play_ds:
- if isinstance(play_ds['vars'], dict):
- new_vars.update(play_ds['vars'])
- elif isinstance(play_ds['vars'], list):
- for v in play_ds['vars']:
- new_vars.update(v)
- return new_vars
-
- # *****************************************************
-
- def _get_include_info(self, play_ds, basedir, existing_vars={}):
- '''
- Gets any key=value pairs specified with the included file
- name and returns the merged vars along with the path
- '''
- new_vars = existing_vars.copy()
- tokens = split_args(play_ds.get('include', ''))
- for t in tokens[1:]:
- try:
- (k,v) = unquote(t).split("=", 1)
- new_vars[k] = template(basedir, v, new_vars)
- except ValueError, e:
- raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t)
-
- return (new_vars, unquote(tokens[0]))
-
- # *****************************************************
-
- def _get_playbook_vars_files(self, play_ds, existing_vars_files):
- new_vars_files = list(existing_vars_files)
- if 'vars_files' in play_ds:
- new_vars_files = utils.list_union(new_vars_files, play_ds['vars_files'])
- return new_vars_files
-
- # *****************************************************
-
- def _extend_play_vars(self, play, vars={}):
- '''
- Extends the given play's variables with the additional specified vars.
- '''
-
- if 'vars' not in play or not play['vars']:
- # someone left out or put an empty "vars:" entry in their playbook
- return vars.copy()
-
- play_vars = None
- if isinstance(play['vars'], dict):
- play_vars = play['vars'].copy()
- play_vars.update(vars)
- elif isinstance(play['vars'], list):
- # nobody should really do this, but handle vars: a=1 b=2
- play_vars = play['vars'][:]
- play_vars.extend([{k:v} for k,v in vars.iteritems()])
-
- return play_vars
-
- # *****************************************************
-
- def _load_playbook_from_file(self, path, vars={}, vars_files=[]):
- '''
- run top level error checking on playbooks and allow them to include other playbooks.
- '''
-
- playbook_data = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
- accumulated_plays = []
- play_basedirs = []
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
- if type(playbook_data) != list:
- raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data))
-
- basedir = os.path.dirname(path) or '.'
- utils.plugins.push_basedir(basedir)
- for play in playbook_data:
- if type(play) != dict:
- raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play)
-
- if 'include' in play:
- # a playbook (list of plays) decided to include some other list of plays
- # from another file. The result is a flat list of plays in the end.
-
- play_vars = self._get_playbook_vars(play, vars)
- play_vars_files = self._get_playbook_vars_files(play, vars_files)
- inc_vars, inc_path = self._get_include_info(play, basedir, play_vars)
- play_vars.update(inc_vars)
-
- included_path = utils.path_dwim(basedir, template(basedir, inc_path, play_vars))
- (plays, basedirs) = self._load_playbook_from_file(included_path, vars=play_vars, vars_files=play_vars_files)
- for p in plays:
- # support for parameterized play includes works by passing
- # those variables along to the subservient play
- p['vars'] = self._extend_play_vars(p, play_vars)
- # now add in the vars_files
- p['vars_files'] = utils.list_union(p.get('vars_files', []), play_vars_files)
-
- accumulated_plays.extend(plays)
- play_basedirs.extend(basedirs)
-
- else:
-
- # this is a normal (non-included play)
- accumulated_plays.append(play)
- play_basedirs.append(basedir)
-
- return (accumulated_plays, play_basedirs)
-
- # *****************************************************
-
- def run(self):
- ''' run all patterns in the playbook '''
- plays = []
- matched_tags_all = set()
- unmatched_tags_all = set()
-
- # loop through all patterns and run them
- self.callbacks.on_start()
- for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
- play = Play(self, play_ds, play_basedir, vault_password=self.vault_password)
- assert play is not None
-
- matched_tags, unmatched_tags = play.compare_tags(self.only_tags)
-
- matched_tags_all = matched_tags_all | matched_tags
- unmatched_tags_all = unmatched_tags_all | unmatched_tags
-
- # Remove tasks we wish to skip
- matched_tags = matched_tags - set(self.skip_tags)
-
- # if we have matched_tags, the play must be run.
- # if the play contains no tasks, assume we just want to gather facts
- # in this case there are actually 3 meta tasks (handler flushes) not 0
- # tasks, so that's why there's a check against 3
- if (len(matched_tags) > 0 or len(play.tasks()) == 3):
- plays.append(play)
-
- # if the playbook is invoked with --tags or --skip-tags that don't
- # exist at all in the playbooks then we need to raise an error so that
- # the user can correct the arguments.
- unknown_tags = ((set(self.only_tags) | set(self.skip_tags)) -
- (matched_tags_all | unmatched_tags_all))
-
- for t in RESERVED_TAGS:
- unknown_tags.discard(t)
-
- if len(unknown_tags) > 0:
- for t in RESERVED_TAGS:
- unmatched_tags_all.discard(t)
- msg = 'tag(s) not found in playbook: %s. possible values: %s'
- unknown = ','.join(sorted(unknown_tags))
- unmatched = ','.join(sorted(unmatched_tags_all))
- raise errors.AnsibleError(msg % (unknown, unmatched))
-
- for play in plays:
- ansible.callbacks.set_play(self.callbacks, play)
- ansible.callbacks.set_play(self.runner_callbacks, play)
- if not self._run_play(play):
- break
-
- ansible.callbacks.set_play(self.callbacks, None)
- ansible.callbacks.set_play(self.runner_callbacks, None)
-
- # summarize the results
- results = {}
- for host in self.stats.processed.keys():
- results[host] = self.stats.summarize(host)
- return results
-
- # *****************************************************
-
- def _async_poll(self, poller, async_seconds, async_poll_interval):
- ''' launch an async job, if poll_interval is set, wait for completion '''
-
- results = poller.wait(async_seconds, async_poll_interval)
-
- # mark any hosts that are still listed as started as failed
- # since these likely got killed by async_wrapper
- for host in poller.hosts_to_poll:
- reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' }
- self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id'])
- results['contacted'][host] = reason
-
- return results
-
- # *****************************************************
-
- def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False):
- ''' returns a list of hosts that haven't failed and aren't dark '''
-
- return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)]
-
- # *****************************************************
-
- def _run_task_internal(self, task, include_failed=False):
- ''' run a particular module step in a playbook '''
-
- hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed)
- self.inventory.restrict_to(hosts)
-
- runner = ansible.runner.Runner(
- pattern=task.play.hosts,
- inventory=self.inventory,
- module_name=task.module_name,
- module_args=task.module_args,
- forks=self.forks,
- remote_pass=self.remote_pass,
- module_path=self.module_path,
- timeout=self.timeout,
- remote_user=task.remote_user,
- remote_port=task.play.remote_port,
- module_vars=task.module_vars,
- play_vars=task.play_vars,
- play_file_vars=task.play_file_vars,
- role_vars=task.role_vars,
- role_params=task.role_params,
- default_vars=task.default_vars,
- extra_vars=self.extra_vars,
- private_key_file=self.private_key_file,
- setup_cache=self.SETUP_CACHE,
- vars_cache=self.VARS_CACHE,
- basedir=task.play.basedir,
- conditional=task.when,
- callbacks=self.runner_callbacks,
- transport=task.transport,
- is_playbook=True,
- check=self.check,
- diff=self.diff,
- environment=task.environment,
- complex_args=task.args,
- accelerate=task.play.accelerate,
- accelerate_port=task.play.accelerate_port,
- accelerate_ipv6=task.play.accelerate_ipv6,
- error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR,
- vault_pass = self.vault_password,
- run_hosts=hosts,
- no_log=task.no_log,
- run_once=task.run_once,
- become=task.become,
- become_method=task.become_method,
- become_user=task.become_user,
- become_pass=task.become_pass,
- )
-
- runner.module_vars.update({'play_hosts': hosts})
- runner.module_vars.update({'ansible_version': self._ansible_version})
-
- if task.async_seconds == 0:
- results = runner.run()
- else:
- results, poller = runner.run_async(task.async_seconds)
- self.stats.compute(results)
- if task.async_poll_interval > 0:
- # if not polling, playbook requested fire and forget, so don't poll
- results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
- else:
- for (host, res) in results.get('contacted', {}).iteritems():
- self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id'])
-
- contacted = results.get('contacted',{})
- dark = results.get('dark', {})
-
- self.inventory.lift_restriction()
-
- if len(contacted.keys()) == 0 and len(dark.keys()) == 0:
- return None
-
- return results
-
- # *****************************************************
-
- def _run_task(self, play, task, is_handler):
- ''' run a single task in the playbook and recursively run any subtasks. '''
-
- ansible.callbacks.set_task(self.callbacks, task)
- ansible.callbacks.set_task(self.runner_callbacks, task)
-
- if task.role_name:
- name = '%s | %s' % (task.role_name, task.name)
- else:
- name = task.name
-
- try:
- # v1 HACK: we don't have enough information to template many names
- # at this point. Rather than making this work for all cases in
- # v1, just make this degrade gracefully. Will fix in v2
- name = template(play.basedir, name, task.module_vars, lookup_fatal=False, filter_fatal=False)
- except:
- pass
-
- self.callbacks.on_task_start(name, is_handler)
- if hasattr(self.callbacks, 'skip_task') and self.callbacks.skip_task:
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
- return True
-
- # template ignore_errors
- # TODO: Is this needed here? cond is templated again in
- # check_conditional after some more manipulations.
- # TODO: we don't have enough information here to template cond either
- # (see note on templating name above)
- cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False)
- task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
-
- # load up an appropriate ansible runner to run the task in parallel
- include_failed = is_handler and play.force_handlers
- results = self._run_task_internal(task, include_failed=include_failed)
-
- # if no hosts are matched, carry on
- hosts_remaining = True
- if results is None:
- hosts_remaining = False
- results = {}
-
- contacted = results.get('contacted', {})
- self.stats.compute(results, ignore_errors=task.ignore_errors)
-
- def _register_play_vars(host, result):
- # when 'register' is used, persist the result in the vars cache
- # rather than the setup cache - vars should be transient between
- # playbook executions
- if 'stdout' in result and 'stdout_lines' not in result:
- result['stdout_lines'] = result['stdout'].splitlines()
- utils.update_hash(self.VARS_CACHE, host, {task.register: result})
-
- def _save_play_facts(host, facts):
- # saves play facts in SETUP_CACHE, unless the module executed was
- # set_fact, in which case we add them to the VARS_CACHE
- if task.module_name in ('set_fact', 'include_vars'):
- utils.update_hash(self.VARS_CACHE, host, facts)
- else:
- utils.update_hash(self.SETUP_CACHE, host, facts)
-
- # add facts to the global setup cache
- for host, result in contacted.iteritems():
- if 'results' in result:
- # task ran with_ lookup plugin, so facts are encapsulated in
- # multiple list items in the results key
- for res in result['results']:
- if type(res) == dict:
- facts = res.get('ansible_facts', {})
- _save_play_facts(host, facts)
- else:
- # when facts are returned, persist them in the setup cache
- facts = result.get('ansible_facts', {})
- _save_play_facts(host, facts)
-
- # if requested, save the result into the registered variable name
- if task.register:
- _register_play_vars(host, result)
-
- # also have to register some failed, but ignored, tasks
- if task.ignore_errors and task.register:
- failed = results.get('failed', {})
- for host, result in failed.iteritems():
- _register_play_vars(host, result)
-
- # flag which notify handlers need to be run
- if len(task.notify) > 0:
- for host, results in results.get('contacted',{}).iteritems():
- if results.get('changed', False):
- for handler_name in task.notify:
- self._flag_handler(play, template(play.basedir, handler_name, task.module_vars), host)
-
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
- return hosts_remaining
-
- # *****************************************************
-
- def _flag_handler(self, play, handler_name, host):
- '''
- if a task has any notify elements, flag handlers for run
- at end of execution cycle for hosts that have indicated
- changes have been made
- '''
-
- found = False
- for x in play.handlers():
- if handler_name == template(play.basedir, x.name, x.module_vars):
- found = True
- self.callbacks.on_notify(host, x.name)
- x.notified_by.append(host)
- if not found:
- raise errors.AnsibleError("change handler (%s) is not defined" % handler_name)
-
- # *****************************************************
-
- def _do_setup_step(self, play):
- ''' get facts from the remote system '''
-
- host_list = self._trim_unavailable_hosts(play._play_hosts)
-
- if play.gather_facts is None and C.DEFAULT_GATHERING == 'smart':
- host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]]
- if len(host_list) == 0:
- return {}
- elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'explicit'):
- return {}
-
- self.callbacks.on_setup()
- self.inventory.restrict_to(host_list)
-
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
-
- # push any variables down to the system
- setup_results = ansible.runner.Runner(
- basedir=self.basedir,
- pattern=play.hosts,
- module_name='setup',
- module_args={},
- inventory=self.inventory,
- forks=self.forks,
- module_path=self.module_path,
- timeout=self.timeout,
- remote_user=play.remote_user,
- remote_pass=self.remote_pass,
- remote_port=play.remote_port,
- private_key_file=self.private_key_file,
- setup_cache=self.SETUP_CACHE,
- vars_cache=self.VARS_CACHE,
- callbacks=self.runner_callbacks,
- become=play.become,
- become_method=play.become_method,
- become_user=play.become_user,
- become_pass=self.become_pass,
- vault_pass=self.vault_password,
- transport=play.transport,
- is_playbook=True,
- module_vars=play.vars,
- play_vars=play.vars,
- play_file_vars=play.vars_file_vars,
- role_vars=play.role_vars,
- default_vars=play.default_vars,
- check=self.check,
- diff=self.diff,
- accelerate=play.accelerate,
- accelerate_port=play.accelerate_port,
- ).run()
- self.stats.compute(setup_results, setup=True)
-
- self.inventory.lift_restriction()
-
- # now for each result, load into the setup cache so we can
- # let runner template out future commands
- setup_ok = setup_results.get('contacted', {})
- for (host, result) in setup_ok.iteritems():
- utils.update_hash(self.SETUP_CACHE, host, {'module_setup': True})
- utils.update_hash(self.SETUP_CACHE, host, result.get('ansible_facts', {}))
- return setup_results
-
- # *****************************************************
-
-
- def generate_retry_inventory(self, replay_hosts):
- '''
- called by /usr/bin/ansible when a playbook run fails. It generates an inventory
- that allows re-running on ONLY the failed hosts. This may duplicate some
- variable information in group_vars/host_vars but that is ok, and expected.
- '''
-
- buf = StringIO.StringIO()
- for x in replay_hosts:
- buf.write("%s\n" % x)
- basedir = C.shell_expand_path(C.RETRY_FILES_SAVE_PATH)
- filename = "%s.retry" % os.path.basename(self.filename)
- filename = filename.replace(".yml","")
- filename = os.path.join(basedir, filename)
-
- try:
- if not os.path.exists(basedir):
- os.makedirs(basedir)
-
- fd = open(filename, 'w')
- fd.write(buf.getvalue())
- fd.close()
- except:
- ansible.callbacks.display(
- "\nERROR: could not create retry file. Check the value of \n"
- + "the configuration variable 'retry_files_save_path' or set \n"
- + "'retry_files_enabled' to False to avoid this message.\n",
- color='red'
- )
- return None
-
- return filename
-
- # *****************************************************
- def tasks_to_run_in_play(self, play):
-
- tasks = []
-
- for task in play.tasks():
- # only run the task if the requested tags match or has 'always' tag
- u = set(['untagged'])
- task_set = set(task.tags)
-
- if 'always' in task.tags:
- should_run = True
- else:
- if 'all' in self.only_tags:
- should_run = True
- else:
- should_run = False
- if 'tagged' in self.only_tags:
- if task_set != u:
- should_run = True
- elif 'untagged' in self.only_tags:
- if task_set == u:
- should_run = True
- else:
- if task_set.intersection(self.only_tags):
- should_run = True
-
- # Check for tags that we need to skip
- if 'all' in self.skip_tags:
- should_run = False
- else:
- if 'tagged' in self.skip_tags:
- if task_set != u:
- should_run = False
- elif 'untagged' in self.skip_tags:
- if task_set == u:
- should_run = False
- else:
- if should_run:
- if task_set.intersection(self.skip_tags):
- should_run = False
+import os
- if should_run:
- tasks.append(task)
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.parsing import DataLoader
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.play import Play
+from ansible.playbook.playbook_include import PlaybookInclude
+from ansible.plugins import get_all_plugin_loaders
- return tasks
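The tag-selection rules being removed here (and reimplemented by the v2 Taggable/Block code later in this patch) can be summarised as a standalone function; this is an illustration of the semantics, not code from either tree:

    def should_run_task(task_tags, only_tags, skip_tags):
        # mirrors the removed logic: tasks with no tags count as 'untagged',
        # and 'always' runs unless a skip rule matches it
        untagged = set(['untagged'])
        task_set = set(task_tags) or untagged

        if 'always' in task_set:
            should_run = True
        elif 'all' in only_tags:
            should_run = True
        elif 'tagged' in only_tags:
            should_run = task_set != untagged
        elif 'untagged' in only_tags:
            should_run = task_set == untagged
        else:
            should_run = bool(task_set.intersection(only_tags))

        if 'all' in skip_tags:
            should_run = False
        elif 'tagged' in skip_tags and task_set != untagged:
            should_run = False
        elif 'untagged' in skip_tags and task_set == untagged:
            should_run = False
        elif should_run and task_set.intersection(skip_tags):
            should_run = False
        return should_run

    print(should_run_task(['deploy'], only_tags=['all'], skip_tags=[]))          # True
    print(should_run_task(['deploy'], only_tags=['all'], skip_tags=['deploy']))  # False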
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
- # *****************************************************
- def _run_play(self, play):
- ''' run a list of tasks for a given pattern, in order '''
- self.callbacks.on_play_start(play.name)
- # Get the hosts for this play
- play._play_hosts = self.inventory.list_hosts(play.hosts)
- # if no hosts matches this play, drop out
- if not play._play_hosts:
- self.callbacks.on_no_hosts_matched()
- return True
+__all__ = ['Playbook']
- # get facts from system
- self._do_setup_step(play)
- # now with that data, handle conditional variable file imports!
- all_hosts = self._trim_unavailable_hosts(play._play_hosts)
- play.update_vars_files(all_hosts, vault_password=self.vault_password)
- hosts_count = len(all_hosts)
+class Playbook:
- if play.serial.endswith("%"):
+ def __init__(self, loader):
+ # Entries in the datastructure of a playbook may
+ # be either a play or an include statement
+ self._entries = []
+ self._basedir = os.getcwd()
+ self._loader = loader
- # This is a percentage, so calculate it based on the
- # number of hosts
- serial_pct = int(play.serial.replace("%",""))
- serial = int((serial_pct/100.0) * len(all_hosts))
+ @staticmethod
+ def load(file_name, variable_manager=None, loader=None):
+ pb = Playbook(loader=loader)
+ pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
+ return pb
- # Ensure that no matter how small the percentage, serial
- # can never fall below 1, so that things actually happen
- serial = max(serial, 1)
- else:
- serial = int(play.serial)
+ def _load_playbook_data(self, file_name, variable_manager):
- serialized_batch = []
- if serial <= 0:
- serialized_batch = [all_hosts]
+ if os.path.isabs(file_name):
+ self._basedir = os.path.dirname(file_name)
else:
- # do N forks all the way through before moving to next
- while len(all_hosts) > 0:
- play_hosts = []
- for x in range(serial):
- if len(all_hosts) > 0:
- play_hosts.append(all_hosts.pop(0))
- serialized_batch.append(play_hosts)
-
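The serial handling above (a plain integer, or a percentage of the matched hosts floored but never below one) is easy to sketch standalone with a hypothetical host list:

    def serial_batches(hosts, serial):
        # serial may be an int or a percentage string such as "30%";
        # percentages are floored but never drop below one host per batch
        if isinstance(serial, str) and serial.endswith('%'):
            pct = int(serial.rstrip('%'))
            size = max(int((pct / 100.0) * len(hosts)), 1)
        else:
            size = int(serial)
        if size <= 0:
            return [hosts]
        return [hosts[i:i + size] for i in range(0, len(hosts), size)]

    print(serial_batches(['web1', 'web2', 'web3', 'web4', 'web5'], '40%'))
    # [['web1', 'web2'], ['web3', 'web4'], ['web5']]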
- task_errors = False
- for on_hosts in serialized_batch:
-
- # restrict the play to just the hosts we have in our on_hosts block that are
- # available.
- play._play_hosts = self._trim_unavailable_hosts(on_hosts)
- self.inventory.also_restrict_to(on_hosts)
-
- for task in self.tasks_to_run_in_play(play):
-
- if task.meta is not None:
- # meta tasks can force handlers to run mid-play
- if task.meta == 'flush_handlers':
- self.run_handlers(play)
-
- # skip calling the handler till the play is finished
- continue
-
- if not self._run_task(play, task, False):
- # whether no hosts matched is fatal or not depends on whether it was the initial step.
- # if we got exactly no hosts on the first step (setup!) then the host group
- # just didn't match anything and that's ok
- return False
-
- # Get a new list of what hosts are left as available, the ones that
- # did not go fail/dark during the task
- host_list = self._trim_unavailable_hosts(play._play_hosts)
-
- # Set max_fail_pct to 0, so if any host fails, bail out
- if task.any_errors_fatal and len(host_list) < hosts_count:
- play.max_fail_pct = 0
-
- # If threshold for max nodes failed is exceeded, bail out.
- if play.serial > 0:
- # if serial is set, we need to shorten the size of host_count
- play_count = len(play._play_hosts)
- if (play_count - len(host_list)) > int((play.max_fail_pct)/100.0 * play_count):
- host_list = None
+ self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
+
+ # set the loaders basedir
+ self._loader.set_basedir(self._basedir)
+
+ # dynamically load any plugins from the role directory
+ for name, obj in get_all_plugin_loaders():
+ if obj.subdir:
+ plugin_path = os.path.join(self._basedir, obj.subdir)
+ if os.path.isdir(plugin_path):
+ obj.add_directory(plugin_path)
+
+ ds = self._loader.load_from_file(os.path.basename(file_name))
+ if not isinstance(ds, list):
+ raise AnsibleParserError("playbooks must be a list of plays", obj=ds)
+
+ # Parse the playbook entries. For plays, we simply parse them
+ # using the Play() object, and includes are parsed using the
+ # PlaybookInclude() object
+ for entry in ds:
+ if not isinstance(entry, dict):
+ raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
+
+ if 'include' in entry:
+ pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
+ if pb is not None:
+ self._entries.extend(pb._entries)
else:
- if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
- host_list = None
-
- # if no hosts remain, drop out
- if not host_list:
- if play.force_handlers:
- task_errors = True
- break
- else:
- self.callbacks.on_no_hosts_remaining()
- return False
-
- # lift restrictions after each play finishes
- self.inventory.lift_also_restriction()
-
- if task_errors and not play.force_handlers:
- # if there were failed tasks and handler execution
- # is not forced, quit the play with an error
- return False
+ display.display("skipping playbook include '%s' due to conditional test failure" % entry.get('include', entry), color='cyan')
else:
- # no errors, go ahead and execute all handlers
- if not self.run_handlers(play):
- return False
-
- return True
-
-
- def run_handlers(self, play):
- on_hosts = play._play_hosts
- hosts_count = len(on_hosts)
- for task in play.tasks():
- if task.meta is not None:
-
- fired_names = {}
- for handler in play.handlers():
- if len(handler.notified_by) > 0:
- self.inventory.restrict_to(handler.notified_by)
-
- # Resolve the variables first
- handler_name = template(play.basedir, handler.name, handler.module_vars)
- if handler_name not in fired_names:
- self._run_task(play, handler, True)
- # prevent duplicate handler includes from running more than once
- fired_names[handler_name] = 1
-
- host_list = self._trim_unavailable_hosts(play._play_hosts)
- if handler.any_errors_fatal and len(host_list) < hosts_count:
- play.max_fail_pct = 0
- if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
- host_list = None
- if not host_list and not play.force_handlers:
- self.callbacks.on_no_hosts_remaining()
- return False
-
- self.inventory.lift_restriction()
- new_list = handler.notified_by[:]
- for host in handler.notified_by:
- if host in on_hosts:
- while host in new_list:
- new_list.remove(host)
- handler.notified_by = new_list
+ entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)
+ self._entries.append(entry_obj)
- continue
+ def get_loader(self):
+ return self._loader
- return True
+ def get_plays(self):
+ return self._entries[:]
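A minimal sketch of driving the new Playbook class; DataLoader comes from the import block above, while the import path for Playbook and the bare file name are assumptions:

    from ansible.parsing import DataLoader
    from ansible.playbook import Playbook   # assumed import location for the class above

    loader = DataLoader()
    pb = Playbook.load('site.yml', loader=loader)   # variable_manager omitted for brevity
    for play in pb.get_plays():
        print(play.get_name())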
diff --git a/v2/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py
similarity index 95%
rename from v2/ansible/playbook/attribute.py
rename to lib/ansible/playbook/attribute.py
index 8a727a01930..b2e89c7733e 100644
--- a/v2/ansible/playbook/attribute.py
+++ b/lib/ansible/playbook/attribute.py
@@ -21,12 +21,13 @@ __metaclass__ = type
class Attribute:
- def __init__(self, isa=None, private=False, default=None, required=False):
+ def __init__(self, isa=None, private=False, default=None, required=False, listof=None):
self.isa = isa
self.private = private
self.default = default
self.required = required
+ self.listof = listof
class FieldAttribute(Attribute):
pass
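The new listof parameter lets list-typed attributes validate their elements; a standalone sketch of the check it enables (hypothetical helper, the real enforcement lives in base.py's post_validate further down):

    def check_listof(name, value, listof):
        # coerce a scalar to a one-element list, then type-check every element
        if not isinstance(value, list):
            value = [value]
        for item in value:
            if not isinstance(item, listof):
                raise TypeError("field %r should be a list of %s, got %s"
                                % (name, listof, type(item).__name__))
        return value

    print(check_listof('hosts', 'webservers', str))   # ['webservers']
    # check_listof('hosts', ['web', 42], str) would raise TypeError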
diff --git a/v2/ansible/playbook/base.py b/lib/ansible/playbook/base.py
similarity index 83%
rename from v2/ansible/playbook/base.py
rename to lib/ansible/playbook/base.py
index ecd217c1e8f..48f62b57df3 100644
--- a/v2/ansible/playbook/base.py
+++ b/lib/ansible/playbook/base.py
@@ -19,6 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import itertools
import uuid
from functools import partial
@@ -46,9 +47,8 @@ class Base:
_port = FieldAttribute(isa='int')
_remote_user = FieldAttribute(isa='string')
- # vars and flags
- _vars = FieldAttribute(isa='dict', default=dict())
- _environment = FieldAttribute(isa='dict', default=dict())
+ # flags and misc. settings
+ _environment = FieldAttribute(isa='list', default=[])
_no_log = FieldAttribute(isa='bool', default=False)
def __init__(self):
@@ -153,8 +153,11 @@ class Base:
else:
self._loader = DataLoader()
- if isinstance(ds, string_types) or isinstance(ds, FileIO):
- ds = self._loader.load(ds)
+ # FIXME: is this required anymore? This doesn't seem to do anything
+ # helpful, and was added in very early stages of the base class
+ # development.
+ #if isinstance(ds, string_types) or isinstance(ds, FileIO):
+ # ds = self._loader.load(ds)
# call the preprocess_data() function to massage the data into
# something we can more easily parse, and then call the validation
@@ -232,6 +235,10 @@ class Base:
new_me._loader = self._loader
new_me._variable_manager = self._variable_manager
+ # if the ds value was set on the object, copy it to the new copy too
+ if hasattr(self, '_ds'):
+ new_me._ds = self._ds
+
return new_me
def post_validate(self, templar):
@@ -245,6 +252,9 @@ class Base:
if self._loader is not None:
basedir = self._loader.get_basedir()
+ # save the omit value for later checking
+ omit_value = templar._available_variables.get('omit')
+
for (name, attribute) in iteritems(self._get_base_attributes()):
if getattr(self, name) is None:
@@ -254,15 +264,23 @@ class Base:
raise AnsibleParserError("the field '%s' is required but was not set" % name)
try:
- # if the attribute contains a variable, template it now
- value = templar.template(getattr(self, name))
-
- # run the post-validator if present
+ # Run the post-validator if present. These methods are responsible for
+ # using the given templar to template the values, if required.
method = getattr(self, '_post_validate_%s' % name, None)
if method:
- value = method(attribute, value, all_vars, templar._fail_on_undefined_errors)
+ value = method(attribute, getattr(self, name), templar)
else:
- # otherwise, just make sure the attribute is of the type it should be
+ # if the attribute contains a variable, template it now
+ value = templar.template(getattr(self, name))
+
+ # if this evaluated to the omit value, set the value back to
+ # the default specified in the FieldAttribute and move on
+ if omit_value is not None and value == omit_value:
+ value = attribute.default
+ continue
+
+ # and make sure the attribute is of the type it should be
+ if value is not None:
if attribute.isa == 'string':
value = unicode(value)
elif attribute.isa == 'int':
@@ -272,8 +290,17 @@ class Base:
elif attribute.isa == 'list':
if not isinstance(value, list):
value = [ value ]
+ if attribute.listof is not None:
+ for item in value:
+ if not isinstance(item, attribute.listof):
+ raise AnsibleParserError("the field '%s' should be a list of %s, but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds())
+ elif attribute.isa == 'set':
+ if not isinstance(value, (list, set)):
+ value = [ value ]
+ if not isinstance(value, set):
+ value = set(value)
elif attribute.isa == 'dict' and not isinstance(value, dict):
- raise TypeError()
+ raise TypeError("%s is not a dictionary" % value)
# and assign the massaged value back to the attribute field
setattr(self, name, value)
@@ -281,7 +308,7 @@ class Base:
except (TypeError, ValueError) as e:
raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds())
except UndefinedError as e:
- if templar._fail_on_undefined_errors:
+ if templar._fail_on_undefined_errors and name != 'name':
raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name,e), obj=self.get_ds())
def serialize(self):
@@ -334,7 +361,8 @@ class Base:
if not isinstance(new_value, list):
new_value = [ new_value ]
- return list(set(value + new_value))
+ #return list(set(value + new_value))
+ return [i for i,_ in itertools.groupby(value + new_value)]
def __getstate__(self):
return self.serialize()
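Worth noting about the _extend_value change above: itertools.groupby only collapses adjacent duplicates, so order is preserved but non-adjacent repeats survive (unlike the commented-out set() version, which deduplicated globally but lost ordering):

    import itertools

    value, new_value = ['a', 'b'], ['b', 'c', 'a']
    merged = [i for i, _ in itertools.groupby(value + new_value)]
    print(merged)   # ['a', 'b', 'c', 'a'] -- adjacent 'b's merge, the trailing 'a' is kept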
diff --git a/v2/ansible/playbook/become.py b/lib/ansible/playbook/become.py
similarity index 88%
rename from v2/ansible/playbook/become.py
rename to lib/ansible/playbook/become.py
index daa8c80ba94..f01b48512fa 100644
--- a/v2/ansible/playbook/become.py
+++ b/lib/ansible/playbook/become.py
@@ -27,10 +27,9 @@ from ansible.playbook.attribute import Attribute, FieldAttribute
class Become:
# Privlege escalation
- _become = FieldAttribute(isa='bool', default=False)
+ _become = FieldAttribute(isa='bool')
_become_method = FieldAttribute(isa='string')
_become_user = FieldAttribute(isa='string')
- _become_pass = FieldAttribute(isa='string')
def __init__(self):
return super(Become, self).__init__()
@@ -61,10 +60,6 @@ class Become:
self._detect_privilege_escalation_conflict(ds)
- # Setting user implies setting become/sudo/su to true
- if 'become_user' in ds and not ds.get('become', False):
- ds['become'] = True
-
# Privilege escalation, backwards compatibility for sudo/su
if 'sudo' in ds or 'sudo_user' in ds:
ds['become_method'] = 'sudo'
@@ -128,14 +123,3 @@ class Become:
return self._get_parent_attribute('become_user')
else:
return self._attributes['become_user']
-
- def _get_attr_become_password(self):
- '''
- Override for the 'become_password' getattr fetcher, used from Base.
- '''
- if hasattr(self, '_get_parent_attribute'):
- return self._get_parent_attribute('become_password')
- else:
- return self._attributes['become_password']
-
-
diff --git a/v2/ansible/playbook/block.py b/lib/ansible/playbook/block.py
similarity index 85%
rename from v2/ansible/playbook/block.py
rename to lib/ansible/playbook/block.py
index e6ad8e5745f..7bba9f325c3 100644
--- a/v2/ansible/playbook/block.py
+++ b/lib/ansible/playbook/block.py
@@ -56,17 +56,16 @@ class Block(Base, Become, Conditional, Taggable):
all_vars = dict()
if self._role:
- all_vars.update(self._role.get_vars())
+ all_vars.update(self._role.get_vars(self._dep_chain))
if self._parent_block:
all_vars.update(self._parent_block.get_vars())
if self._task_include:
all_vars.update(self._task_include.get_vars())
- all_vars.update(self.vars)
return all_vars
@staticmethod
- def load(data, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
+ def load(data, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
b = Block(play=play, parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers)
return b.load_data(data, variable_manager=variable_manager, loader=loader)
@@ -225,21 +224,21 @@ class Block(Base, Become, Conditional, Taggable):
ti.deserialize(ti_data)
self._task_include = ti
- def evaluate_conditional(self, all_vars):
+ def evaluate_conditional(self, templar, all_vars):
if len(self._dep_chain):
for dep in self._dep_chain:
- if not dep.evaluate_conditional(all_vars):
+ if not dep.evaluate_conditional(templar, all_vars):
return False
if self._task_include is not None:
- if not self._task_include.evaluate_conditional(all_vars):
+ if not self._task_include.evaluate_conditional(templar, all_vars):
return False
if self._parent_block is not None:
- if not self._parent_block.evaluate_conditional(all_vars):
+ if not self._parent_block.evaluate_conditional(templar, all_vars):
return False
elif self._role is not None:
- if not self._role.evaluate_conditional(all_vars):
+ if not self._role.evaluate_conditional(templar, all_vars):
return False
- return super(Block, self).evaluate_conditional(all_vars)
+ return super(Block, self).evaluate_conditional(templar, all_vars)
def set_loader(self, loader):
self._loader = loader
@@ -260,33 +259,39 @@ class Block(Base, Become, Conditional, Taggable):
'''
value = self._attributes[attr]
- if self._parent_block and (not value or extend):
+ if self._parent_block and (value is None or extend):
parent_value = getattr(self._parent_block, attr)
if extend:
value = self._extend_value(value, parent_value)
else:
value = parent_value
- if self._task_include and (not value or extend):
+ if self._task_include and (value is None or extend):
parent_value = getattr(self._task_include, attr)
if extend:
value = self._extend_value(value, parent_value)
else:
value = parent_value
- if self._role and (not value or extend):
+ if self._role and (value is None or extend):
parent_value = getattr(self._role, attr)
+ if extend:
+ value = self._extend_value(value, parent_value)
+ else:
+ value = parent_value
+
if len(self._dep_chain) and (not value or extend):
reverse_dep_chain = self._dep_chain[:]
reverse_dep_chain.reverse()
for dep in reverse_dep_chain:
dep_value = getattr(dep, attr)
if extend:
- value = self._extend_value(value, parent_value)
+ value = self._extend_value(value, dep_value)
else:
- value = parent_value
+ value = dep_value
- if value and not extend:
+ if value is not None and not extend:
break
- if self._play and (not value or extend):
+
+ if self._play and (value is None or extend):
parent_value = getattr(self._play, attr)
if extend:
value = self._extend_value(value, parent_value)
@@ -295,16 +300,28 @@ class Block(Base, Become, Conditional, Taggable):
return value
- def filter_tagged_tasks(self, connection_info, all_vars):
+ def _get_attr_environment(self):
+ '''
+ Override for the 'environment' getattr fetcher, used from Base.
+ '''
+ environment = self._attributes['environment']
+ if environment is None:
+ environment = dict()
+
+ environment = self._get_parent_attribute('environment', extend=True)
+
+ return environment
+
+ def filter_tagged_tasks(self, play_context, all_vars):
'''
Creates a new block, with task lists filtered based on the tags contained
- within the connection_info object.
+ within the play_context object.
'''
def evaluate_and_append_task(target):
tmp_list = []
for task in target:
- if task.evaluate_tags(connection_info.only_tags, connection_info.skip_tags, all_vars=all_vars):
+ if task.action in ('meta', 'include') or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars):
tmp_list.append(task)
return tmp_list
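The switch from `not value` to `value is None` in the inheritance chain above matters when a child explicitly sets a falsey value such as become: False; a toy comparison (hypothetical values, not the Block API):

    def inherit(child, parent, falsey_counts_as_unset):
        # old behaviour treats any falsey child value as unset; new behaviour only None
        unset = (not child) if falsey_counts_as_unset else (child is None)
        return parent if unset else child

    # a block that sets become: False under a play with become: True
    print(inherit(False, True, falsey_counts_as_unset=True))    # True  -- old check drops the override
    print(inherit(False, True, falsey_counts_as_unset=False))   # False -- 'is None' keeps the explicit False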
diff --git a/v2/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py
similarity index 78%
rename from v2/ansible/playbook/conditional.py
rename to lib/ansible/playbook/conditional.py
index 2233f3fa9ea..ae7a5f0ba4c 100644
--- a/v2/ansible/playbook/conditional.py
+++ b/lib/ansible/playbook/conditional.py
@@ -19,6 +19,8 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+from jinja2.exceptions import UndefinedError
+
from ansible.errors import *
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
@@ -47,16 +49,28 @@ class Conditional:
if not isinstance(value, list):
setattr(self, name, [ value ])
- def evaluate_conditional(self, all_vars):
+ def evaluate_conditional(self, templar, all_vars):
'''
Loops through the conditionals set on this object, returning
False if any of them evaluate as such.
'''
- templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False)
- for conditional in self.when:
- if not self._check_conditional(conditional, templar, all_vars):
- return False
+ # since this is a mixin, it may not have an underlying datastructure
+ # associated with it, so we pull it out now in case we need it for
+ # error reporting below
+ ds = None
+ if hasattr(self, 'get_ds'):
+ ds = self.get_ds()
+
+ try:
+ for conditional in self.when:
+ if not self._check_conditional(conditional, templar, all_vars):
+ return False
+ except UndefinedError, e:
+ raise AnsibleError("The conditional check '%s' failed due to an undefined variable. The error was: %s" % (conditional, e), obj=ds)
+ except Exception, e:
+ raise AnsibleError("The conditional check '%s' failed. The error was: %s" % (conditional, e), obj=ds)
+
return True
def _check_conditional(self, conditional, templar, all_vars):
@@ -73,6 +87,9 @@ class Conditional:
if conditional in all_vars and '-' not in unicode(all_vars[conditional]):
conditional = all_vars[conditional]
+ # make sure the templar is using the variables specified to this method
+ templar.set_available_variables(variables=all_vars)
+
conditional = templar.template(conditional)
if not isinstance(conditional, basestring) or conditional == "":
return conditional
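For context on what evaluate_conditional/_check_conditional end up doing, a minimal standalone approximation using Jinja2 directly (this is not the Templar implementation, just the general shape):

    from jinja2 import Environment

    def check_when(conditional, variables):
        # wrap the bare expression in an {% if %} block and render it
        presented = "{%% if %s %%}True{%% else %%}False{%% endif %%}" % conditional
        return Environment().from_string(presented).render(**variables) == "True"

    print(check_when("ansible_os_family == 'Debian'", {'ansible_os_family': 'Debian'}))  # True
    print(check_when("foo is defined", {}))                                              # False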
diff --git a/v2/ansible/playbook/handler.py b/lib/ansible/playbook/handler.py
similarity index 100%
rename from v2/ansible/playbook/handler.py
rename to lib/ansible/playbook/handler.py
diff --git a/v2/ansible/playbook/helpers.py b/lib/ansible/playbook/helpers.py
similarity index 81%
rename from v2/ansible/playbook/helpers.py
rename to lib/ansible/playbook/helpers.py
index 302e14a6e09..ca9326b8141 100644
--- a/v2/ansible/playbook/helpers.py
+++ b/lib/ansible/playbook/helpers.py
@@ -36,7 +36,8 @@ def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=Non
# we import here to prevent a circular dependency with imports
from ansible.playbook.block import Block
- assert ds is None or isinstance(ds, list), 'block has bad type: %s' % type(ds)
+ if not isinstance(ds, (list, type(None))):
+ raise AnsibleParserError('block has bad type: "%s". Expecting "list"' % type(ds).__name__, obj=ds)
block_list = []
if ds:
@@ -67,12 +68,13 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
- assert isinstance(ds, list), 'task has bad type: %s' % type(ds)
+ if not isinstance(ds, list):
+ raise AnsibleParserError('task has bad type: "%s". Expected "list"' % type(ds).__name__, obj=ds)
task_list = []
for task in ds:
if not isinstance(task, dict):
- raise AnsibleParserError("task/handler entries must be dictionaries (got a %s)" % type(task), obj=ds)
+ raise AnsibleParserError('task/handler has bad type: "%s". Expected "dict"' % type(task).__name__, obj=task)
if 'block' in task:
t = Block.load(
@@ -96,7 +98,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
return task_list
-def load_list_of_roles(ds, current_role_path=None, variable_manager=None, loader=None):
+def load_list_of_roles(ds, play, current_role_path=None, variable_manager=None, loader=None):
'''
Loads and returns a list of RoleInclude objects from the datastructure
list of role definitions
@@ -105,11 +107,12 @@ def load_list_of_roles(ds, current_role_path=None, variable_manager=None, loader
# we import here to prevent a circular dependency with imports
from ansible.playbook.role.include import RoleInclude
- assert isinstance(ds, list), 'roles has bad type: %s' % type(ds)
+ if not isinstance(ds, list):
+ raise AnsibleParserError('roles has bad type: "%s". Expected "list"' % type(ds).__name__, obj=ds)
roles = []
for role_def in ds:
- i = RoleInclude.load(role_def, current_role_path=current_role_path, variable_manager=variable_manager, loader=loader)
+ i = RoleInclude.load(role_def, play=play, current_role_path=current_role_path, variable_manager=variable_manager, loader=loader)
roles.append(i)
return roles
diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py
new file mode 100644
index 00000000000..75df9f6c250
--- /dev/null
+++ b/lib/ansible/playbook/included_file.py
@@ -0,0 +1,103 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.template import Templar
+
+class IncludedFile:
+
+ def __init__(self, filename, args, task):
+ self._filename = filename
+ self._args = args
+ self._task = task
+ self._hosts = []
+
+ def add_host(self, host):
+ if host not in self._hosts:
+ self._hosts.append(host)
+
+ def __eq__(self, other):
+ return other._filename == self._filename and other._args == self._args
+
+ def __repr__(self):
+ return "%s (%s): %s" % (self._filename, self._args, self._hosts)
+
+ @staticmethod
+ def process_include_results(results, tqm, iterator, loader, variable_manager):
+ included_files = []
+
+ for res in results:
+ if res._host in tqm._failed_hosts:
+ raise AnsibleError("host is failed, not including files")
+
+ if res._task.action == 'include':
+ if res._task.loop:
+ include_results = res._result['results']
+ else:
+ include_results = [ res._result ]
+
+ for include_result in include_results:
+ # if the task result was skipped or failed, continue
+ if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result:
+ continue
+
+ original_task = iterator.get_original_task(res._host, res._task)
+ if original_task:
+ if original_task._role:
+ include_file = loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include'])
+ elif original_task._task_include:
+ # handle relative includes by walking up the list of parent include
+ # tasks and checking the relative result to see if it exists
+ parent_include = original_task._task_include
+ while parent_include is not None:
+ parent_include_dir = os.path.dirname(parent_include.args.get('_raw_params'))
+ include_file = loader.path_dwim_relative(loader.get_basedir(), parent_include_dir, include_result['include'])
+ if os.path.exists(include_file):
+ break
+ else:
+ parent_include = parent_include._task_include
+ else:
+ include_file = loader.path_dwim(res._task.args.get('_raw_params'))
+ else:
+ include_file = loader.path_dwim(res._task.args.get('_raw_params'))
+
+ task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=res._host, task=original_task)
+ #task_vars = tqm.add_tqm_variables(task_vars, play=iterator._play)
+ templar = Templar(loader=loader, variables=task_vars)
+
+ include_variables = include_result.get('include_variables', dict())
+ if 'item' in include_result:
+ include_variables['item'] = include_result['item']
+ task_vars['item'] = include_result['item']
+
+ include_file = templar.template(include_file)
+ inc_file = IncludedFile(include_file, include_variables, original_task)
+
+ try:
+ pos = included_files.index(inc_file)
+ inc_file = included_files[pos]
+ except ValueError:
+ included_files.append(inc_file)
+
+ inc_file.add_host(res._host)
+
+ return included_files
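The dedup in process_include_results relies on IncludedFile.__eq__ plus list.index: results that resolve to the same (filename, args) pair collapse into one entry that accumulates hosts. A standalone sketch of the same pattern (hypothetical data, not TaskResult objects):

    class Item:
        def __init__(self, key):
            self.key = key
            self.hosts = []
        def __eq__(self, other):
            return self.key == other.key

    results = [('common.yml', 'host1'), ('common.yml', 'host2'), ('extra.yml', 'host1')]
    included = []
    for filename, host in results:
        item = Item(filename)
        try:
            item = included[included.index(item)]   # reuse the existing equal entry
        except ValueError:
            included.append(item)
        item.hosts.append(host)

    print([(i.key, i.hosts) for i in included])
    # [('common.yml', ['host1', 'host2']), ('extra.yml', ['host1'])]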
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index 6ee85e0bf48..b1f5440d743 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -15,935 +15,340 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#############################################
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible.utils.template import template
-from ansible import utils
-from ansible import errors
+from six import string_types
+
+from ansible.errors import AnsibleError, AnsibleParserError
+
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.become import Become
+from ansible.playbook.block import Block
+from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
from ansible.playbook.task import Task
-from ansible.module_utils.splitter import split_args, unquote
-import ansible.constants as C
-import pipes
-import shlex
-import os
-import sys
-import uuid
-
-
-class Play(object):
-
- _pb_common = [
- 'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become',
- 'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts',
- 'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su',
- 'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt',
- 'vault_password',
- ]
-
- __slots__ = _pb_common + [
- '_ds', '_handlers', '_play_hosts', '_tasks', 'any_errors_fatal', 'basedir',
- 'default_vars', 'included_roles', 'max_fail_pct', 'playbook', 'remote_port',
- 'role_vars', 'transport', 'vars_file_vars',
- ]
-
- # to catch typos and so forth -- these are userland names
- # and don't line up 1:1 with how they are stored
- VALID_KEYS = frozenset(_pb_common + [
- 'connection', 'include', 'max_fail_percentage', 'port', 'post_tasks',
- 'pre_tasks', 'role_names', 'tasks', 'user',
- ])
-
- # *************************************************
-
- def __init__(self, playbook, ds, basedir, vault_password=None):
- ''' constructor loads from a play datastructure '''
-
- for x in ds.keys():
- if not x in Play.VALID_KEYS:
- raise errors.AnsibleError("%s is not a legal parameter of an Ansible Play" % x)
-
- # allow all playbook keys to be set by --extra-vars
- self.vars = ds.get('vars', {})
- self.vars_prompt = ds.get('vars_prompt', {})
- self.playbook = playbook
- self.vars = self._get_vars()
- self.vars_file_vars = dict() # these are vars read in from vars_files:
- self.role_vars = dict() # these are vars read in from vars/main.yml files in roles
- self.basedir = basedir
- self.roles = ds.get('roles', None)
- self.tags = ds.get('tags', None)
- self.vault_password = vault_password
- self.environment = ds.get('environment', {})
-
- if self.tags is None:
- self.tags = []
- elif type(self.tags) in [ str, unicode ]:
- self.tags = self.tags.split(",")
- elif type(self.tags) != list:
- self.tags = []
-
- # make sure we have some special internal variables set, which
- # we use later when loading tasks and handlers
- load_vars = dict()
- load_vars['playbook_dir'] = os.path.abspath(self.basedir)
- if self.playbook.inventory.basedir() is not None:
- load_vars['inventory_dir'] = self.playbook.inventory.basedir()
- if self.playbook.inventory.src() is not None:
- load_vars['inventory_file'] = self.playbook.inventory.src()
-
- # We first load the vars files from the datastructure
- # so we have the default variables to pass into the roles
- self.vars_files = ds.get('vars_files', [])
- if not isinstance(self.vars_files, list):
- raise errors.AnsibleError('vars_files must be a list')
- processed_vars_files = self._update_vars_files_for_host(None)
-
- # now we load the roles into the datastructure
- self.included_roles = []
- ds = self._load_roles(self.roles, ds)
-
- # and finally re-process the vars files as they may have been updated
- # by the included roles, but exclude any which have been processed
- self.vars_files = utils.list_difference(ds.get('vars_files', []), processed_vars_files)
- if not isinstance(self.vars_files, list):
- raise errors.AnsibleError('vars_files must be a list')
-
- self._update_vars_files_for_host(None)
-
- # template everything to be efficient, but do not pre-mature template
- # tasks/handlers as they may have inventory scope overrides. We also
- # create a set of temporary variables for templating, so we don't
- # trample on the existing vars structures
- _tasks = ds.pop('tasks', [])
- _handlers = ds.pop('handlers', [])
-
- temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
- temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
- try:
- ds = template(basedir, ds, temp_vars)
- except errors.AnsibleError, e:
- utils.warning("non fatal error while trying to template play variables: %s" % (str(e)))
-
- ds['tasks'] = _tasks
- ds['handlers'] = _handlers
-
- self._ds = ds
-
- hosts = ds.get('hosts')
- if hosts is None:
- raise errors.AnsibleError('hosts declaration is required')
- elif isinstance(hosts, list):
- try:
- hosts = ';'.join(hosts)
- except TypeError,e:
- raise errors.AnsibleError('improper host declaration: %s' % str(e))
-
- self.serial = str(ds.get('serial', 0))
- self.hosts = hosts
- self.name = ds.get('name', self.hosts)
- self._tasks = ds.get('tasks', [])
- self._handlers = ds.get('handlers', [])
- self.remote_user = ds.get('remote_user', ds.get('user', self.playbook.remote_user))
- self.remote_port = ds.get('port', self.playbook.remote_port)
- self.transport = ds.get('connection', self.playbook.transport)
- self.remote_port = self.remote_port
- self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false'))
- self.accelerate = utils.boolean(ds.get('accelerate', 'false'))
- self.accelerate_port = ds.get('accelerate_port', None)
- self.accelerate_ipv6 = ds.get('accelerate_ipv6', False)
- self.max_fail_pct = int(ds.get('max_fail_percentage', 100))
- self.no_log = utils.boolean(ds.get('no_log', 'false'))
- self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers))
-
- # Fail out if user specifies conflicting privilege escalations
- if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')):
- raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("sudo", "sudo_user") cannot be used together')
- if (ds.get('become') or ds.get('become_user')) and (ds.get('su') or ds.get('su_user')):
- raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("su", "su_user") cannot be used together')
- if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')):
- raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
-
- # become settings are inherited and updated normally
- self.become = ds.get('become', self.playbook.become)
- self.become_method = ds.get('become_method', self.playbook.become_method)
- self.become_user = ds.get('become_user', self.playbook.become_user)
-
- # Make sure current play settings are reflected in become fields
- if 'sudo' in ds:
- self.become=ds['sudo']
- self.become_method='sudo'
- if 'sudo_user' in ds:
- self.become_user=ds['sudo_user']
- elif 'su' in ds:
- self.become=True
- self.become=ds['su']
- self.become_method='su'
- if 'su_user' in ds:
- self.become_user=ds['su_user']
-
- # gather_facts is not a simple boolean, as None means that a 'smart'
- # fact gathering mode will be used, so we need to be careful here as
- # calling utils.boolean(None) returns False
- self.gather_facts = ds.get('gather_facts', None)
- if self.gather_facts is not None:
- self.gather_facts = utils.boolean(self.gather_facts)
-
- load_vars['role_names'] = ds.get('role_names', [])
-
- self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars)
- self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars)
-
- # apply any missing tags to role tasks
- self._late_merge_role_tags()
-
- # place holder for the discovered hosts to be used in this play
- self._play_hosts = None
-
- # *************************************************
-
- def _get_role_path(self, role):
- """
- Returns the path on disk to the directory containing
- the role directories like tasks, templates, etc. Also
- returns any variables that were included with the role
- """
- orig_path = template(self.basedir,role,self.vars)
-
- role_vars = {}
- if type(orig_path) == dict:
- # what, not a path?
- role_name = orig_path.get('role', None)
- if role_name is None:
- raise errors.AnsibleError("expected a role name in dictionary: %s" % orig_path)
- role_vars = orig_path
- else:
- role_name = utils.role_spec_parse(orig_path)["name"]
-
- role_path = None
-
- possible_paths = [
- utils.path_dwim(self.basedir, os.path.join('roles', role_name)),
- utils.path_dwim(self.basedir, role_name)
- ]
-
- if C.DEFAULT_ROLES_PATH:
- search_locations = C.DEFAULT_ROLES_PATH.split(os.pathsep)
- for loc in search_locations:
- loc = os.path.expanduser(loc)
- possible_paths.append(utils.path_dwim(loc, role_name))
-
- for path_option in possible_paths:
- if os.path.isdir(path_option):
- role_path = path_option
- break
-
- if role_path is None:
- raise errors.AnsibleError("cannot find role in %s" % " or ".join(possible_paths))
-
- return (role_path, role_vars)
-
- def _build_role_dependencies(self, roles, dep_stack, passed_vars={}, level=0):
- # this number is arbitrary, but it seems sane
- if level > 20:
- raise errors.AnsibleError("too many levels of recursion while resolving role dependencies")
- for role in roles:
- role_path,role_vars = self._get_role_path(role)
-
- # save just the role params for this role, which exclude the special
- # keywords 'role', 'tags', and 'when'.
- role_params = role_vars.copy()
- for item in ('role', 'tags', 'when'):
- if item in role_params:
- del role_params[item]
-
- role_vars = utils.combine_vars(passed_vars, role_vars)
-
- vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')))
- vars_data = {}
- if os.path.isfile(vars):
- vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
- if vars_data:
- if not isinstance(vars_data, dict):
- raise errors.AnsibleError("vars from '%s' are not a dict" % vars)
- role_vars = utils.combine_vars(vars_data, role_vars)
-
- defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')))
- defaults_data = {}
- if os.path.isfile(defaults):
- defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
-
- # the meta directory contains the yaml that should
- # hold the list of dependencies (if any)
- meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')))
- if os.path.isfile(meta):
- data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
- if data:
- dependencies = data.get('dependencies',[])
- if dependencies is None:
- dependencies = []
- for dep in dependencies:
- allow_dupes = False
- (dep_path,dep_vars) = self._get_role_path(dep)
-
- # save the dep params, just as we did above
- dep_params = dep_vars.copy()
- for item in ('role', 'tags', 'when'):
- if item in dep_params:
- del dep_params[item]
-
- meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta')))
- if os.path.isfile(meta):
- meta_data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
- if meta_data:
- allow_dupes = utils.boolean(meta_data.get('allow_duplicates',''))
-
- # if any tags were specified as role/dep variables, merge
- # them into the current dep_vars so they're passed on to any
- # further dependencies too, and so we only have one place
- # (dep_vars) to look for tags going forward
- def __merge_tags(var_obj):
- old_tags = dep_vars.get('tags', [])
- if isinstance(old_tags, basestring):
- old_tags = [old_tags, ]
- if isinstance(var_obj, dict):
- new_tags = var_obj.get('tags', [])
- if isinstance(new_tags, basestring):
- new_tags = [new_tags, ]
- else:
- new_tags = []
- return list(set(old_tags).union(set(new_tags)))
-
- dep_vars['tags'] = __merge_tags(role_vars)
- dep_vars['tags'] = __merge_tags(passed_vars)
-
- # if tags are set from this role, merge them
- # into the tags list for the dependent role
- if "tags" in passed_vars:
- for included_role_dep in dep_stack:
- included_dep_name = included_role_dep[0]
- included_dep_vars = included_role_dep[2]
- if included_dep_name == dep:
- if "tags" in included_dep_vars:
- included_dep_vars["tags"] = list(set(included_dep_vars["tags"]).union(set(passed_vars["tags"])))
- else:
- included_dep_vars["tags"] = passed_vars["tags"][:]
-
- dep_vars = utils.combine_vars(passed_vars, dep_vars)
- dep_vars = utils.combine_vars(role_vars, dep_vars)
-
- vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars')))
- vars_data = {}
- if os.path.isfile(vars):
- vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
- if vars_data:
- dep_vars = utils.combine_vars(dep_vars, vars_data)
- pass
-
- defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults')))
- dep_defaults_data = {}
- if os.path.isfile(defaults):
- dep_defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
- if 'role' in dep_vars:
- del dep_vars['role']
-
- if not allow_dupes:
- if dep in self.included_roles:
- # skip back to the top, since we don't want to
- # do anything else with this role
- continue
- else:
- self.included_roles.append(dep)
-
- def _merge_conditional(cur_conditionals, new_conditionals):
- if isinstance(new_conditionals, (basestring, bool)):
- cur_conditionals.append(new_conditionals)
- elif isinstance(new_conditionals, list):
- cur_conditionals.extend(new_conditionals)
-
- # pass along conditionals from roles to dep roles
- passed_when = passed_vars.get('when')
- role_when = role_vars.get('when')
- dep_when = dep_vars.get('when')
-
- tmpcond = []
- _merge_conditional(tmpcond, passed_when)
- _merge_conditional(tmpcond, role_when)
- _merge_conditional(tmpcond, dep_when)
-
- if len(tmpcond) > 0:
- dep_vars['when'] = tmpcond
-
- self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1)
- dep_stack.append([dep, dep_path, dep_vars, dep_params, dep_defaults_data])
-
- # only add the current role when we're at the top level,
- # otherwise we'll end up in a recursive loop
- if level == 0:
- self.included_roles.append(role)
- dep_stack.append([role, role_path, role_vars, role_params, defaults_data])
- return dep_stack
-
- def _load_role_vars_files(self, vars_files):
- # process variables stored in vars/main.yml files
- role_vars = {}
- for filename in vars_files:
- if os.path.exists(filename):
- new_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
- if new_vars:
- if type(new_vars) != dict:
- raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_vars)))
- role_vars = utils.combine_vars(role_vars, new_vars)
-
- return role_vars
-
- def _load_role_defaults(self, defaults_files):
- # process default variables
- default_vars = {}
- for filename in defaults_files:
- if os.path.exists(filename):
- new_default_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
- if new_default_vars:
- if type(new_default_vars) != dict:
- raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_default_vars)))
- default_vars = utils.combine_vars(default_vars, new_default_vars)
-
- return default_vars
-
- def _load_roles(self, roles, ds):
- # a role is a name that auto-includes the following if they exist
- # /tasks/main.yml
- # /handlers/main.yml
- # /vars/main.yml
- # /library
- # and it auto-extends tasks/handlers/vars_files/module paths as appropriate if found
-
- if roles is None:
- roles = []
- if type(roles) != list:
- raise errors.AnsibleError("value of 'roles:' must be a list")
-
- new_tasks = []
- new_handlers = []
- role_vars_files = []
- defaults_files = []
-
- pre_tasks = ds.get('pre_tasks', None)
- if type(pre_tasks) != list:
- pre_tasks = []
- for x in pre_tasks:
- new_tasks.append(x)
-
- # flush handlers after pre_tasks
- new_tasks.append(dict(meta='flush_handlers'))
-
- roles = self._build_role_dependencies(roles, [], {})
-
- # give each role an uuid and
- # make role_path available as variable to the task
- for idx, val in enumerate(roles):
- this_uuid = str(uuid.uuid4())
- roles[idx][-3]['role_uuid'] = this_uuid
- roles[idx][-3]['role_path'] = roles[idx][1]
-
- role_names = []
-
- for (role, role_path, role_vars, role_params, default_vars) in roles:
- # special vars must be extracted from the dict to the included tasks
- special_keys = [ "sudo", "sudo_user", "when", "with_items", "su", "su_user", "become", "become_user" ]
- special_vars = {}
- for k in special_keys:
- if k in role_vars:
- special_vars[k] = role_vars[k]
-
- task_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'tasks'))
- handler_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'handlers'))
- vars_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))
- meta_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'meta'))
- defaults_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))
-
- task = self._resolve_main(task_basepath)
- handler = self._resolve_main(handler_basepath)
- vars_file = self._resolve_main(vars_basepath)
- meta_file = self._resolve_main(meta_basepath)
- defaults_file = self._resolve_main(defaults_basepath)
-
- library = utils.path_dwim(self.basedir, os.path.join(role_path, 'library'))
-
- missing = lambda f: not os.path.isfile(f)
- if missing(task) and missing(handler) and missing(vars_file) and missing(defaults_file) and missing(meta_file) and not os.path.isdir(library):
- raise errors.AnsibleError("found role at %s, but cannot find %s or %s or %s or %s or %s or %s" % (role_path, task, handler, vars_file, defaults_file, meta_file, library))
-
- if isinstance(role, dict):
- role_name = role['role']
- else:
- role_name = utils.role_spec_parse(role)["name"]
-
- role_names.append(role_name)
- if os.path.isfile(task):
- nt = dict(include=pipes.quote(task), vars=role_vars, role_params=role_params, default_vars=default_vars, role_name=role_name)
- for k in special_keys:
- if k in special_vars:
- nt[k] = special_vars[k]
- new_tasks.append(nt)
- if os.path.isfile(handler):
- nt = dict(include=pipes.quote(handler), vars=role_vars, role_params=role_params, role_name=role_name)
- for k in special_keys:
- if k in special_vars:
- nt[k] = special_vars[k]
- new_handlers.append(nt)
- if os.path.isfile(vars_file):
- role_vars_files.append(vars_file)
- if os.path.isfile(defaults_file):
- defaults_files.append(defaults_file)
- if os.path.isdir(library):
- utils.plugins.module_finder.add_directory(library)
-
- tasks = ds.get('tasks', None)
- post_tasks = ds.get('post_tasks', None)
- handlers = ds.get('handlers', None)
- vars_files = ds.get('vars_files', None)
-
- if type(tasks) != list:
- tasks = []
- if type(handlers) != list:
- handlers = []
- if type(vars_files) != list:
- vars_files = []
- if type(post_tasks) != list:
- post_tasks = []
-
- new_tasks.extend(tasks)
- # flush handlers after tasks + role tasks
- new_tasks.append(dict(meta='flush_handlers'))
- new_tasks.extend(post_tasks)
- # flush handlers after post tasks
- new_tasks.append(dict(meta='flush_handlers'))
-
- new_handlers.extend(handlers)
-
- ds['tasks'] = new_tasks
- ds['handlers'] = new_handlers
- ds['role_names'] = role_names
-
- self.role_vars = self._load_role_vars_files(role_vars_files)
- self.default_vars = self._load_role_defaults(defaults_files)
+from ansible.utils.vars import combine_vars
- return ds
- # *************************************************
-
- def _resolve_main(self, basepath):
- ''' flexibly handle variations in main filenames '''
- # these filenames are acceptable:
- mains = (
- os.path.join(basepath, 'main'),
- os.path.join(basepath, 'main.yml'),
- os.path.join(basepath, 'main.yaml'),
- os.path.join(basepath, 'main.json'),
- )
- if sum([os.path.isfile(x) for x in mains]) > 1:
- raise errors.AnsibleError("found multiple main files at %s, only one allowed" % (basepath))
- else:
- for m in mains:
- if os.path.isfile(m):
- return m # exactly one main file
- return mains[0] # zero mains (we still need to return something)
-
- # *************************************************
-
- def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, become_vars=None,
- additional_conditions=None, original_file=None, role_name=None):
- ''' handle task and handler include statements '''
-
- results = []
- if tasks is None:
- # support empty handler files, and the like.
- tasks = []
- if additional_conditions is None:
- additional_conditions = []
- if vars is None:
- vars = {}
- if role_params is None:
- role_params = {}
- if default_vars is None:
- default_vars = {}
- if become_vars is None:
- become_vars = {}
-
- old_conditions = list(additional_conditions)
-
- for x in tasks:
-
- # prevent assigning the same conditions to each task on an include
- included_additional_conditions = list(old_conditions)
-
- if not isinstance(x, dict):
- raise errors.AnsibleError("expecting dict; got: %s, error in %s" % (x, original_file))
-
- # evaluate privilege escalation vars for current and child tasks
- included_become_vars = {}
- for k in ["become", "become_user", "become_method", "become_exe", "sudo", "su", "sudo_user", "su_user"]:
- if k in x:
- included_become_vars[k] = x[k]
- elif k in become_vars:
- included_become_vars[k] = become_vars[k]
- x[k] = become_vars[k]
-
- task_vars = vars.copy()
- if original_file:
- task_vars['_original_file'] = original_file
-
- if 'meta' in x:
- if x['meta'] == 'flush_handlers':
- if role_name and 'role_name' not in x:
- x['role_name'] = role_name
- results.append(Task(self, x, module_vars=task_vars, role_name=role_name))
- continue
-
- if 'include' in x:
- tokens = split_args(str(x['include']))
- included_additional_conditions = list(additional_conditions)
- include_vars = {}
- for k in x:
- if k.startswith("with_"):
- if original_file:
- offender = " (in %s)" % original_file
- else:
- offender = ""
- utils.deprecated("include + with_items is a removed deprecated feature" + offender, "1.5", removed=True)
- elif k.startswith("when_"):
- utils.deprecated("\"when_:\" is a removed deprecated feature, use the simplified 'when:' conditional directly", None, removed=True)
- elif k == 'when':
- if isinstance(x[k], (basestring, bool)):
- included_additional_conditions.append(x[k])
- elif type(x[k]) is list:
- included_additional_conditions.extend(x[k])
- elif k in ("include", "vars", "role_params", "default_vars", "sudo", "sudo_user", "role_name", "no_log", "become", "become_user", "su", "su_user"):
- continue
- else:
- include_vars[k] = x[k]
-
- # get any role parameters specified
- role_params = x.get('role_params', {})
-
- # get any role default variables specified
- default_vars = x.get('default_vars', {})
- if not default_vars:
- default_vars = self.default_vars
- else:
- default_vars = utils.combine_vars(self.default_vars, default_vars)
-
- # append the vars defined with the include (from above)
- # as well as the old-style 'vars' element. The old-style
- # vars are given higher precedence here (just in case)
- task_vars = utils.combine_vars(task_vars, include_vars)
- if 'vars' in x:
- task_vars = utils.combine_vars(task_vars, x['vars'])
-
- new_role = None
- if 'role_name' in x:
- new_role = x['role_name']
-
- mv = task_vars.copy()
- for t in tokens[1:]:
- (k,v) = t.split("=", 1)
- v = unquote(v)
- mv[k] = template(self.basedir, v, mv)
- dirname = self.basedir
- if original_file:
- dirname = os.path.dirname(original_file)
-
- # temp vars are used here to avoid trampling on the existing vars structures
- temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
- temp_vars = utils.combine_vars(temp_vars, mv)
- temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
- include_file = template(dirname, tokens[0], temp_vars)
- include_filename = utils.path_dwim(dirname, include_file)
-
- data = utils.parse_yaml_from_file(include_filename, vault_password=self.vault_password)
- if 'role_name' in x and data is not None:
- for y in data:
- if isinstance(y, dict) and 'include' in y:
- y['role_name'] = new_role
- loaded = self._load_tasks(data, mv, role_params, default_vars, included_become_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role)
- results += loaded
- elif type(x) == dict:
- task = Task(
- self, x,
- module_vars=task_vars,
- play_vars=self.vars,
- play_file_vars=self.vars_file_vars,
- role_vars=self.role_vars,
- role_params=role_params,
- default_vars=default_vars,
- additional_conditions=list(additional_conditions),
- role_name=role_name
- )
- results.append(task)
- else:
- raise Exception("unexpected task type")
+__all__ = ['Play']
+
+
+class Play(Base, Taggable, Become):
+
+ """
+ A play is a language feature that represents a list of roles and/or
+ task/handler blocks to execute on a given set of hosts.
+
+ Usage:
+
+ Play.load(datastructure) -> Play
+ Play.something(...)
+ """
- for x in results:
- if self.tags is not None:
- x.tags.extend(self.tags)
+ # =================================================================================
+ # Connection-Related Attributes
- return results
+ # TODO: generalize connection
+ _accelerate = FieldAttribute(isa='bool', default=False)
+ _accelerate_ipv6 = FieldAttribute(isa='bool', default=False)
+ _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port
- # *************************************************
+ # Connection
+ _gather_facts = FieldAttribute(isa='bool', default=None)
+ _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types)
+ _name = FieldAttribute(isa='string', default='')
- def tasks(self):
- ''' return task objects for this play '''
- return self._tasks
+ # Variable Attributes
+ _vars = FieldAttribute(isa='dict', default=dict())
+ _vars_files = FieldAttribute(isa='list', default=[])
+ _vars_prompt = FieldAttribute(isa='list', default=[])
+ _vault_password = FieldAttribute(isa='string')
- def handlers(self):
- ''' return handler objects for this play '''
- return self._handlers
+ # Block (Task) Lists Attributes
+ _handlers = FieldAttribute(isa='list', default=[])
+ _pre_tasks = FieldAttribute(isa='list', default=[])
+ _post_tasks = FieldAttribute(isa='list', default=[])
+ _tasks = FieldAttribute(isa='list', default=[])
- # *************************************************
+ # Role Attributes
+ _roles = FieldAttribute(isa='list', default=[])
- def _get_vars(self):
- ''' load the vars section from a play, accounting for all sorts of variable features
- including loading from yaml files, prompting, and conditional includes of the first
- file found in a list. '''
+ # Flag/Setting Attributes
+ _any_errors_fatal = FieldAttribute(isa='bool', default=False)
+ _force_handlers = FieldAttribute(isa='bool')
+ _max_fail_percentage = FieldAttribute(isa='string', default='0')
+ _serial = FieldAttribute(isa='int', default=0)
+ _strategy = FieldAttribute(isa='string', default='linear')
- if self.vars is None:
- self.vars = {}
+ # =================================================================================
- if type(self.vars) not in [dict, list]:
- raise errors.AnsibleError("'vars' section must contain only key/value pairs")
+ def __init__(self):
+ super(Play, self).__init__()
- vars = {}
+ self.ROLE_CACHE = {}
- # translate a list of vars into a dict
- if type(self.vars) == list:
- for item in self.vars:
- if getattr(item, 'items', None) is None:
- raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
- k, v = item.items()[0]
- vars[k] = v
- else:
- vars.update(self.vars)
+ def __repr__(self):
+ return self.get_name()
- if type(self.vars_prompt) == list:
- for var in self.vars_prompt:
- if not 'name' in var:
- raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")
+ def get_name(self):
+ ''' return the name of the Play '''
+ return self._attributes.get('name')
- vname = var['name']
- prompt = var.get("prompt", vname)
- default = var.get("default", None)
- private = var.get("private", True)
+ @staticmethod
+ def load(data, variable_manager=None, loader=None):
+ p = Play()
+ return p.load_data(data, variable_manager=variable_manager, loader=loader)
- confirm = var.get("confirm", False)
- encrypt = var.get("encrypt", None)
- salt_size = var.get("salt_size", None)
- salt = var.get("salt", None)
-
- if vname not in self.playbook.extra_vars:
- vars[vname] = self.playbook.callbacks.on_vars_prompt(
- vname, private, prompt, encrypt, confirm, salt_size, salt, default
- )
+ def preprocess_data(self, ds):
+ '''
+ Adjusts play datastructure to cleanup old/legacy items
+ '''
- elif type(self.vars_prompt) == dict:
- for (vname, prompt) in self.vars_prompt.iteritems():
- prompt_msg = "%s: " % prompt
- if vname not in self.playbook.extra_vars:
- vars[vname] = self.playbook.callbacks.on_vars_prompt(
- varname=vname, private=False, prompt=prompt_msg, default=None
- )
+ assert isinstance(ds, dict)
- else:
- raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")
+ # The use of 'user' in the Play datastructure was deprecated to
+ # line up with the same change for Tasks, due to the fact that
+ # 'user' conflicted with the user module.
+ if 'user' in ds:
+ # this should never happen, but error out with a helpful message
+ # to the user if it does...
+ if 'remote_user' in ds:
+ raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds)
- if type(self.playbook.extra_vars) == dict:
- vars = utils.combine_vars(vars, self.playbook.extra_vars)
+ ds['remote_user'] = ds['user']
+ del ds['user']
- return vars
+ if 'vars_prompt' in ds and not isinstance(ds['vars_prompt'], list):
+ ds['vars_prompt'] = [ ds['vars_prompt'] ]
- # *************************************************
+ return super(Play, self).preprocess_data(ds)
- def update_vars_files(self, hosts, vault_password=None):
- ''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''
-
- # now loop through all the hosts...
- for h in hosts:
- self._update_vars_files_for_host(h, vault_password=vault_password)
-
- # *************************************************
-
- def compare_tags(self, tags):
- ''' given a list of tags that the user has specified, return two lists:
- matched_tags: tags were found within the current play and match those given
- by the user
- unmatched_tags: tags that were found within the current play but do not match
- any provided by the user '''
-
- # gather all the tags in all the tasks and handlers into one list
- # FIXME: isn't this in self.tags already?
-
- all_tags = []
- for task in self._tasks:
- if not task.meta:
- all_tags.extend(task.tags)
- for handler in self._handlers:
- all_tags.extend(handler.tags)
-
- # compare the lists of tags using sets and return the matched and unmatched
- all_tags_set = set(all_tags)
- tags_set = set(tags)
-
- matched_tags = all_tags_set.intersection(tags_set)
- unmatched_tags = all_tags_set.difference(tags_set)
-
- a = set(['always'])
- u = set(['untagged'])
- if 'always' in all_tags_set:
- matched_tags = matched_tags.union(a)
- unmatched_tags = all_tags_set.difference(a)
-
- if 'all' in tags_set:
- matched_tags = matched_tags.union(all_tags_set)
- unmatched_tags = set()
-
- if 'tagged' in tags_set:
- matched_tags = all_tags_set.difference(u)
- unmatched_tags = u
-
- if 'untagged' in tags_set and 'untagged' in all_tags_set:
- matched_tags = matched_tags.union(u)
- unmatched_tags = unmatched_tags.difference(u)
-
- return matched_tags, unmatched_tags
-
- # *************************************************
-
- def _late_merge_role_tags(self):
- # build a local dict of tags for roles
- role_tags = {}
- for task in self._ds['tasks']:
- if 'role_name' in task:
- this_role = task['role_name'] + "-" + task['vars']['role_uuid']
-
- if this_role not in role_tags:
- role_tags[this_role] = []
-
- if 'tags' in task['vars']:
- if isinstance(task['vars']['tags'], basestring):
- role_tags[this_role] += shlex.split(task['vars']['tags'])
- else:
- role_tags[this_role] += task['vars']['tags']
-
- # apply each role's tags to its tasks
- for idx, val in enumerate(self._tasks):
- if getattr(val, 'role_name', None) is not None:
- this_role = val.role_name + "-" + val.module_vars['role_uuid']
- if this_role in role_tags:
- self._tasks[idx].tags = sorted(set(self._tasks[idx].tags + role_tags[this_role]))
-
- # *************************************************
-
- def _update_vars_files_for_host(self, host, vault_password=None):
+ def _load_hosts(self, attr, ds):
+ '''
+ Loads the hosts from the given datastructure, which might be a list
+ or a simple string. We also switch integers in this list back to strings,
+ as the YAML parser will turn things that look like numbers into numbers.
+ '''
- def generate_filenames(host, inject, filename):
-
- """ Render the raw filename into 3 forms """
+ if isinstance(ds, (string_types, int)):
+ ds = [ ds ]
- # filename2 is the templated version of the filename, which will
- # be fully rendered if any variables contained within it are
- # non-inventory related
- filename2 = template(self.basedir, filename, self.vars)
+ if not isinstance(ds, list):
+ raise AnsibleParserError("'hosts' must be specified as a list or a single pattern", obj=ds)
+
+ # YAML parsing of things that look like numbers may have
+ # resulted in integers showing up in the list, so convert
+ # them back to strings to prevent problems
+ for idx,item in enumerate(ds):
+ if isinstance(item, int):
+ ds[idx] = "%s" % item
- # filename3 is the same as filename2, but when the host object is
- # available, inventory variables will be expanded as well since the
- # name is templated with the injected variables
- filename3 = filename2
- if host is not None:
- filename3 = template(self.basedir, filename2, inject)
+ return ds
+
+ def _load_vars(self, attr, ds):
+ '''
+ Vars in a play can be specified either as a dictionary directly, or
+ as a list of dictionaries. If the latter, this method will turn the
+ list into a single dictionary.
+ '''
- # filename4 is the dwim'd path, but may also be mixed-scope, so we use
- # both play scoped vars and host scoped vars to template the filepath
- if utils.contains_vars(filename3) and host is not None:
- inject.update(self.vars)
- filename4 = template(self.basedir, filename3, inject)
- filename4 = utils.path_dwim(self.basedir, filename4)
+ try:
+ if isinstance(ds, dict):
+ return ds
+ elif isinstance(ds, list):
+ all_vars = dict()
+ for item in ds:
+ if not isinstance(item, dict):
+ raise ValueError
+ all_vars = combine_vars(all_vars, item)
+ return all_vars
+ elif ds is None:
+ return {}
else:
- filename4 = utils.path_dwim(self.basedir, filename3)
-
- return filename2, filename3, filename4
-
-
- def update_vars_cache(host, data, target_filename=None):
-
- """ update a host's varscache with new var data """
-
- self.playbook.VARS_CACHE[host] = utils.combine_vars(self.playbook.VARS_CACHE.get(host, {}), data)
- if target_filename:
- self.playbook.callbacks.on_import_for_host(host, target_filename)
-
- def process_files(filename, filename2, filename3, filename4, host=None):
-
- """ pseudo-algorithm for deciding where new vars should go """
-
- data = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password)
- if data:
- if type(data) != dict:
- raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
- if host is not None:
- target_filename = None
- if utils.contains_vars(filename2):
- if not utils.contains_vars(filename3):
- target_filename = filename3
- else:
- target_filename = filename4
- update_vars_cache(host, data, target_filename=target_filename)
- else:
- self.vars_file_vars = utils.combine_vars(self.vars_file_vars, data)
- # we did process this file
- return True
- # we did not process this file
- return False
-
- # Enforce that vars_files is always a list
- if type(self.vars_files) != list:
- self.vars_files = [ self.vars_files ]
-
- # Build an inject if this is a host run started by self.update_vars_files
- if host is not None:
- inject = {}
- inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password))
- inject.update(self.playbook.SETUP_CACHE.get(host, {}))
- inject.update(self.playbook.VARS_CACHE.get(host, {}))
- else:
- inject = None
-
- processed = []
- for filename in self.vars_files:
- if type(filename) == list:
- # loop over all filenames, loading the first one, and failing if none found
- found = False
- sequence = []
- for real_filename in filename:
- filename2, filename3, filename4 = generate_filenames(host, inject, real_filename)
- sequence.append(filename4)
- if os.path.exists(filename4):
- found = True
- if process_files(filename, filename2, filename3, filename4, host=host):
- processed.append(filename)
- elif host is not None:
- self.playbook.callbacks.on_not_import_for_host(host, filename4)
- if found:
- break
- if not found and host is not None:
- raise errors.AnsibleError(
- "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
- )
+ raise ValueError
+ except ValueError:
+ raise AnsibleParserError("Vars in a playbook must be specified as a dictionary, or a list of dictionaries", obj=ds)
+
+ def _load_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
+
+ def _load_pre_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
+
+ def _load_post_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
+
+ def _load_handlers(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed handlers/blocks.
+ Bare handlers outside of a block are given an implicit block.
+ '''
+ return load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader)
+
+ def _load_roles(self, attr, ds):
+ '''
+ Loads and returns a list of RoleInclude objects from the given datastructure
+ (a list of role definitions), then creates Role objects from them
+ '''
+
+ if ds is None:
+ ds = []
+
+ role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
+
+ roles = []
+ for ri in role_includes:
+ roles.append(Role.load(ri, play=self))
+ return roles
+
+ def _post_validate_vars(self, attr, value, templar):
+ '''
+ Override post validation of vars on the play, as we don't want to
+ template these too early.
+ '''
+ return value
+
+ def _post_validate_vars_files(self, attr, value, templar):
+ '''
+ Override post validation of vars_files on the play, as we don't want to
+ template these too early.
+ '''
+ return value
+
+ # disable validation on various fields which will be validated later in other objects
+ def _post_validate_become(self, attr, value, templar):
+ return value
+ def _post_validate_become_user(self, attr, value, templar):
+ return value
+ def _post_validate_become_method(self, attr, value, templar):
+ return value
+
+ # FIXME: post_validation needs to ensure that become/su/sudo have only 1 set
+
+ def _compile_roles(self):
+ '''
+ Handles the role compilation step, returning a flat list of tasks
+ with the lowest level dependencies first. For example, if a role R
+ has a dependency D1, which also has a dependency D2, the tasks from
+ D2 are merged first, followed by D1, and finally by the tasks from
+ the parent role R. This is done for all roles in the Play.
+ '''
+
+ block_list = []
+
+ if len(self.roles) > 0:
+ for r in self.roles:
+ block_list.extend(r.compile(play=self))
+
+ return block_list
+
+ def compile_roles_handlers(self):
+ '''
+ Handles the role handler compilation step, returning a flat list of Handlers.
+ This is done for all roles in the Play.
+ '''
+
+ block_list = []
+
+ if len(self.roles) > 0:
+ for r in self.roles:
+ block_list.extend(r.get_handler_blocks())
+
+ return block_list
+
+ def compile(self):
+ '''
+ Compiles and returns the task list for this play, compiled from the
+ roles (which are themselves compiled recursively) and/or the list of
+ tasks specified in the play.
+ '''
+
+ # create a block containing a single flush handlers meta
+ # task, so we can be sure to run handlers at certain points
+ # of the playbook execution
+ flush_block = Block.load(
+ data={'meta': 'flush_handlers'},
+ play=self,
+ variable_manager=self._variable_manager,
+ loader=self._loader
+ )
+
+ block_list = []
+
+ block_list.extend(self.pre_tasks)
+ block_list.append(flush_block)
+ block_list.extend(self._compile_roles())
+ block_list.extend(self.tasks)
+ block_list.append(flush_block)
+ block_list.extend(self.post_tasks)
+ block_list.append(flush_block)
+
+ return block_list
+
+ def get_vars(self):
+ return self.vars.copy()
+
+ def get_vars_files(self):
+ return self.vars_files
+
+ def get_handlers(self):
+ return self.handlers[:]
+
+ def get_roles(self):
+ return self.roles[:]
+
+ def get_tasks(self):
+ tasklist = []
+ for task in self.pre_tasks + self.tasks + self.post_tasks:
+ if isinstance(task, Block):
+ tasklist.append(task.block + task.rescue + task.always)
else:
- # just one filename supplied, load it!
- filename2, filename3, filename4 = generate_filenames(host, inject, filename)
- if utils.contains_vars(filename4):
- continue
- if process_files(filename, filename2, filename3, filename4, host=host):
- processed.append(filename)
-
- return processed
+ tasklist.append(task)
+ return tasklist
+
+ def serialize(self):
+ data = super(Play, self).serialize()
+
+ roles = []
+ for role in self.get_roles():
+ roles.append(role.serialize())
+ data['roles'] = roles
+
+ return data
+
+ def deserialize(self, data):
+ super(Play, self).deserialize(data)
+
+ if 'roles' in data:
+ role_data = data.get('roles', [])
+ roles = []
+ for role in role_data:
+ r = Role()
+ r.deserialize(role)
+ roles.append(r)
+
+ setattr(self, 'roles', roles)
+ del data['roles']
+
+ def copy(self):
+ new_me = super(Play, self).copy()
+ new_me.ROLE_CACHE = self.ROLE_CACHE.copy()
+ return new_me
+
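The reworked Play API above is exercised roughly as follows. This is a minimal sketch, assuming only the Play class shown in this diff: the play name, host pattern and shell task are placeholder values, and in real use the playbook executor supplies a DataLoader and VariableManager rather than the bare Play.load() call used here.

    from ansible.playbook.play import Play

    play = Play.load(dict(
        name="example play",                          # placeholder name
        hosts=['webserver1'],                         # placeholder host pattern
        gather_facts=False,
        tasks=[dict(action='shell echo "hello world"')],
    ))

    print(play.get_name())                            # -> "example play"
    # compile() flattens pre_tasks, roles, tasks and post_tasks into a list of
    # blocks, with 'flush_handlers' meta tasks interleaved between the sections
    for block in play.compile():
        print(block)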
diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py
new file mode 100644
index 00000000000..466f59702c6
--- /dev/null
+++ b/lib/ansible/playbook/play_context.py
@@ -0,0 +1,398 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pipes
+import random
+import re
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.base import Base
+from ansible.template import Templar
+from ansible.utils.boolean import boolean
+from ansible.utils.unicode import to_unicode
+
+__all__ = ['PlayContext']
+
+SU_PROMPT_LOCALIZATIONS = [
+ 'Password',
+ '암호',
+ 'パスワード',
+ 'Adgangskode',
+ 'Contraseña',
+ 'Contrasenya',
+ 'Hasło',
+ 'Heslo',
+ 'Jelszó',
+ 'Lösenord',
+ 'Mật khẩu',
+ 'Mot de passe',
+ 'Parola',
+ 'Parool',
+ 'Pasahitza',
+ 'Passord',
+ 'Passwort',
+ 'Salasana',
+ 'Sandi',
+ 'Senha',
+ 'Wachtwoord',
+ 'ססמה',
+ 'Лозинка',
+ 'Парола',
+ 'Пароль',
+ 'गुप्तशब्द',
+ 'शब्दकूट',
+ 'సంకేతపదము',
+ 'හස්පදය',
+ '密码',
+ '密碼',
+]
+
+# the magic variable mapping dictionary below is used to translate
+# host/inventory variables to fields in the PlayContext
+# object. The dictionary values are tuples, to account for aliases
+# in variable names.
+
+MAGIC_VARIABLE_MAPPING = dict(
+ connection = ('ansible_connection',),
+ remote_addr = ('ansible_ssh_host', 'ansible_host'),
+ remote_user = ('ansible_ssh_user', 'ansible_user'),
+ port = ('ansible_ssh_port', 'ansible_port'),
+ password = ('ansible_ssh_pass', 'ansible_password'),
+ private_key_file = ('ansible_ssh_private_key_file', 'ansible_private_key_file'),
+ shell = ('ansible_shell_type',),
+ become = ('ansible_become',),
+ become_method = ('ansible_become_method',),
+ become_user = ('ansible_become_user',),
+ become_pass = ('ansible_become_password','ansible_become_pass'),
+ become_exe = ('ansible_become_exe',),
+ become_flags = ('ansible_become_flags',),
+ sudo = ('ansible_sudo',),
+ sudo_user = ('ansible_sudo_user',),
+ sudo_pass = ('ansible_sudo_password', 'ansible_sudo_pass'),
+ sudo_exe = ('ansible_sudo_exe',),
+ sudo_flags = ('ansible_sudo_flags',),
+ su = ('ansible_su',),
+ su_user = ('ansible_su_user',),
+ su_pass = ('ansible_su_password', 'ansible_su_pass'),
+ su_exe = ('ansible_su_exe',),
+ su_flags = ('ansible_su_flags',),
+)
+
+
+TASK_ATTRIBUTE_OVERRIDES = (
+ 'become',
+ 'become_user',
+ 'become_pass',
+ 'become_method',
+ 'connection',
+ 'delegate_to',
+ 'no_log',
+ 'remote_user',
+)
+
+
+class PlayContext(Base):
+
+ '''
+ This class is used to consolidate the connection information for
+ hosts in a play and child tasks, where the task may override some
+ connection/authentication information.
+ '''
+
+ # connection fields, some are inherited from Base:
+ # (connection, port, remote_user, environment, no_log)
+ _remote_addr = FieldAttribute(isa='string')
+ _password = FieldAttribute(isa='string')
+ _private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE)
+ _timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT)
+ _shell = FieldAttribute(isa='string')
+
+ # privilege escalation fields
+ _become = FieldAttribute(isa='bool')
+ _become_method = FieldAttribute(isa='string')
+ _become_user = FieldAttribute(isa='string')
+ _become_pass = FieldAttribute(isa='string')
+ _become_exe = FieldAttribute(isa='string')
+ _become_flags = FieldAttribute(isa='string')
+ _prompt = FieldAttribute(isa='string')
+
+ # backwards compatibility fields for sudo/su
+ _sudo_exe = FieldAttribute(isa='string')
+ _sudo_flags = FieldAttribute(isa='string')
+ _sudo_pass = FieldAttribute(isa='string')
+ _su_exe = FieldAttribute(isa='string')
+ _su_flags = FieldAttribute(isa='string')
+ _su_pass = FieldAttribute(isa='string')
+
+ # general flags
+ _verbosity = FieldAttribute(isa='int', default=0)
+ _only_tags = FieldAttribute(isa='set', default=set())
+ _skip_tags = FieldAttribute(isa='set', default=set())
+ _check_mode = FieldAttribute(isa='bool', default=False)
+ _force_handlers = FieldAttribute(isa='bool', default=False)
+ _start_at_task = FieldAttribute(isa='string')
+ _step = FieldAttribute(isa='bool', default=False)
+ _diff = FieldAttribute(isa='bool', default=False)
+
+ def __init__(self, play=None, options=None, passwords=None):
+
+ super(PlayContext, self).__init__()
+
+ if passwords is None:
+ passwords = {}
+
+ self.password = passwords.get('conn_pass','')
+ self.become_pass = passwords.get('become_pass','')
+
+ # set options before play to allow play to override them
+ if options:
+ self.set_options(options)
+
+ if play:
+ self.set_play(play)
+
+ def set_play(self, play):
+ '''
+ Configures this connection information instance with data from
+ the play class.
+ '''
+
+ if play.connection:
+ self.connection = play.connection
+
+ if play.remote_user:
+ self.remote_user = play.remote_user
+
+ if play.port:
+ self.port = int(play.port)
+
+ if play.become is not None:
+ self.become = play.become
+ if play.become_method:
+ self.become_method = play.become_method
+ if play.become_user:
+ self.become_user = play.become_user
+
+ # non connection related
+ self.no_log = play.no_log
+
+ if play.force_handlers is not None:
+ self.force_handlers = play.force_handlers
+
+ def set_options(self, options):
+ '''
+ Configures this connection information instance with data from
+ options specified by the user on the command line. These have a
+ lower precedence than those set on the play or host.
+ '''
+
+ if options.connection:
+ self.connection = options.connection
+
+ self.remote_user = options.remote_user
+ self.private_key_file = options.private_key_file
+
+ # privilege escalation
+ self.become = options.become
+ self.become_method = options.become_method
+ self.become_user = options.become_user
+
+ # general flags (should we move out?)
+ if options.verbosity:
+ self.verbosity = options.verbosity
+ #if options.no_log:
+ # self.no_log = boolean(options.no_log)
+ if options.check:
+ self.check_mode = boolean(options.check)
+ if hasattr(options, 'force_handlers') and options.force_handlers:
+ self.force_handlers = boolean(options.force_handlers)
+ if hasattr(options, 'step') and options.step:
+ self.step = boolean(options.step)
+ if hasattr(options, 'start_at_task') and options.start_at_task:
+ self.start_at_task = to_unicode(options.start_at_task)
+ if hasattr(options, 'diff') and options.diff:
+ self.diff = boolean(options.diff)
+
+ # get the tag info from options, converting a comma-separated list
+ # of values into a proper list if need be. We check to see if the
+ # options have the attribute, as it is not always added via the CLI
+ if hasattr(options, 'tags'):
+ if isinstance(options.tags, list):
+ self.only_tags.update(options.tags)
+ elif isinstance(options.tags, basestring):
+ self.only_tags.update(options.tags.split(','))
+
+ if len(self.only_tags) == 0:
+ self.only_tags = set(['all'])
+
+ if hasattr(options, 'skip_tags'):
+ if isinstance(options.skip_tags, list):
+ self.skip_tags.update(options.skip_tags)
+ elif isinstance(options.skip_tags, basestring):
+ self.skip_tags.update(options.skip_tags.split(','))
+
+ def set_task_and_variable_override(self, task, variables):
+ '''
+ Sets attributes from the task if they are set, which will override
+ those from the play.
+ '''
+
+ new_info = self.copy()
+
+ # loop through a subset of attributes on the task object and set
+ # connection fields based on their values
+ for attr in TASK_ATTRIBUTE_OVERRIDES:
+ if hasattr(task, attr):
+ attr_val = getattr(task, attr)
+ if attr_val is not None:
+ setattr(new_info, attr, attr_val)
+
+ # finally, use the MAGIC_VARIABLE_MAPPING dictionary to update this
+ # connection info object with 'magic' variables from the variable list
+ for (attr, variable_names) in MAGIC_VARIABLE_MAPPING.iteritems():
+ for variable_name in variable_names:
+ if variable_name in variables:
+ setattr(new_info, attr, variables[variable_name])
+
+ # make sure we get port defaults if needed
+ if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
+ new_info.port = int(C.DEFAULT_REMOTE_PORT)
+
+ # become legacy updates
+ if not new_info.become_pass:
+ if new_info.become_method == 'sudo' and new_info.sudo_pass:
+ setattr(new_info, 'become_pass', new_info.sudo_pass)
+ elif new_info.become_method == 'su' and new_info.su_pass:
+ setattr(new_info, 'become_pass', new_info.su_pass)
+
+ return new_info
+
+ def make_become_cmd(self, cmd, executable=None):
+ """ helper function to create privilege escalation commands """
+
+ prompt = None
+ success_key = None
+
+ if executable is None:
+ executable = C.DEFAULT_EXECUTABLE
+
+ if self.become:
+
+ becomecmd = None
+ randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
+ success_key = 'BECOME-SUCCESS-%s' % randbits
+ success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))
+
+ if self.become_method == 'sudo':
+ # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
+ # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
+ # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
+ # string to the user's shell. We loop reading output until we see the randomly-generated
+ # sudo prompt set with the -p option.
+ prompt = '[sudo via ansible, key=%s] password: ' % randbits
+ exe = self.become_exe or self.sudo_exe or 'sudo'
+ flags = self.become_flags or self.sudo_flags or ''
+ becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
+ (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, success_cmd)
+
+ elif self.become_method == 'su':
+
+ def detect_su_prompt(data):
+ SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE)
+ return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))
+
+ prompt = detect_su_prompt
+ exe = self.become_exe or self.su_exe or 'su'
+ flags = self.become_flags or self.su_flags or ''
+ becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd)
+
+ elif self.become_method == 'pbrun':
+
+ # note: 'assword' matches the tail of both 'Password:' and 'password:' prompts
+ prompt='assword:'
+ exe = self.become_exe or 'pbrun'
+ flags = self.become_flags or ''
+ becomecmd = '%s -b %s -u %s %s' % (exe, flags, self.become_user, success_cmd)
+
+ elif self.become_method == 'pfexec':
+
+ exe = self.become_exe or 'pfexec'
+ flags = self.become_flags or ''
+ # No user as it uses its own exec_attr to figure it out
+ becomecmd = '%s %s "%s"' % (exe, flags, success_cmd)
+
+ else:
+ raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)
+
+ self.prompt = prompt
+ self.success_key = success_key
+ return ('%s -c %s' % (executable, pipes.quote(becomecmd)))
+
+ return cmd
+
+ def update_vars(self, variables):
+ '''
+ Adds 'magic' variables relating to connections to the variable dictionary provided.
+ This is a legacy from runner, kept in case users need to access these values from the play.
+ '''
+
+ #FIXME: remove password? possibly add become/sudo settings
+ for special_var in ['ansible_connection', 'ansible_ssh_host', 'ansible_ssh_pass', 'ansible_ssh_port', 'ansible_ssh_user', 'ansible_ssh_private_key_file']:
+ if special_var not in variables:
+ for prop, varnames in MAGIC_VARIABLE_MAPPING.items():
+ if special_var in varnames:
+ variables[special_var] = getattr(self, prop)
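To illustrate how the new PlayContext ties these pieces together, here is a minimal sketch that builds a context by hand (no play or options objects) and asks it to wrap a command for privilege escalation. The command being wrapped and the become_user are placeholders, and the exact flags in the result depend on the configured sudo defaults, so the output shown in the comment is only indicative.

    from ansible.playbook.play_context import PlayContext

    pc = PlayContext()                 # defaults only: no play, options or passwords
    pc.become = True
    pc.become_method = 'sudo'
    pc.become_user = 'root'

    wrapped = pc.make_become_cmd('/usr/bin/id', executable='/bin/sh')
    # wrapped looks roughly like:
    #   /bin/sh -c 'sudo -k && sudo <flags> -S -p "[sudo via ansible, key=...] password: " -u root /bin/sh -c ...'
    print(pc.prompt)                   # the randomly keyed sudo prompt set above
    print(wrapped)

Inventory variables reach the same object through MAGIC_VARIABLE_MAPPING in set_task_and_variable_override(); for example, either ansible_ssh_user or ansible_user ends up on the remote_user field.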
diff --git a/v2/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py
similarity index 88%
rename from v2/ansible/playbook/playbook_include.py
rename to lib/ansible/playbook/playbook_include.py
index 5c91dd14adb..5e83f0d3a33 100644
--- a/v2/ansible/playbook/playbook_include.py
+++ b/lib/ansible/playbook/playbook_include.py
@@ -21,15 +21,16 @@ __metaclass__ = type
import os
+from ansible.errors import AnsibleParserError
from ansible.parsing.splitter import split_args, parse_kv
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
-from ansible.errors import AnsibleParserError
+from ansible.template import Templar
-class PlaybookInclude(Base):
+class PlaybookInclude(Base, Conditional, Taggable):
_name = FieldAttribute(isa='string')
_include = FieldAttribute(isa='string')
@@ -53,6 +54,14 @@ class PlaybookInclude(Base):
# playbook objects
new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)
+ all_vars = dict()
+ if variable_manager:
+ all_vars = variable_manager.get_vars(loader=loader)
+
+ templar = Templar(loader=loader, variables=all_vars)
+ if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars):
+ return None
+
# then we use the object to load a Playbook
pb = Playbook(loader=loader)
@@ -62,10 +71,11 @@ class PlaybookInclude(Base):
pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
- # finally, playbook includes can specify a list of variables, which are simply
- # used to update the vars of each play in the playbook
+ # finally, update each loaded playbook entry with any variables specified
+ # on the included playbook and/or any tags which may have been set
for entry in pb._entries:
entry.vars.update(new_obj.vars)
+ entry.tags = list(set(entry.tags).union(new_obj.tags))
return pb
@@ -118,6 +128,8 @@ class PlaybookInclude(Base):
# rejoin the parameter portion of the arguments and
# then use parse_kv() to get a dict of params back
params = parse_kv(" ".join(items[1:]))
+ if 'tags' in params:
+ new_ds['tags'] = params.pop('tags')
if 'vars' in new_ds:
# FIXME: see fixme above regarding merging vars
raise AnsibleParserError("include parameters cannot be mixed with 'vars' entries for include statements", obj=ds)
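The last hunk lets the shorthand include form carry tags (e.g. "include: other_playbook.yml tags=deploy"), with those tags then unioned onto every play loaded from the included file. Below is a small sketch of the parameter-splitting step it relies on; the filename, tag names and extra parameter are placeholders, and the dict shown in the comment is only indicative of what parse_kv returns.

    from ansible.parsing.splitter import split_args, parse_kv

    items = split_args('other_playbook.yml tags=deploy,web app_version=2')
    params = parse_kv(" ".join(items[1:]))
    # params is roughly {'tags': 'deploy,web', 'app_version': '2'}
    tags = params.pop('tags', None)    # mirrors the new 'tags' handling above
    print("%s -> tags=%s params=%s" % (items[0], tags, params))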
diff --git a/v2/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py
similarity index 87%
rename from v2/ansible/playbook/role/__init__.py
rename to lib/ansible/playbook/role/__init__.py
index bea61147ae8..f46014f60bc 100644
--- a/v2/ansible/playbook/role/__init__.py
+++ b/lib/ansible/playbook/role/__init__.py
@@ -41,7 +41,7 @@ from ansible.plugins import get_all_plugin_loaders
from ansible.utils.vars import combine_vars
-__all__ = ['Role', 'ROLE_CACHE', 'hash_params']
+__all__ = ['Role', 'hash_params']
# FIXME: this should be a utility function, but can't be a member of
# the role due to the fact that it would require the use of self
@@ -64,23 +64,16 @@ def hash_params(params):
s.update((k, v))
return frozenset(s)
-# The role cache is used to prevent re-loading roles, which
-# may already exist. Keys into this cache are the SHA1 hash
-# of the role definition (for dictionary definitions, this
-# will be based on the repr() of the dictionary object)
-ROLE_CACHE = dict()
-
-
class Role(Base, Become, Conditional, Taggable):
- def __init__(self):
+ def __init__(self, play=None):
self._role_name = None
self._role_path = None
self._role_params = dict()
self._loader = None
self._metadata = None
- self._play = None
+ self._play = play
self._parents = []
self._dependencies = []
self._task_blocks = []
@@ -99,30 +92,43 @@ class Role(Base, Become, Conditional, Taggable):
return self._role_name
@staticmethod
- def load(role_include, parent_role=None):
- # FIXME: add back in the role caching support
+ def load(role_include, play, parent_role=None):
try:
# The ROLE_CACHE is a dictionary of role names, with each entry
# containing another dictionary corresponding to a set of parameters
# specified for a role as the key and the Role() object itself.
# We use frozenset to make the dictionary hashable.
- #hashed_params = frozenset(role_include.get_role_params().iteritems())
- hashed_params = hash_params(role_include.get_role_params())
- if role_include.role in ROLE_CACHE:
- for (entry, role_obj) in ROLE_CACHE[role_include.role].iteritems():
+ params = role_include.get_role_params()
+ if role_include.when is not None:
+ params['when'] = role_include.when
+ if role_include.tags is not None:
+ params['tags'] = role_include.tags
+ hashed_params = hash_params(params)
+ if role_include.role in play.ROLE_CACHE:
+ for (entry, role_obj) in play.ROLE_CACHE[role_include.role].iteritems():
if hashed_params == entry:
if parent_role:
role_obj.add_parent(parent_role)
return role_obj
- r = Role()
+ r = Role(play=play)
r._load_role_data(role_include, parent_role=parent_role)
- if role_include.role not in ROLE_CACHE:
- ROLE_CACHE[role_include.role] = dict()
+ if role_include.role not in play.ROLE_CACHE:
+ play.ROLE_CACHE[role_include.role] = dict()
- ROLE_CACHE[role_include.role][hashed_params] = r
+ if parent_role:
+ if parent_role.when:
+ new_when = parent_role.when[:]
+ new_when.extend(r.when or [])
+ r.when = new_when
+ if parent_role.tags:
+ new_tags = parent_role.tags[:]
+ new_tags.extend(r.tags or [])
+ r.tags = new_tags
+
+ play.ROLE_CACHE[role_include.role][hashed_params] = r
return r
except RuntimeError:
@@ -165,14 +171,16 @@ class Role(Base, Become, Conditional, Taggable):
if metadata:
self._metadata = RoleMetadata.load(metadata, owner=self, loader=self._loader)
self._dependencies = self._load_dependencies()
+ else:
+ self._metadata = RoleMetadata()
task_data = self._load_role_yaml('tasks')
if task_data:
- self._task_blocks = load_list_of_blocks(task_data, play=None, role=self, loader=self._loader)
+ self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader)
handler_data = self._load_role_yaml('handlers')
if handler_data:
- self._handler_blocks = load_list_of_blocks(handler_data, play=None, role=self, loader=self._loader)
+ self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader)
# vars and default vars are regular dictionaries
self._role_vars = self._load_role_yaml('vars')
@@ -221,7 +229,7 @@ class Role(Base, Become, Conditional, Taggable):
deps = []
if self._metadata:
for role_include in self._metadata.dependencies:
- r = Role.load(role_include, parent_role=self)
+ r = Role.load(role_include, play=self._play, parent_role=self)
deps.append(r)
return deps
@@ -247,16 +255,16 @@ class Role(Base, Become, Conditional, Taggable):
default_vars = combine_vars(default_vars, self._default_vars)
return default_vars
- def get_inherited_vars(self):
+ def get_inherited_vars(self, dep_chain=[]):
inherited_vars = dict()
- for parent in self._parents:
- inherited_vars = combine_vars(inherited_vars, parent.get_inherited_vars())
+
+ for parent in dep_chain:
inherited_vars = combine_vars(inherited_vars, parent._role_vars)
inherited_vars = combine_vars(inherited_vars, parent._role_params)
return inherited_vars
- def get_vars(self):
- all_vars = self.get_inherited_vars()
+ def get_vars(self, dep_chain=[]):
+ all_vars = self.get_inherited_vars(dep_chain)
for dep in self.get_all_dependencies():
all_vars = combine_vars(all_vars, dep.get_vars())
@@ -288,7 +296,12 @@ class Role(Base, Become, Conditional, Taggable):
return self._task_blocks[:]
def get_handler_blocks(self):
- return self._handler_blocks[:]
+ block_list = []
+ for dep in self.get_direct_dependencies():
+ dep_blocks = dep.get_handler_blocks()
+ block_list.extend(dep_blocks)
+ block_list.extend(self._handler_blocks)
+ return block_list
def has_run(self):
'''
@@ -296,7 +309,7 @@ class Role(Base, Become, Conditional, Taggable):
at least one task was run
'''
- return self._had_task_run and self._completed
+ return self._had_task_run and self._completed and not self._metadata.allow_duplicates
def compile(self, play, dep_chain=[]):
'''
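A key change in this file is that the role cache now lives on the Play (play.ROLE_CACHE) rather than at module level, keyed per role name by hash_params() over the role parameters (which now also fold in any 'when' and 'tags' from the include). A minimal sketch of how that key behaves; the parameter values and role name are placeholders:

    from ansible.playbook.role import hash_params

    params_a = {'app_port': 8080, 'tags': ['web']}
    params_b = {'tags': ['web'], 'app_port': 8080}   # same content, different order

    key_a = hash_params(params_a)
    key_b = hash_params(params_b)
    assert key_a == key_b            # identical params produce the same cache key

    role_cache = {'common': {key_a: object()}}       # shape of play.ROLE_CACHE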
diff --git a/v2/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py
similarity index 82%
rename from v2/ansible/playbook/role/definition.py
rename to lib/ansible/playbook/role/definition.py
index 0cb1e45760d..a54febe1feb 100644
--- a/v2/ansible/playbook/role/definition.py
+++ b/lib/ansible/playbook/role/definition.py
@@ -31,6 +31,7 @@ from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
+from ansible.template import Templar
from ansible.utils.path import unfrackpath
@@ -41,7 +42,11 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
_role = FieldAttribute(isa='string')
- def __init__(self, role_basedir=None):
+ def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None):
+ self._play = play
+ self._variable_manager = variable_manager
+ self._loader = loader
+
self._role_path = None
self._role_basedir = role_basedir
self._role_params = dict()
@@ -55,8 +60,12 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
raise AnsibleError("not implemented")
def preprocess_data(self, ds):
+ # role names that are simply numbers can be parsed by PyYAML
+ # as integers even when quoted, so turn it into a string type
+ if isinstance(ds, int):
+ ds = "%s" % ds
- assert isinstance(ds, dict) or isinstance(ds, string_types)
+ assert isinstance(ds, dict) or isinstance(ds, string_types) or isinstance(ds, AnsibleBaseYAMLObject)
if isinstance(ds, dict):
ds = super(RoleDefinition, self).preprocess_data(ds)
@@ -105,9 +114,17 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
return ds
role_name = ds.get('role', ds.get('name'))
- if not role_name:
+ if not role_name or not isinstance(role_name, string_types):
raise AnsibleError('role definitions must contain a role name', obj=ds)
+ # if we have the required datastructures, and if the role_name
+ # contains a variable, try and template it now
+ if self._play and self._variable_manager:
+ all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
+ templar = Templar(loader=self._loader, variables=all_vars)
+ if templar._contains_vars(role_name):
+ role_name = templar.template(role_name)
+
return role_name
def _load_role_path(self, role_name):
@@ -125,7 +142,12 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
return (role_name, role_path)
else:
# we always start the search for roles in the base directory of the playbook
- role_search_paths = [os.path.join(self._loader.get_basedir(), 'roles'), './roles', './']
+ role_search_paths = [
+ os.path.join(self._loader.get_basedir(), u'roles'),
+ u'./roles',
+ self._loader.get_basedir(),
+ u'./'
+ ]
# also search in the configured roles path
if C.DEFAULT_ROLES_PATH:
@@ -146,7 +168,7 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
# FIXME: make the parser smart about list/string entries in
# the yaml so the error line/file can be reported here
- raise AnsibleError("the role '%s' was not found" % role_name)
+ raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)))
def _split_role_params(self, ds):
'''
diff --git a/v2/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py
similarity index 70%
rename from v2/ansible/playbook/role/include.py
rename to lib/ansible/playbook/role/include.py
index b063aecc350..07ebf3f0d62 100644
--- a/v2/ansible/playbook/role/include.py
+++ b/lib/ansible/playbook/role/include.py
@@ -24,6 +24,7 @@ from six import iteritems, string_types
import os
from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.role.definition import RoleDefinition
@@ -37,13 +38,14 @@ class RoleInclude(RoleDefinition):
FIXME: docstring
"""
- def __init__(self, role_basedir=None):
- super(RoleInclude, self).__init__(role_basedir=role_basedir)
+ def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None):
+ super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager, loader=loader)
@staticmethod
- def load(data, current_role_path=None, parent_role=None, variable_manager=None, loader=None):
- assert isinstance(data, string_types) or isinstance(data, dict)
+ def load(data, play, current_role_path=None, parent_role=None, variable_manager=None, loader=None):
- ri = RoleInclude(role_basedir=current_role_path)
+ assert isinstance(data, string_types) or isinstance(data, dict) or isinstance(data, AnsibleBaseYAMLObject)
+
+ ri = RoleInclude(play=play, role_basedir=current_role_path, variable_manager=variable_manager, loader=loader)
return ri.load_data(data, variable_manager=variable_manager, loader=loader)
diff --git a/v2/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py
similarity index 93%
rename from v2/ansible/playbook/role/metadata.py
rename to lib/ansible/playbook/role/metadata.py
index 461a9a4a627..cd56e606331 100644
--- a/v2/ansible/playbook/role/metadata.py
+++ b/lib/ansible/playbook/role/metadata.py
@@ -65,11 +65,14 @@ class RoleMetadata(Base):
which returns a list of RoleInclude objects
'''
+ if ds is None:
+ ds = []
+
current_role_path = None
if self._owner:
current_role_path = os.path.dirname(self._owner._role_path)
- return load_list_of_roles(ds, current_role_path=current_role_path, variable_manager=self._variable_manager, loader=self._loader)
+ return load_list_of_roles(ds, play=self._owner._play, current_role_path=current_role_path, variable_manager=self._variable_manager, loader=self._loader)
def _load_galaxy_info(self, attr, ds):
'''
diff --git a/v2/ansible/playbook/role/requirement.py b/lib/ansible/playbook/role/requirement.py
similarity index 100%
rename from v2/ansible/playbook/role/requirement.py
rename to lib/ansible/playbook/role/requirement.py
diff --git a/v2/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py
similarity index 83%
rename from v2/ansible/playbook/taggable.py
rename to lib/ansible/playbook/taggable.py
index 40e05d1817a..1f55f95a2e7 100644
--- a/v2/ansible/playbook/taggable.py
+++ b/lib/ansible/playbook/taggable.py
@@ -19,14 +19,17 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import itertools
+from six import string_types
+
from ansible.errors import AnsibleError
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
class Taggable:
- untagged = set(['untagged'])
- _tags = FieldAttribute(isa='list', default=[])
+ untagged = frozenset(['untagged'])
+ _tags = FieldAttribute(isa='list', default=[], listof=(string_types,int))
def __init__(self):
super(Taggable, self).__init__()
@@ -65,10 +68,10 @@ class Taggable:
else:
tags = set([tags])
else:
- tags = set(tags)
+ tags = set([i for i,_ in itertools.groupby(tags)])
else:
- # this makes intersection work for untagged
- tags = self.__class__.untagged
+ # this makes isdisjoint work for untagged
+ tags = self.untagged
if only_tags:
@@ -76,9 +79,9 @@ class Taggable:
if 'always' in tags or 'all' in only_tags:
should_run = True
- elif tags.intersection(only_tags):
+ elif not tags.isdisjoint(only_tags):
should_run = True
- elif 'tagged' in only_tags and tags != self.__class__.untagged:
+ elif 'tagged' in only_tags and tags != self.untagged:
should_run = True
if should_run and skip_tags:
@@ -87,9 +90,9 @@ class Taggable:
if 'all' in skip_tags:
if 'always' not in tags or 'always' in skip_tags:
should_run = False
- elif tags.intersection(skip_tags):
+ elif not tags.isdisjoint(skip_tags):
should_run = False
- elif 'tagged' in skip_tags and tags != self.__class__.untagged:
+ elif 'tagged' in skip_tags and tags != self.untagged:
should_run = False
return should_run
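The tag filtering above now relies on set.isdisjoint() instead of building intersections. Here is a standalone sketch of that decision with plain sets, mirroring the branches shown in the hunk (the tag names are placeholders):

    task_tags = frozenset(['deploy', 'web'])
    only_tags = set(['deploy'])
    skip_tags = set(['db'])
    untagged  = frozenset(['untagged'])

    should_run = False
    if 'always' in task_tags or 'all' in only_tags:
        should_run = True
    elif not task_tags.isdisjoint(only_tags):           # any tag matches only_tags
        should_run = True
    elif 'tagged' in only_tags and task_tags != untagged:
        should_run = True

    if should_run and skip_tags:
        if 'all' in skip_tags:
            if 'always' not in task_tags or 'always' in skip_tags:
                should_run = False
        elif not task_tags.isdisjoint(skip_tags):        # skip wins over run
            should_run = False
        elif 'tagged' in skip_tags and task_tags != untagged:
            should_run = False

    print(should_run)   # True here: 'deploy' matches and nothing matches skip_tags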
diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py
index 70c1bc8df6b..68a399027a1 100644
--- a/lib/ansible/playbook/task.py
+++ b/lib/ansible/playbook/task.py
@@ -15,332 +15,313 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from ansible import errors
-from ansible import utils
-from ansible.module_utils.splitter import split_args
-import os
-import ansible.utils.template as template
-import sys
-
-class Task(object):
-
- _t_common = [
- 'action', 'always_run', 'any_errors_fatal', 'args', 'become', 'become_method', 'become_pass',
- 'become_user', 'changed_when', 'delay', 'delegate_to', 'environment', 'failed_when',
- 'first_available_file', 'ignore_errors', 'local_action', 'meta', 'name', 'no_log',
- 'notify', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user',
- 'sudo', 'sudo_pass', 'sudo_user', 'tags', 'transport', 'until', 'when',
- ]
-
- __slots__ = [
- 'async_poll_interval', 'async_seconds', 'default_vars', 'first_available_file',
- 'items_lookup_plugin', 'items_lookup_terms', 'module_args', 'module_name', 'module_vars',
- 'notified_by', 'play', 'play_file_vars', 'play_vars', 'role_name', 'role_params', 'role_vars',
- ] + _t_common
-
- # to prevent typos and such
- VALID_KEYS = frozenset([
- 'async', 'connection', 'include', 'poll',
- ] + _t_common)
-
- def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None):
- ''' constructor loads from a task or handler datastructure '''
-
- # meta directives are used to tell things like ansible/playbook to run
- # operations like handler execution. Meta tasks are not executed
- # normally.
- if 'meta' in ds:
- self.meta = ds['meta']
- self.tags = []
- self.module_vars = module_vars
- self.role_name = role_name
- return
- else:
- self.meta = None
-
-
- library = os.path.join(play.basedir, 'library')
- if os.path.exists(library):
- utils.plugins.module_finder.add_directory(library)
-
- for x in ds.keys():
-
- # code to allow for saying "modulename: args" versus "action: modulename args"
- if x in utils.plugins.module_finder:
-
- if 'action' in ds:
- raise errors.AnsibleError("multiple actions specified in task: '%s' and '%s'" % (x, ds.get('name', ds['action'])))
- if isinstance(ds[x], dict):
- if 'args' in ds:
- raise errors.AnsibleError("can't combine args: and a dict for %s: in task %s" % (x, ds.get('name', "%s: %s" % (x, ds[x]))))
- ds['args'] = ds[x]
- ds[x] = ''
- elif ds[x] is None:
- ds[x] = ''
- if not isinstance(ds[x], basestring):
- raise errors.AnsibleError("action specified for task %s has invalid type %s" % (ds.get('name', "%s: %s" % (x, ds[x])), type(ds[x])))
- ds['action'] = x + " " + ds[x]
- ds.pop(x)
-
- # code to allow "with_glob" and to reference a lookup plugin named glob
- elif x.startswith("with_"):
- if isinstance(ds[x], basestring):
- param = ds[x].strip()
-
- plugin_name = x.replace("with_","")
- if plugin_name in utils.plugins.lookup_loader:
- ds['items_lookup_plugin'] = plugin_name
- ds['items_lookup_terms'] = ds[x]
- ds.pop(x)
- else:
- raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
-
- elif x in [ 'changed_when', 'failed_when', 'when']:
- if isinstance(ds[x], basestring):
- param = ds[x].strip()
- # Only a variable, no logic
- if (param.startswith('{{') and
- param.find('}}') == len(ds[x]) - 2 and
- param.find('|') == -1):
- utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.")
- elif x.startswith("when_"):
- utils.deprecated("The 'when_' conditional has been removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True)
-
- if 'when' in ds:
- raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds['action'])))
- when_name = x.replace("when_","")
- ds['when'] = "%s %s" % (when_name, ds[x])
- ds.pop(x)
- elif not x in Task.VALID_KEYS:
- raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x)
-
- self.module_vars = module_vars
- self.play_vars = play_vars
- self.play_file_vars = play_file_vars
- self.role_vars = role_vars
- self.role_params = role_params
- self.default_vars = default_vars
- self.play = play
-
- # load various attributes
- self.name = ds.get('name', None)
- self.tags = [ 'untagged' ]
- self.register = ds.get('register', None)
- self.environment = ds.get('environment', play.environment)
- self.role_name = role_name
- self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log
- self.run_once = utils.boolean(ds.get('run_once', 'false'))
-
- #Code to allow do until feature in a Task
- if 'until' in ds:
- if not ds.get('register'):
- raise errors.AnsibleError("register keyword is mandatory when using do until feature")
- self.module_vars['delay'] = ds.get('delay', 5)
- self.module_vars['retries'] = ds.get('retries', 3)
- self.module_vars['register'] = ds.get('register', None)
- self.until = ds.get('until')
- self.module_vars['until'] = self.until
-
- # rather than simple key=value args on the options line, these represent structured data and the values
- # can be hashes and lists, not just scalars
- self.args = ds.get('args', {})
-
- # get remote_user for task, then play, then playbook
- if ds.get('remote_user') is not None:
- self.remote_user = ds.get('remote_user')
- elif ds.get('remote_user', play.remote_user) is not None:
- self.remote_user = ds.get('remote_user', play.remote_user)
- else:
- self.remote_user = ds.get('remote_user', play.playbook.remote_user)
-
- # Fail out if user specifies privilege escalation params in conflict
- if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')):
- raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
-
- if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
- raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and su params "su", "su_user", "sudo_pass" in task: %s' % self.name)
-
- if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
- raise errors.AnsibleError('incompatible parameters ("su", "su_user", "su_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
-
- self.become = utils.boolean(ds.get('become', play.become))
- self.become_method = ds.get('become_method', play.become_method)
- self.become_user = ds.get('become_user', play.become_user)
- self.become_pass = ds.get('become_pass', play.playbook.become_pass)
-
- # set only if passed in current task data
- if 'sudo' in ds or 'sudo_user' in ds:
- self.become_method='sudo'
-
- if 'sudo' in ds:
- self.become=ds['sudo']
- del ds['sudo']
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+
+from ansible.parsing.mod_args import ModuleArgsParser
+from ansible.parsing.splitter import parse_kv
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
+
+from ansible.plugins import module_loader, lookup_loader
+
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.become import Become
+from ansible.playbook.block import Block
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
+
+__all__ = ['Task']
+
+class Task(Base, Conditional, Taggable, Become):
+
+ """
+ A task is a language feature that represents a call to a module, with given arguments and other parameters.
+ A handler is a subclass of a task.
+
+ Usage:
+
+ Task.load(datastructure) -> Task
+ Task.something(...)
+ """
+
+ # =================================================================================
+ # ATTRIBUTES
+ # load_<attribute_name> and
+ # validate_<attribute_name>
+ # will be used if defined
+ # might be possible to define others
+
+ _args = FieldAttribute(isa='dict', default=dict())
+ _action = FieldAttribute(isa='string')
+
+ _always_run = FieldAttribute(isa='bool')
+ _any_errors_fatal = FieldAttribute(isa='bool')
+ _async = FieldAttribute(isa='int', default=0)
+ _changed_when = FieldAttribute(isa='string')
+ _delay = FieldAttribute(isa='int', default=5)
+ _delegate_to = FieldAttribute(isa='string')
+ _failed_when = FieldAttribute(isa='string')
+ _first_available_file = FieldAttribute(isa='list')
+ _ignore_errors = FieldAttribute(isa='bool')
+ _loop = FieldAttribute(isa='string', private=True)
+ _loop_args = FieldAttribute(isa='list', private=True)
+ _local_action = FieldAttribute(isa='string')
+ _name = FieldAttribute(isa='string', default='')
+ _notify = FieldAttribute(isa='list')
+ _poll = FieldAttribute(isa='int')
+ _register = FieldAttribute(isa='string')
+ _retries = FieldAttribute(isa='int', default=1)
+ _run_once = FieldAttribute(isa='bool')
+ _until = FieldAttribute(isa='list') # ?
+ _vars = FieldAttribute(isa='dict', default=dict())
+
+ def __init__(self, block=None, role=None, task_include=None):
+ ''' constructs a task; without the Task.load classmethod, it will be pretty blank '''
+
+ self._block = block
+ self._role = role
+ self._task_include = task_include
+
+ super(Task, self).__init__()
+
+ def get_name(self):
+ ''' return the name of the task '''
+
+ if self._role and self.name:
+ return "%s : %s" % (self._role.get_name(), self.name)
+ elif self.name:
+ return self.name
+ else:
+ flattened_args = self._merge_kv(self.args)
+ if self._role:
+ return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args)
+ else:
+ return "%s %s" % (self.action, flattened_args)
+
+ def _merge_kv(self, ds):
+ if ds is None:
+ return ""
+ elif isinstance(ds, basestring):
+ return ds
+ elif isinstance(ds, dict):
+ buf = ""
+ for (k,v) in ds.iteritems():
+ if k.startswith('_'):
+ continue
+ buf = buf + "%s=%s " % (k,v)
+ buf = buf.strip()
+ return buf
+
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+ t = Task(block=block, role=role, task_include=task_include)
+ return t.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ def __repr__(self):
+ ''' returns a human readable representation of the task '''
+ return "TASK: %s" % self.get_name()
+
+ def _preprocess_loop(self, ds, new_ds, k, v):
+ ''' take a lookup plugin name and store it correctly '''
+
+ loop_name = k.replace("with_", "")
+ if new_ds.get('loop') is not None:
+ raise AnsibleError("duplicate loop in task: %s" % loop_name, obj=ds)
+ if v is None:
+ raise AnsibleError("you must specify a value when using %s" % k, obj=ds)
+ new_ds['loop'] = loop_name
+ new_ds['loop_args'] = v
+
+ def preprocess_data(self, ds):
+ '''
+ tasks are especially complex arguments, so they need pre-processing.
+ keep it short.
+ '''
+
+ assert isinstance(ds, dict)
+
+ # the new, cleaned datastructure, which will have legacy
+ # items reduced to a standard structure suitable for the
+ # attributes of the task class
+ new_ds = AnsibleMapping()
+ if isinstance(ds, AnsibleBaseYAMLObject):
+ new_ds.ansible_pos = ds.ansible_pos
+
+ # use the args parsing class to determine the action, args,
+ # and the delegate_to value from the various possible forms
+ # supported as legacy
+ args_parser = ModuleArgsParser(task_ds=ds)
+ (action, args, delegate_to) = args_parser.parse()
+
+ new_ds['action'] = action
+ new_ds['args'] = args
+ new_ds['delegate_to'] = delegate_to
+
+ for (k,v) in ds.iteritems():
+ if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
+ # we don't want to re-assign these values, which were
+ # determined by the ModuleArgsParser() above
+ continue
+ elif k.replace("with_", "") in lookup_loader:
+ self._preprocess_loop(ds, new_ds, k, v)
else:
- self.become=True
- if 'sudo_user' in ds:
- self.become_user = ds['sudo_user']
- del ds['sudo_user']
- if 'sudo_pass' in ds:
- self.become_pass = ds['sudo_pass']
- del ds['sudo_pass']
-
- elif 'su' in ds or 'su_user' in ds:
- self.become_method='su'
-
- if 'su' in ds:
- self.become=ds['su']
+ new_ds[k] = v
+
+ return super(Task, self).preprocess_data(new_ds)
+
+ def post_validate(self, templar):
+ '''
+ Override of base class post_validate, to also do final validation on
+ the block and task include (if any) to which this task belongs.
+ '''
+
+ if self._block:
+ self._block.post_validate(templar)
+ if self._task_include:
+ self._task_include.post_validate(templar)
+
+ super(Task, self).post_validate(templar)
+
+ def _post_validate_loop_args(self, attr, value, templar):
+ '''
+ Override post validation for the loop args field, which is templated
+ specially in the TaskExecutor class when evaluating loops.
+ '''
+ return value
+
+ def get_vars(self):
+ all_vars = self.vars.copy()
+ if self._block:
+ all_vars.update(self._block.get_vars())
+ if self._task_include:
+ all_vars.update(self._task_include.get_vars())
+
+ #if isinstance(self.args, dict):
+ # all_vars.update(self.args)
+
+ if 'tags' in all_vars:
+ del all_vars['tags']
+ if 'when' in all_vars:
+ del all_vars['when']
+ return all_vars
+
+ def copy(self, exclude_block=False):
+ new_me = super(Task, self).copy()
+
+ new_me._block = None
+ if self._block and not exclude_block:
+ new_me._block = self._block.copy()
+
+ new_me._role = None
+ if self._role:
+ new_me._role = self._role
+
+ new_me._task_include = None
+ if self._task_include:
+ new_me._task_include = self._task_include.copy()
+
+ return new_me
+
+ def serialize(self):
+ data = super(Task, self).serialize()
+
+ if self._block:
+ data['block'] = self._block.serialize()
+
+ if self._role:
+ data['role'] = self._role.serialize()
+
+ if self._task_include:
+ data['task_include'] = self._task_include.serialize()
+
+ return data
+
+ def deserialize(self, data):
+
+ # import is here to avoid import loops
+ #from ansible.playbook.task_include import TaskInclude
+
+ block_data = data.get('block')
+
+ if block_data:
+ b = Block()
+ b.deserialize(block_data)
+ self._block = b
+ del data['block']
+
+ role_data = data.get('role')
+ if role_data:
+ r = Role()
+ r.deserialize(role_data)
+ self._role = r
+ del data['role']
+
+ ti_data = data.get('task_include')
+ if ti_data:
+ #ti = TaskInclude()
+ ti = Task()
+ ti.deserialize(ti_data)
+ self._task_include = ti
+ del data['task_include']
+
+ super(Task, self).deserialize(data)
+
+ def evaluate_conditional(self, templar, all_vars):
+ if self._block is not None:
+ if not self._block.evaluate_conditional(templar, all_vars):
+ return False
+ if self._task_include is not None:
+ if not self._task_include.evaluate_conditional(templar, all_vars):
+ return False
+ return super(Task, self).evaluate_conditional(templar, all_vars)
+
+ def set_loader(self, loader):
+ '''
+ Sets the loader on this object and recursively on parent, child objects.
+ This is used primarily after the Task has been serialized/deserialized, which
+ does not preserve the loader.
+ '''
+
+ self._loader = loader
+
+ if self._block:
+ self._block.set_loader(loader)
+ if self._task_include:
+ self._task_include.set_loader(loader)
+
+ def _get_parent_attribute(self, attr, extend=False):
+ '''
+ Generic logic to get the attribute or parent attribute for a task value.
+ '''
+ value = self._attributes[attr]
+ if self._block and (value is None or extend):
+ parent_value = getattr(self._block, attr)
+ if extend:
+ value = self._extend_value(value, parent_value)
else:
- self.become=True
- del ds['su']
- if 'su_user' in ds:
- self.become_user = ds['su_user']
- del ds['su_user']
- if 'su_pass' in ds:
- self.become_pass = ds['su_pass']
- del ds['su_pass']
-
- # Both are defined
- if ('action' in ds) and ('local_action' in ds):
- raise errors.AnsibleError("the 'action' and 'local_action' attributes can not be used together")
- # Both are NOT defined
- elif (not 'action' in ds) and (not 'local_action' in ds):
- raise errors.AnsibleError("'action' or 'local_action' attribute missing in task \"%s\"" % ds.get('name', ''))
- # Only one of them is defined
- elif 'local_action' in ds:
- self.action = ds.get('local_action', '')
- self.delegate_to = '127.0.0.1'
- else:
- self.action = ds.get('action', '')
- self.delegate_to = ds.get('delegate_to', None)
- self.transport = ds.get('connection', ds.get('transport', play.transport))
-
- if isinstance(self.action, dict):
- if 'module' not in self.action:
- raise errors.AnsibleError("'module' attribute missing from action in task \"%s\"" % ds.get('name', '%s' % self.action))
- if self.args:
- raise errors.AnsibleError("'args' cannot be combined with dict 'action' in task \"%s\"" % ds.get('name', '%s' % self.action))
- self.args = self.action
- self.action = self.args.pop('module')
-
- # delegate_to can use variables
- if not (self.delegate_to is None):
- # delegate_to: localhost should use local transport
- if self.delegate_to in ['127.0.0.1', 'localhost']:
- self.transport = 'local'
-
- # notified by is used by Playbook code to flag which hosts
- # need to run a notifier
- self.notified_by = []
-
- # if no name is specified, use the action line as the name
- if self.name is None:
- self.name = self.action
-
- # load various attributes
- self.when = ds.get('when', None)
- self.changed_when = ds.get('changed_when', None)
- self.failed_when = ds.get('failed_when', None)
-
- # combine the default and module vars here for use in templating
- all_vars = self.default_vars.copy()
- all_vars = utils.combine_vars(all_vars, self.play_vars)
- all_vars = utils.combine_vars(all_vars, self.play_file_vars)
- all_vars = utils.combine_vars(all_vars, self.role_vars)
- all_vars = utils.combine_vars(all_vars, self.module_vars)
- all_vars = utils.combine_vars(all_vars, self.role_params)
-
- self.async_seconds = ds.get('async', 0) # not async by default
- self.async_seconds = template.template_from_string(play.basedir, self.async_seconds, all_vars)
- self.async_seconds = int(self.async_seconds)
- self.async_poll_interval = ds.get('poll', 10) # default poll = 10 seconds
- self.async_poll_interval = template.template_from_string(play.basedir, self.async_poll_interval, all_vars)
- self.async_poll_interval = int(self.async_poll_interval)
- self.notify = ds.get('notify', [])
- self.first_available_file = ds.get('first_available_file', None)
-
- self.items_lookup_plugin = ds.get('items_lookup_plugin', None)
- self.items_lookup_terms = ds.get('items_lookup_terms', None)
-
-
- self.ignore_errors = ds.get('ignore_errors', False)
- self.any_errors_fatal = ds.get('any_errors_fatal', play.any_errors_fatal)
-
- self.always_run = ds.get('always_run', False)
-
- # action should be a string
- if not isinstance(self.action, basestring):
- raise errors.AnsibleError("action is of type '%s' and not a string in task. name: %s" % (type(self.action).__name__, self.name))
-
- # notify can be a string or a list, store as a list
- if isinstance(self.notify, basestring):
- self.notify = [ self.notify ]
-
- # split the action line into a module name + arguments
- try:
- tokens = split_args(self.action)
- except Exception, e:
- if "unbalanced" in str(e):
- raise errors.AnsibleError("There was an error while parsing the task %s.\n" % repr(self.action) + \
- "Make sure quotes are matched or escaped properly")
+ value = parent_value
+ if self._task_include and (value is None or extend):
+ parent_value = getattr(self._task_include, attr)
+ if extend:
+ value = self._extend_value(value, parent_value)
else:
- raise
- if len(tokens) < 1:
- raise errors.AnsibleError("invalid/missing action in task. name: %s" % self.name)
- self.module_name = tokens[0]
- self.module_args = ''
- if len(tokens) > 1:
- self.module_args = " ".join(tokens[1:])
-
- import_tags = self.module_vars.get('tags',[])
- if type(import_tags) in [int,float]:
- import_tags = str(import_tags)
- elif type(import_tags) in [str,unicode]:
- # allow the user to list comma delimited tags
- import_tags = import_tags.split(",")
-
- # handle mutually incompatible options
- incompatibles = [ x for x in [ self.first_available_file, self.items_lookup_plugin ] if x is not None ]
- if len(incompatibles) > 1:
- raise errors.AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task")
-
- # make first_available_file accessible to Runner code
- if self.first_available_file:
- self.module_vars['first_available_file'] = self.first_available_file
- # make sure that the 'item' variable is set when using
- # first_available_file (issue #8220)
- if 'item' not in self.module_vars:
- self.module_vars['item'] = ''
-
- if self.items_lookup_plugin is not None:
- self.module_vars['items_lookup_plugin'] = self.items_lookup_plugin
- self.module_vars['items_lookup_terms'] = self.items_lookup_terms
-
- # allow runner to see delegate_to option
- self.module_vars['delegate_to'] = self.delegate_to
-
- # make some task attributes accessible to Runner code
- self.module_vars['ignore_errors'] = self.ignore_errors
- self.module_vars['register'] = self.register
- self.module_vars['changed_when'] = self.changed_when
- self.module_vars['failed_when'] = self.failed_when
- self.module_vars['always_run'] = self.always_run
-
- # tags allow certain parts of a playbook to be run without running the whole playbook
- apply_tags = ds.get('tags', None)
- if apply_tags is not None:
- if type(apply_tags) in [ str, unicode ]:
- self.tags.append(apply_tags)
- elif type(apply_tags) in [ int, float ]:
- self.tags.append(str(apply_tags))
- elif type(apply_tags) == list:
- self.tags.extend(apply_tags)
- self.tags.extend(import_tags)
-
- if len(self.tags) > 1:
- self.tags.remove('untagged')
-
- if additional_conditions:
- new_conditions = additional_conditions[:]
- if self.when:
- new_conditions.append(self.when)
- self.when = new_conditions
+ value = parent_value
+ return value
+
+ def _get_attr_environment(self):
+ '''
+ Override for the 'environment' getattr fetcher, used from Base.
+ '''
+ environment = self._attributes['environment']
+ if environment is None:
+ environment = dict()
+
+ environment = self._get_parent_attribute('environment', extend=True)
+
+ return environment
+
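
Read alongside the new preprocess_data()/_preprocess_loop() above, the following standalone sketch shows how a legacy with_<lookup> key is rewritten into the internal loop/loop_args pair. It is illustrative only: KNOWN_LOOKUPS stands in for lookup_loader, and the real method also runs ModuleArgsParser and defers to the base class.

# Minimal sketch (not the real Task class) of the with_* normalization above.
KNOWN_LOOKUPS = {'items', 'dict', 'fileglob', 'first_found'}  # stand-in for lookup_loader

def normalize_task(ds):
    new_ds = {}
    for k, v in ds.items():
        lookup_name = k[len('with_'):] if k.startswith('with_') else None
        if lookup_name in KNOWN_LOOKUPS:
            if new_ds.get('loop') is not None:
                raise ValueError("duplicate loop in task: %s" % lookup_name)
            if v is None:
                raise ValueError("you must specify a value when using %s" % k)
            new_ds['loop'] = lookup_name
            new_ds['loop_args'] = v
        else:
            new_ds[k] = v
    return new_ds

# normalize_task({'user': 'name={{ item }}', 'with_items': ['alice', 'bob']})
# -> {'user': 'name={{ item }}', 'loop': 'items', 'loop_args': ['alice', 'bob']}
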
diff --git a/v2/ansible/playbook/vars.py b/lib/ansible/playbook/vars.py
similarity index 100%
rename from v2/ansible/playbook/vars.py
rename to lib/ansible/playbook/vars.py
diff --git a/v2/ansible/playbook/vars_file.py b/lib/ansible/playbook/vars_file.py
similarity index 100%
rename from v2/ansible/playbook/vars_file.py
rename to lib/ansible/playbook/vars_file.py
diff --git a/v2/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py
similarity index 75%
rename from v2/ansible/plugins/__init__.py
rename to lib/ansible/plugins/__init__.py
index 5791677bd26..4054c616335 100644
--- a/v2/ansible/plugins/__init__.py
+++ b/lib/ansible/plugins/__init__.py
@@ -28,17 +28,28 @@ import os.path
import sys
from ansible import constants as C
-from ansible.utils.display import Display
+from ansible.utils.unicode import to_unicode
from ansible import errors
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
MODULE_CACHE = {}
PATH_CACHE = {}
PLUGIN_PATH_CACHE = {}
_basedirs = []
+# FIXME: the _basedirs code may be dead, and no longer needed, as
+# we now use add_directory for all plugin types here instead
+# of relying on this global variable (which also causes problems
+# with forked processes). See the Playbook() and Role() classes
+# for how we now use get_all_plugin_loaders() below.
def push_basedir(basedir):
# avoid pushing the same absolute dir more than once
- basedir = os.path.realpath(basedir)
+ basedir = to_unicode(os.path.realpath(basedir))
if basedir not in _basedirs:
_basedirs.insert(0, basedir)
@@ -55,9 +66,10 @@ class PluginLoader:
The first match is used.
'''
- def __init__(self, class_name, package, config, subdir, aliases={}):
+ def __init__(self, class_name, package, config, subdir, aliases={}, required_base_class=None):
self.class_name = class_name
+ self.base_class = required_base_class
self.package = package
self.config = config
self.subdir = subdir
@@ -77,6 +89,43 @@ class PluginLoader:
self._extra_dirs = []
self._searched_paths = set()
+ def __setstate__(self, data):
+ '''
+ Deserializer.
+ '''
+
+ class_name = data.get('class_name')
+ package = data.get('package')
+ config = data.get('config')
+ subdir = data.get('subdir')
+ aliases = data.get('aliases')
+ base_class = data.get('base_class')
+
+ PATH_CACHE[class_name] = data.get('PATH_CACHE')
+ PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE')
+
+ self.__init__(class_name, package, config, subdir, aliases, base_class)
+ self._extra_dirs = data.get('_extra_dirs', [])
+ self._searched_paths = data.get('_searched_paths', set())
+
+ def __getstate__(self):
+ '''
+ Serializer.
+ '''
+
+ return dict(
+ class_name = self.class_name,
+ base_class = self.base_class,
+ package = self.package,
+ config = self.config,
+ subdir = self.subdir,
+ aliases = self.aliases,
+ _extra_dirs = self._extra_dirs,
+ _searched_paths = self._searched_paths,
+ PATH_CACHE = PATH_CACHE[self.class_name],
+ PLUGIN_PATH_CACHE = PLUGIN_PATH_CACHE[self.class_name],
+ )
+
def print_paths(self):
''' Returns a string suitable for printing of the search path '''
@@ -186,8 +235,7 @@ class PluginLoader:
try:
full_paths = (os.path.join(path, f) for f in os.listdir(path))
except OSError as e:
- d = Display()
- d.warning("Error accessing plugin paths: %s" % str(e))
+ display.warning("Error accessing plugin paths: %s" % str(e))
for full_path in (f for f in full_paths if os.path.isfile(f)):
for suffix in suffixes:
if full_path.endswith(suffix):
@@ -209,6 +257,12 @@ class PluginLoader:
for alias_name in ('_%s' % n for n in potential_names):
# We've already cached all the paths at this point
if alias_name in self._plugin_path_cache:
+ if not os.path.islink(self._plugin_path_cache[alias_name]):
+ display.deprecated('%s is kept for backwards compatibility '
+ 'but usage is discouraged. The module '
+ 'documentation details page may explain '
+ 'more about this rationale.' %
+ name.lstrip('_'))
return self._plugin_path_cache[alias_name]
return None
@@ -228,12 +282,18 @@ class PluginLoader:
path = self.find_plugin(name)
if path is None:
return None
- elif kwargs.get('class_only', False):
- return getattr(self._module_cache[path], self.class_name)
if path not in self._module_cache:
self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
- return getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
+
+ if kwargs.get('class_only', False):
+ obj = getattr(self._module_cache[path], self.class_name)
+ else:
+ obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
+ if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]:
+ return None
+
+ return obj
def all(self, *args, **kwargs):
''' instantiates all plugins with the same arguments '''
@@ -245,12 +305,18 @@ class PluginLoader:
name, ext = os.path.splitext(os.path.basename(path))
if name.startswith("_"):
continue
+
if path not in self._module_cache:
self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
+
if kwargs.get('class_only', False):
obj = getattr(self._module_cache[path], self.class_name)
else:
obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
+
+ if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]:
+ continue
+
# set extra info on the module, in case we want it later
setattr(obj, '_original_path', path)
yield obj
@@ -259,21 +325,22 @@ action_loader = PluginLoader(
'ActionModule',
'ansible.plugins.action',
C.DEFAULT_ACTION_PLUGIN_PATH,
- 'action_plugins'
+ 'action_plugins',
+ required_base_class='ActionBase',
)
cache_loader = PluginLoader(
'CacheModule',
'ansible.plugins.cache',
C.DEFAULT_CACHE_PLUGIN_PATH,
- 'cache_plugins'
+ 'cache_plugins',
)
callback_loader = PluginLoader(
'CallbackModule',
'ansible.plugins.callback',
C.DEFAULT_CALLBACK_PLUGIN_PATH,
- 'callback_plugins'
+ 'callback_plugins',
)
connection_loader = PluginLoader(
@@ -281,7 +348,8 @@ connection_loader = PluginLoader(
'ansible.plugins.connections',
C.DEFAULT_CONNECTION_PLUGIN_PATH,
'connection_plugins',
- aliases={'paramiko': 'paramiko_ssh'}
+ aliases={'paramiko': 'paramiko_ssh'},
+ required_base_class='ConnectionBase',
)
shell_loader = PluginLoader(
@@ -295,28 +363,36 @@ module_loader = PluginLoader(
'',
'ansible.modules',
C.DEFAULT_MODULE_PATH,
- 'library'
+ 'library',
)
lookup_loader = PluginLoader(
'LookupModule',
'ansible.plugins.lookup',
C.DEFAULT_LOOKUP_PLUGIN_PATH,
- 'lookup_plugins'
+ 'lookup_plugins',
+ required_base_class='LookupBase',
)
vars_loader = PluginLoader(
'VarsModule',
'ansible.plugins.vars',
C.DEFAULT_VARS_PLUGIN_PATH,
- 'vars_plugins'
+ 'vars_plugins',
)
filter_loader = PluginLoader(
'FilterModule',
'ansible.plugins.filter',
C.DEFAULT_FILTER_PLUGIN_PATH,
- 'filter_plugins'
+ 'filter_plugins',
+)
+
+test_loader = PluginLoader(
+ 'TestModule',
+ 'ansible.plugins.test',
+ C.DEFAULT_TEST_PLUGIN_PATH,
+ 'test_plugins'
)
fragment_loader = PluginLoader(
@@ -331,4 +407,5 @@ strategy_loader = PluginLoader(
'ansible.plugins.strategies',
None,
'strategy_plugins',
+ required_base_class='StrategyBase',
)
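
To illustrate the required_base_class guard the loaders above now pass to PluginLoader: a candidate plugin class is accepted only when one of its immediate base classes carries the expected name. The classes below are invented for the example; only the membership test mirrors the new get()/all() code.

class LookupBase(object):
    pass

class FileLookup(LookupBase):    # accepted by a loader built with required_base_class='LookupBase'
    pass

class NotReallyAPlugin(object):  # rejected: get() returns None, all() skips it
    pass

def passes_base_class_check(obj, required_base_class):
    # mirrors the check added to PluginLoader.get() and PluginLoader.all() above
    if not required_base_class:
        return True
    return required_base_class in [base.__name__ for base in obj.__class__.__bases__]

assert passes_base_class_check(FileLookup(), 'LookupBase')
assert not passes_base_class_check(NotReallyAPlugin(), 'LookupBase')
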
diff --git a/v2/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py
similarity index 57%
rename from v2/ansible/plugins/action/__init__.py
rename to lib/ansible/plugins/action/__init__.py
index 62036cc7068..b648f0edae2 100644
--- a/v2/ansible/plugins/action/__init__.py
+++ b/lib/ansible/plugins/action/__init__.py
@@ -20,10 +20,12 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six.moves import StringIO
+import base64
import json
import os
import random
-import sys # FIXME: probably not needed
+import stat
+import sys
import tempfile
import time
@@ -31,9 +33,13 @@ from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.module_common import modify_module
from ansible.parsing.utils.jsonify import jsonify
-from ansible.plugins import shell_loader
+from ansible.utils.unicode import to_bytes
-from ansible.utils.debug import debug
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
class ActionBase:
@@ -44,28 +50,18 @@ class ActionBase:
action in use.
'''
- def __init__(self, task, connection, connection_info, loader, shared_loader_obj):
+ def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
self._task = task
self._connection = connection
- self._connection_info = connection_info
+ self._play_context = play_context
self._loader = loader
+ self._templar = templar
self._shared_loader_obj = shared_loader_obj
- self._shell = self.get_shell()
+ self._display = display
self._supports_check_mode = True
- def get_shell(self):
-
- if hasattr(self._connection, '_shell'):
- shell_plugin = getattr(self._connection, '_shell', '')
- else:
- shell_plugin = shell_loader.get(os.path.basename(C.DEFAULT_EXECUTABLE))
- if shell_plugin is None:
- shell_plugin = shell_loader.get('sh')
-
- return shell_plugin
-
- def _configure_module(self, module_name, module_args):
+ def _configure_module(self, module_name, module_args, task_vars=dict()):
'''
Handles the loading and templating of the module code through the
modify_module() function.
@@ -73,9 +69,29 @@ class ActionBase:
# Search module path(s) for named module.
module_suffixes = getattr(self._connection, 'default_suffixes', None)
+
+ # Check to determine if PowerShell modules are supported, and apply
+ # some fixes (hacks) to module name + args.
+ if module_suffixes and '.ps1' in module_suffixes:
+ # Use Windows versions of stat/file/copy modules when called from
+ # within other action plugins.
+ if module_name in ('stat', 'file', 'copy') and self._task.action != module_name:
+ module_name = 'win_%s' % module_name
+ # Remove extra quotes surrounding path parameters before sending to module.
+ if module_name in ('win_stat', 'win_file', 'win_copy', 'slurp') and module_args and hasattr(self._connection._shell, '_unquote'):
+ for key in ('src', 'dest', 'path'):
+ if key in module_args:
+ module_args[key] = self._connection._shell._unquote(module_args[key])
+
module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, module_suffixes)
if module_path is None:
- module_path2 = self._shared_loader_obj.module_loader.find_plugin('ping', module_suffixes)
+ # Use Windows version of ping module to check module paths when
+ # using a connection that supports .ps1 suffixes.
+ if module_suffixes and '.ps1' in module_suffixes:
+ ping_module = 'win_ping'
+ else:
+ ping_module = 'ping'
+ module_path2 = self._shared_loader_obj.module_loader.find_plugin(ping_module, module_suffixes)
if module_path2 is not None:
raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
else:
@@ -84,7 +100,7 @@ class ActionBase:
"run 'git submodule update --init --recursive' to correct this problem." % (module_name))
# insert shared code and arguments into the module
- (module_data, module_style, module_shebang) = modify_module(module_path, module_args)
+ (module_data, module_style, module_shebang) = modify_module(module_path, module_args, task_vars=task_vars)
return (module_style, module_shebang, module_data)
@@ -93,16 +109,20 @@ class ActionBase:
Builds the environment string to be used when executing the remote task.
'''
- enviro = {}
+ final_environment = dict()
+ if self._task.environment is not None:
+ environments = self._task.environment
+ if not isinstance(environments, list):
+ environments = [ environments ]
- # FIXME: not sure where this comes from, probably task but maybe also the play?
- #if self.environment:
- # enviro = template.template(self.basedir, self.environment, inject, convert_bare=True)
- # enviro = utils.safe_eval(enviro)
- # if type(enviro) != dict:
- # raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro)
+ for environment in environments:
+ if not isinstance(environment, dict):
+ raise AnsibleError("environment must be a dictionary, received %s (%s)" % (environment, type(environment)))
+ # very deliberately using update here instead of combine_vars, as
+ # these environment settings should not need to merge sub-dicts
+ final_environment.update(environment)
- return self._shell.env_prefix(**enviro)
+ return self._connection._shell.env_prefix(**final_environment)
def _early_needs_tmp_path(self):
'''
@@ -120,10 +140,7 @@ class ActionBase:
if tmp and "tmp" in tmp:
# tmp has already been created
return False
- if not self._connection.__class__.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._connection_info.become:
- # tmp is necessary to store module source code
- return True
- if not self._connection.__class__.has_pipelining:
+ if not self._connection.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._play_context.become:
# tmp is necessary to store the module source code
# or we want to keep the files on the target system
return True
@@ -142,29 +159,29 @@ class ActionBase:
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
use_system_tmp = False
- if self._connection_info.become and self._connection_info.become_user != 'root':
+ if self._play_context.become and self._play_context.become_user != 'root':
use_system_tmp = True
tmp_mode = None
- if self._connection_info.remote_user != 'root' or self._connection_info.become and self._connection_info.become_user != 'root':
+ if self._play_context.remote_user != 'root' or self._play_context.become and self._play_context.become_user != 'root':
tmp_mode = 'a+rx'
- cmd = self._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
- debug("executing _low_level_execute_command to create the tmp path")
+ cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
+ self._display.debug("executing _low_level_execute_command to create the tmp path")
result = self._low_level_execute_command(cmd, None, sudoable=False)
- debug("done with creation of tmp path")
+ self._display.debug("done with creation of tmp path")
# error handling on this seems a little aggressive?
if result['rc'] != 0:
if result['rc'] == 5:
output = 'Authentication failure.'
elif result['rc'] == 255 and self._connection.transport in ('ssh',):
- # FIXME: more utils.VERBOSITY
- #if utils.VERBOSITY > 3:
- # output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
- #else:
- # output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue'
- output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
+
+ if self._play_context.verbosity > 3:
+ output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
+ else:
+ output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue'
+
elif 'No space left on device' in result['stderr']:
output = result['stderr']
else:
@@ -174,8 +191,8 @@ class ActionBase:
raise AnsibleError(output)
# FIXME: do we still need to do this?
- #rc = self._shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
- rc = self._shell.join_path(result['stdout'].strip(), '').splitlines()[-1]
+ #rc = self._connection._shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
+ rc = self._connection._shell.join_path(result['stdout'].strip(), '').splitlines()[-1]
# Catch failure conditions, files should never be
# written to locations in /.
@@ -188,30 +205,25 @@ class ActionBase:
'''Remove a temporary path we created. '''
if tmp_path and "-tmp-" in tmp_path:
- cmd = self._shell.remove(tmp_path, recurse=True)
+ cmd = self._connection._shell.remove(tmp_path, recurse=True)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
- debug("calling _low_level_execute_command to remove the tmp path")
+ self._display.debug("calling _low_level_execute_command to remove the tmp path")
self._low_level_execute_command(cmd, None, sudoable=False)
- debug("done removing the tmp path")
+ self._display.debug("done removing the tmp path")
def _transfer_data(self, remote_path, data):
'''
Copies the module data out to the temporary module path.
'''
- if type(data) == dict:
+ if isinstance(data, dict):
data = jsonify(data)
afd, afile = tempfile.mkstemp()
afo = os.fdopen(afd, 'w')
try:
- # FIXME: is this still necessary?
- #if not isinstance(data, unicode):
- # #ensure the data is valid UTF-8
- # data = data.decode('utf-8')
- #else:
- # data = data.encode('utf-8')
+ data = to_bytes(data, errors='strict')
afo.write(data)
except Exception as e:
#raise AnsibleError("failure encoding into utf-8: %s" % str(e))
@@ -232,10 +244,10 @@ class ActionBase:
Issue a remote chmod command
'''
- cmd = self._shell.chmod(mode, path)
- debug("calling _low_level_execute_command to chmod the remote path")
+ cmd = self._connection._shell.chmod(mode, path)
+ self._display.debug("calling _low_level_execute_command to chmod the remote path")
res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable)
- debug("done with chmod call")
+ self._display.debug("done with chmod call")
return res
def _remote_checksum(self, tmp, path):
@@ -243,14 +255,13 @@ class ActionBase:
Takes a remote checksum and returns 1 if no file
'''
- # FIXME: figure out how this will work, probably pulled from the
- # variable manager data
+ # FIXME: figure out how this will work, probably pulled from the variable manager data
#python_interp = inject['hostvars'][inject['inventory_hostname']].get('ansible_python_interpreter', 'python')
python_interp = 'python'
- cmd = self._shell.checksum(path, python_interp)
- debug("calling _low_level_execute_command to get the remote checksum")
+ cmd = self._connection._shell.checksum(path, python_interp)
+ self._display.debug("calling _low_level_execute_command to get the remote checksum")
data = self._low_level_execute_command(cmd, tmp, sudoable=True)
- debug("done getting the remote checksum")
+ self._display.debug("done getting the remote checksum")
# FIXME: implement this function?
#data2 = utils.last_non_blank_line(data['stdout'])
try:
@@ -274,19 +285,20 @@ class ActionBase:
def _remote_expand_user(self, path, tmp):
''' takes a remote path and performs tilde expansion on the remote host '''
- if not path.startswith('~'):
+ if not path.startswith('~'): # FIXME: Windows paths may start with '"~' (a quoted tilde) instead of just '~'
return path
+ # FIXME: Can't use os.path.sep for Windows paths.
split_path = path.split(os.path.sep, 1)
expand_path = split_path[0]
if expand_path == '~':
- if self._connection_info.become and self._connection_info.become_user:
- expand_path = '~%s' % self._connection_info.become_user
+ if self._play_context.become and self._play_context.become_user:
+ expand_path = '~%s' % self._play_context.become_user
- cmd = self._shell.expand_user(expand_path)
- debug("calling _low_level_execute_command to expand the remote user path")
+ cmd = self._connection._shell.expand_user(expand_path)
+ self._display.debug("calling _low_level_execute_command to expand the remote user path")
data = self._low_level_execute_command(cmd, tmp, sudoable=False)
- debug("done expanding the remote user path")
+ self._display.debug("done expanding the remote user path")
#initial_fragment = utils.last_non_blank_line(data['stdout'])
initial_fragment = data['stdout'].strip().splitlines()[-1]
@@ -296,7 +308,7 @@ class ActionBase:
return path
if len(split_path) > 1:
- return self._shell.join_path(initial_fragment, *split_path[1:])
+ return self._connection._shell.join_path(initial_fragment, *split_path[1:])
else:
return initial_fragment
@@ -317,7 +329,7 @@ class ActionBase:
filtered_lines.write(line + '\n')
return filtered_lines.getvalue()
- def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_files=False, delete_remote_tmp=True):
+ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=dict(), persist_files=False, delete_remote_tmp=True):
'''
Transfer and run a module along with its arguments.
'''
@@ -330,18 +342,18 @@ class ActionBase:
module_args = self._task.args
# set check mode in the module arguments, if required
- if self._connection_info.check_mode and not self._task.always_run:
+ if self._play_context.check_mode and not self._task.always_run:
if not self._supports_check_mode:
raise AnsibleError("check mode is not supported for this operation")
module_args['_ansible_check_mode'] = True
# set no log in the module arguments, if required
- if self._connection_info.no_log:
+ if self._play_context.no_log:
module_args['_ansible_no_log'] = True
- debug("in _execute_module (%s, %s)" % (module_name, module_args))
+ self._display.debug("in _execute_module (%s, %s)" % (module_name, module_args))
- (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args)
+ (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
if not shebang:
raise AnsibleError("module is missing interpreter line")
@@ -349,18 +361,20 @@ class ActionBase:
remote_module_path = None
if not tmp and self._late_needs_tmp_path(tmp, module_style):
tmp = self._make_tmp_path()
- remote_module_path = self._shell.join_path(tmp, module_name)
+
+ if tmp:
+ remote_module_path = self._connection._shell.join_path(tmp, module_name)
# FIXME: async stuff here?
#if (module_style != 'new' or async_jid is not None or not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES):
if remote_module_path:
- debug("transferring module to remote")
+ self._display.debug("transferring module to remote")
self._transfer_data(remote_module_path, module_data)
- debug("done transferring module to remote")
+ self._display.debug("done transferring module to remote")
environment_string = self._compute_environment_string()
- if tmp and "tmp" in tmp and self._connection_info.become and self._connection_info.become_user != 'root':
+ if tmp and "tmp" in tmp and self._play_context.become and self._play_context.become_user != 'root':
# deal with possible umask issues once sudo'ed to other user
self._remote_chmod(tmp, 'a+r', remote_module_path)
@@ -370,7 +384,7 @@ class ActionBase:
# FIXME: all of the old-module style and async stuff has been removed from here, and
# might need to be re-added (unless we decide to drop support for old-style modules
# at this point and rework things to support non-python modules specifically)
- if self._connection.__class__.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES:
+ if self._connection.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES:
in_data = module_data
else:
if remote_module_path:
@@ -378,11 +392,11 @@ class ActionBase:
rm_tmp = None
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
- if not self._connection_info.become or self._connection_info.become_user == 'root':
+ if not self._play_context.become or self._play_context.become_user == 'root':
# not sudoing or sudoing to root, so can cleanup files in the same step
rm_tmp = tmp
- cmd = self._shell.build_module_command(environment_string, shebang, cmd, rm_tmp)
+ cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, rm_tmp)
cmd = cmd.strip()
sudoable = True
@@ -391,15 +405,15 @@ class ActionBase:
# specified in the play, not the sudo_user
sudoable = False
- debug("calling _low_level_execute_command() for command %s" % cmd)
+ self._display.debug("calling _low_level_execute_command() for command %s" % cmd)
res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable, in_data=in_data)
- debug("_low_level_execute_command returned ok")
+ self._display.debug("_low_level_execute_command returned ok")
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
- if self._connection_info.become and self._connection_info.become_user != 'root':
+ if self._play_context.become and self._play_context.become_user != 'root':
# not sudoing to root, so maybe can't delete files as that other user
# have to clean up temp files as original user in a second step
- cmd2 = self._shell.remove(tmp, recurse=True)
+ cmd2 = self._connection._shell.remove(tmp, recurse=True)
self._low_level_execute_command(cmd2, tmp, sudoable=False)
try:
@@ -408,7 +422,7 @@ class ActionBase:
# not valid json, lets try to capture error
data = dict(failed=True, parsed=False)
if 'stderr' in res and res['stderr'].startswith('Traceback'):
- data['traceback'] = res['stderr']
+ data['exception'] = res['stderr']
else:
data['msg'] = res.get('stdout', '')
if 'stderr' in res:
@@ -420,39 +434,37 @@ class ActionBase:
data['stdout_lines'] = data.get('stdout', '').splitlines()
# store the module invocation details back into the result
- data['invocation'] = dict(
- module_args = module_args,
- module_name = module_name,
- )
+ if self._task.async != 0:
+ data['invocation'] = dict(
+ module_args = module_args,
+ module_name = module_name,
+ )
- debug("done with _execute_module (%s, %s)" % (module_name, module_args))
+ self._display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
return data
- def _low_level_execute_command(self, cmd, tmp, executable=None, sudoable=True, in_data=None):
+ def _low_level_execute_command(self, cmd, tmp, sudoable=True, in_data=None, executable=None):
'''
This is the function which executes the low level shell command, which
may be commands to create/remove directories for temporary files, or to
run the module code or python directly when pipelining.
'''
- debug("in _low_level_execute_command() (%s)" % (cmd,))
+ if executable is not None:
+ cmd = executable + ' -c ' + cmd
+
+ self._display.debug("in _low_level_execute_command() (%s)" % (cmd,))
if not cmd:
# this can happen with powershell modules when there is no analog to a Windows command (like chmod)
- debug("no command, exiting _low_level_execute_command()")
+ self._display.debug("no command, exiting _low_level_execute_command()")
return dict(stdout='', stderr='')
- if executable is None:
- executable = C.DEFAULT_EXECUTABLE
-
- prompt = None
- success_key = None
-
if sudoable:
- cmd, prompt, success_key = self._connection_info.make_become_cmd(cmd, executable)
+ cmd = self._play_context.make_become_cmd(cmd, executable=executable)
- debug("executing the command %s through the connection" % cmd)
- rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, executable=executable, in_data=in_data)
- debug("command execution done")
+ self._display.debug("executing the command %s through the connection" % cmd)
+ rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, in_data=in_data, sudoable=sudoable)
+ self._display.debug("command execution done")
if not isinstance(stdout, basestring):
out = ''.join(stdout.readlines())
@@ -464,8 +476,80 @@ class ActionBase:
else:
err = stderr
- debug("done with _low_level_execute_command() (%s)" % (cmd,))
- if rc is not None:
- return dict(rc=rc, stdout=out, stderr=err)
- else:
- return dict(stdout=out, stderr=err)
+ self._display.debug("done with _low_level_execute_command() (%s)" % (cmd,))
+ if rc is None:
+ rc = 0
+
+ return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err)
+
+ def _get_first_available_file(self, faf, of=None, searchdir='files'):
+
+ self._display.deprecated("first_available_file, use with_first_found or lookup('first_found',...) instead")
+ for fn in faf:
+ fn_orig = fn
+ fnt = self._templar.template(fn)
+ if self._task._role is not None:
+ lead = self._task._role._role_path
+ else:
+ lead = fnt
+ fnd = self._loader.path_dwim_relative(lead, searchdir, fnt)
+
+ if not os.path.exists(fnd) and of is not None:
+ if self._task._role is not None:
+ lead = self._task._role._role_path
+ else:
+ lead = of
+ fnd = self._loader.path_dwim_relative(lead, searchdir, of)
+
+ if os.path.exists(fnd):
+ return fnd
+
+ return None
+
+ def _get_diff_data(self, tmp, destination, source, task_vars, source_file=True):
+
+ diff = {}
+ self._display.debug("Going to peek to see if file has changed permissions")
+ peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), task_vars=task_vars, persist_files=True)
+
+ if not('failed' in peek_result and peek_result['failed']) or peek_result.get('rc', 0) == 0:
+
+ if peek_result['state'] == 'absent':
+ diff['before'] = ''
+ elif peek_result['appears_binary']:
+ diff['dst_binary'] = 1
+ elif peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
+ diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
+ else:
+ self._display.debug("Slurping the file %s" % source)
+ dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), task_vars=task_vars, persist_files=True)
+ if 'content' in dest_result:
+ dest_contents = dest_result['content']
+ if dest_result['encoding'] == 'base64':
+ dest_contents = base64.b64decode(dest_contents)
+ else:
+ raise AnsibleError("unknown encoding in content option, failed: %s" % dest_result)
+ diff['before_header'] = destination
+ diff['before'] = dest_contents
+
+ if source_file:
+ self._display.debug("Reading local copy of the file %s" % source)
+ try:
+ src = open(source)
+ src_contents = src.read(8192)
+ st = os.stat(source)
+ except Exception as e:
+ raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e)))
+ if "\x00" in src_contents:
+ diff['src_binary'] = 1
+ elif st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
+ diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
+ else:
+ diff['after_header'] = source
+ src.seek(0) # rewind past the binary-sniff read above so the whole file lands in the diff
+ diff['after'] = src.read()
+ else:
+ self._display.debug("source of file passed in")
+ diff['after_header'] = 'dynamically generated'
+ diff['after'] = source
+
+ return diff
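
The reworked _compute_environment_string() above accepts either a single dict or a list of dicts and merges them with plain dict.update(), so later entries win and nested structures are not merged. A small standalone sketch of that rule (the function name is ours, not Ansible's):

def merge_environments(environments):
    # mirrors the merge rule in _compute_environment_string above
    if environments is None:
        return {}
    if not isinstance(environments, list):
        environments = [environments]
    final_environment = {}
    for environment in environments:
        if not isinstance(environment, dict):
            raise TypeError("environment must be a dictionary, received %r" % (environment,))
        final_environment.update(environment)
    return final_environment

# merge_environments([{'http_proxy': 'proxy-a:3128'}, {'http_proxy': 'proxy-b:3128', 'LANG': 'C'}])
# -> {'http_proxy': 'proxy-b:3128', 'LANG': 'C'}
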
diff --git a/v2/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py
similarity index 91%
rename from v2/ansible/plugins/action/add_host.py
rename to lib/ansible/plugins/action/add_host.py
index e28361b7145..12c9febe95d 100644
--- a/v2/ansible/plugins/action/add_host.py
+++ b/lib/ansible/plugins/action/add_host.py
@@ -31,9 +31,8 @@ class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=dict()):
- # FIXME: is this necessary in v2?
- #if self.runner.noop_on_check(inject):
- # return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
+ if self._play_context.check_mode:
+ return dict(skipped=True, msg='check mode not supported for this module')
# Parse out any hostname:port patterns
new_name = self._task.args.get('name', self._task.args.get('hostname', None))
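
The comment above mentions splitting host:port patterns out of the new host's name; a naive sketch of such a split is shown below. It is an assumption covering only the simple case and ignores IPv6 literals, which the real module has to treat separately.

def split_host_port(name):
    # naive host:port split for the simple case mentioned above
    if name and ':' in name:
        host, _, port = name.rpartition(':')
        if port.isdigit():
            return host, int(port)
    return name, None

# split_host_port('db1.example.com:5432') -> ('db1.example.com', 5432)
# split_host_port('db1.example.com')      -> ('db1.example.com', None)
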
diff --git a/v2/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py
similarity index 86%
rename from v2/ansible/plugins/action/assemble.py
rename to lib/ansible/plugins/action/assemble.py
index 4e796bddb6f..454e28aa34e 100644
--- a/v2/ansible/plugins/action/assemble.py
+++ b/lib/ansible/plugins/action/assemble.py
@@ -34,7 +34,7 @@ class ActionModule(ActionBase):
TRANSFERS_FILES = True
- def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None):
+ def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False):
''' assemble a file from a directory of fragments '''
tmpfd, temp_path = tempfile.mkstemp()
@@ -46,7 +46,7 @@ class ActionModule(ActionBase):
if compiled_regexp and not compiled_regexp.search(f):
continue
fragment = "%s/%s" % (src_path, f)
- if not os.path.isfile(fragment):
+ if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
continue
fragment_content = file(fragment).read()
@@ -77,17 +77,22 @@ class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=dict()):
+ if self._play_context.check_mode:
+ return dict(skipped=True, msg=("skipped, this module does not support check_mode."))
+
src = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
delimiter = self._task.args.get('delimiter', None)
remote_src = self._task.args.get('remote_src', 'yes')
regexp = self._task.args.get('regexp', None)
+ ignore_hidden = self._task.args.get('ignore_hidden', False)
+
if src is None or dest is None:
return dict(failed=True, msg="src and dest are required")
if boolean(remote_src):
- return self._execute_module(tmp=tmp)
+ return self._execute_module(tmp=tmp, task_vars=task_vars)
elif self._task._role is not None:
src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src)
else:
@@ -99,7 +104,7 @@ class ActionModule(ActionBase):
_re = re.compile(regexp)
# Does all work assembling the file
- path = self._assemble_from_fragments(src, delimiter, _re)
+ path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden)
path_checksum = checksum_s(path)
dest = self._remote_expand_user(dest, tmp)
@@ -109,7 +114,7 @@ class ActionModule(ActionBase):
resultant = file(path).read()
# FIXME: diff needs to be moved somewhere else
#if self.runner.diff:
- # dest_result = self._execute_module(module_name='slurp', module_args=dict(path=dest), tmp=tmp, persist_files=True)
+ # dest_result = self._execute_module(module_name='slurp', module_args=dict(path=dest), task_vars=task_vars, tmp=tmp, persist_files=True)
# if 'content' in dest_result:
# dest_contents = dest_result['content']
# if dest_result['encoding'] == 'base64':
@@ -119,11 +124,11 @@ class ActionModule(ActionBase):
xfered = self._transfer_data('src', resultant)
# fix file permissions when the copy is done as a different user
- if self._connection_info.become and self._connection_info.become_user != 'root':
+ if self._play_context.become and self._play_context.become_user != 'root':
self._remote_chmod('a+r', xfered, tmp)
# run the copy module
-
+
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
@@ -133,14 +138,8 @@ class ActionModule(ActionBase):
)
)
- # FIXME: checkmode stuff
- #if self.runner.noop_on_check(inject):
- # return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=src, after=resultant))
- #else:
- # res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject)
- # res.diff = dict(after=resultant)
- # return res
- res = self._execute_module(module_name='copy', module_args=new_module_args, tmp=tmp)
+ res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp)
+ # FIXME: diff stuff
#res.diff = dict(after=resultant)
return res
else:
@@ -153,4 +152,4 @@ class ActionModule(ActionBase):
)
)
- return self._execute_module(module_name='file', module_args=new_module_args, tmp=tmp)
+ return self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp)
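
For reference, a compact sketch of the fragment-assembly behaviour the new ignore_hidden flag affects: fragments are read in sorted order, non-files are skipped, and dotfiles are skipped when ignore_hidden is set. Delimiter and regexp handling are omitted here.

import os

def assemble_from_fragments(src_path, ignore_hidden=False):
    # simplified version of _assemble_from_fragments; no delimiter or regexp support
    parts = []
    for f in sorted(os.listdir(src_path)):
        fragment = os.path.join(src_path, f)
        if not os.path.isfile(fragment) or (ignore_hidden and f.startswith('.')):
            continue
        with open(fragment) as fh:
            parts.append(fh.read())
    return ''.join(parts)
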
diff --git a/v2/ansible/plugins/action/assert.py b/lib/ansible/plugins/action/assert.py
similarity index 95%
rename from v2/ansible/plugins/action/assert.py
rename to lib/ansible/plugins/action/assert.py
index 5c4fdd7b89c..d39484f3663 100644
--- a/v2/ansible/plugins/action/assert.py
+++ b/lib/ansible/plugins/action/assert.py
@@ -48,7 +48,7 @@ class ActionModule(ActionBase):
cond = Conditional(loader=self._loader)
for that in thats:
cond.when = [ that ]
- test_result = cond.evaluate_conditional(all_vars=task_vars)
+ test_result = cond.evaluate_conditional(templar=self._templar, all_vars=task_vars)
if not test_result:
result = dict(
failed = True,
diff --git a/v2/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py
similarity index 80%
rename from v2/ansible/plugins/action/async.py
rename to lib/ansible/plugins/action/async.py
index 7c02e09757e..b2fcd8756dd 100644
--- a/v2/ansible/plugins/action/async.py
+++ b/lib/ansible/plugins/action/async.py
@@ -28,30 +28,29 @@ class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=dict()):
''' transfer the given module name, plus the async module, then run it '''
- # FIXME: noop stuff needs to be sorted ut
- #if self.runner.noop_on_check(inject):
- # return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
+ if self._play_context.check_mode:
+ return dict(skipped=True, msg='check mode not supported for this module')
if not tmp:
tmp = self._make_tmp_path()
module_name = self._task.action
- async_module_path = self._shell.join_path(tmp, 'async_wrapper')
- remote_module_path = self._shell.join_path(tmp, module_name)
+ async_module_path = self._connection._shell.join_path(tmp, 'async_wrapper')
+ remote_module_path = self._connection._shell.join_path(tmp, module_name)
env_string = self._compute_environment_string()
# configure, upload, and chmod the target module
- (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=self._task.args)
+ (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=self._task.args, task_vars=task_vars)
self._transfer_data(remote_module_path, module_data)
self._remote_chmod(tmp, 'a+rx', remote_module_path)
# configure, upload, and chmod the async_wrapper module
- (async_module_style, shebang, async_module_data) = self._configure_module(module_name='async_wrapper', module_args=dict())
+ (async_module_style, shebang, async_module_data) = self._configure_module(module_name='async_wrapper', module_args=dict(), task_vars=task_vars)
self._transfer_data(async_module_path, async_module_data)
self._remote_chmod(tmp, 'a+rx', async_module_path)
- argsfile = self._transfer_data(self._shell.join_path(tmp, 'arguments'), json.dumps(self._task.args))
+ argsfile = self._transfer_data(self._connection._shell.join_path(tmp, 'arguments'), json.dumps(self._task.args))
async_limit = self._task.async
async_jid = str(random.randint(0, 999999999999))
diff --git a/v2/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py
similarity index 67%
rename from v2/ansible/plugins/action/copy.py
rename to lib/ansible/plugins/action/copy.py
index 6db130ad7f3..34a426f5e27 100644
--- a/v2/ansible/plugins/action/copy.py
+++ b/lib/ansible/plugins/action/copy.py
@@ -19,11 +19,9 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-import base64
import json
import os
import pipes
-import stat
import tempfile
from ansible import constants as C
@@ -43,19 +41,19 @@ class ActionModule(ActionBase):
dest = self._task.args.get('dest', None)
raw = boolean(self._task.args.get('raw', 'no'))
force = boolean(self._task.args.get('force', 'yes'))
+ faf = self._task.first_available_file
- # FIXME: first available file needs to be reworked somehow...
- #if (source is None and content is None and not 'first_available_file' in inject) or dest is None:
- # result=dict(failed=True, msg="src (or content) and dest are required")
- # return ReturnData(conn=conn, result=result)
- #elif (source is not None or 'first_available_file' in inject) and content is not None:
- # result=dict(failed=True, msg="src and content are mutually exclusive")
- # return ReturnData(conn=conn, result=result)
+ if (source is None and content is None and faf is None) or dest is None:
+ return dict(failed=True, msg="src (or content) and dest are required")
+ elif (source is not None or faf is not None) and content is not None:
+ return dict(failed=True, msg="src and content are mutually exclusive")
+ elif content is not None and dest is not None and dest.endswith("/"):
+ return dict(failed=True, msg="dest must be a file if content is defined")
# Check if the source ends with a "/"
source_trailing_slash = False
if source:
- source_trailing_slash = source.endswith(os.sep)
+ source_trailing_slash = self._connection._shell.path_has_trailing_slash(source)
# Define content_tempfile in case we set it after finding content populated.
content_tempfile = None
@@ -65,7 +63,7 @@ class ActionModule(ActionBase):
try:
# If content comes to us as a dict it should be decoded json.
# We need to encode it back into a string to write it out.
- if isinstance(content, dict):
+ if isinstance(content, dict) or isinstance(content, list):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
@@ -73,27 +71,12 @@ class ActionModule(ActionBase):
except Exception as err:
return dict(failed=True, msg="could not write content temp file: %s" % err)
- ###############################################################################################
- # FIXME: first_available_file needs to be reworked?
- ###############################################################################################
# if we have first_available_file in our vars
# look up the files and use the first one we find as src
- #elif 'first_available_file' in inject:
- # found = False
- # for fn in inject.get('first_available_file'):
- # fn_orig = fn
- # fnt = template.template(self.runner.basedir, fn, inject)
- # fnd = utils.path_dwim(self.runner.basedir, fnt)
- # if not os.path.exists(fnd) and '_original_file' in inject:
- # fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False)
- # if os.path.exists(fnd):
- # source = fnd
- # found = True
- # break
- # if not found:
- # results = dict(failed=True, msg="could not find src in first_available_file list")
- # return ReturnData(conn=conn, result=results)
- ###############################################################################################
+ elif faf:
+ source = self._get_first_available_file(faf, task_vars.get('_original_file', None))
+ if source is None:
+ return dict(failed=True, msg="could not find src in first_available_file list")
else:
if self._task._role is not None:
source = self._loader.path_dwim_relative(self._task._role._role_path, 'files', source)
@@ -107,7 +90,7 @@ class ActionModule(ActionBase):
if os.path.isdir(source):
# Get the amount of spaces to remove to get the relative path.
if source_trailing_slash:
- sz = len(source)
+ sz = len(source) + 1
else:
sz = len(source.rsplit('/', 1)[0]) + 1
@@ -120,8 +103,8 @@ class ActionModule(ActionBase):
# If it's recursive copy, destination is always a dir,
# explicitly mark it so (note - copy module relies on this).
- if not self._shell.path_has_trailing_slash(dest):
- dest = self._shell.join_path(dest, '')
+ if not self._connection._shell.path_has_trailing_slash(dest):
+ dest = self._connection._shell.join_path(dest, '')
else:
source_files.append((source, os.path.basename(source)))
@@ -156,10 +139,10 @@ class ActionModule(ActionBase):
# This is kind of optimization - if user told us destination is
# dir, do path manipulation right away, otherwise we still check
# for dest being a dir via remote call below.
- if self._shell.path_has_trailing_slash(dest):
- dest_file = self._shell.join_path(dest, source_rel)
+ if self._connection._shell.path_has_trailing_slash(dest):
+ dest_file = self._connection._shell.join_path(dest, source_rel)
else:
- dest_file = self._shell.join_path(dest)
+ dest_file = self._connection._shell.join_path(dest)
# Attempt to get the remote checksum
remote_checksum = self._remote_checksum(tmp, dest_file)
@@ -172,7 +155,7 @@ class ActionModule(ActionBase):
return dict(failed=True, msg="can not use content with a dir as dest")
else:
# Append the relative source location to the destination and retry remote_checksum
- dest_file = self._shell.join_path(dest, source_rel)
+ dest_file = self._connection._shell.join_path(dest, source_rel)
remote_checksum = self._remote_checksum(tmp, dest_file)
if remote_checksum != '1' and not force:
@@ -189,23 +172,17 @@ class ActionModule(ActionBase):
if tmp is None or "-tmp-" not in tmp:
tmp = self._make_tmp_path()
- # FIXME: runner shouldn't have the diff option there
- #if self.runner.diff and not raw:
- # diff = self._get_diff_data(tmp, dest_file, source_full)
- #else:
- # diff = {}
- diff = {}
-
- # FIXME: noop stuff
- #if self.runner.noop_on_check(inject):
- # self._remove_tempfile_if_content_defined(content, content_tempfile)
- # diffs.append(diff)
- # changed = True
- # module_result = dict(changed=True)
- # continue
+ if self._play_context.diff and not raw:
+ diffs.append(self._get_diff_data(tmp, dest_file, source_full, task_vars))
+
+ if self._play_context.check_mode:
+ self._remove_tempfile_if_content_defined(content, content_tempfile)
+ changed = True
+ module_return = dict(changed=True)
+ continue
# Define a remote directory that we will copy the file to.
- tmp_src = tmp + 'source'
+ tmp_src = self._connection._shell.join_path(tmp, 'source')
if not raw:
self._connection.put_file(source_full, tmp_src)
@@ -216,7 +193,7 @@ class ActionModule(ActionBase):
self._remove_tempfile_if_content_defined(content, content_tempfile)
# fix file permissions when the copy is done as a different user
- if self._connection_info.become and self._connection_info.become_user != 'root':
+ if self._play_context.become and self._play_context.become_user != 'root':
self._remote_chmod('a+r', tmp_src, tmp)
if raw:
@@ -236,7 +213,7 @@ class ActionModule(ActionBase):
)
)
- module_return = self._execute_module(module_name='copy', module_args=new_module_args, delete_remote_tmp=delete_remote_tmp)
+ module_return = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, delete_remote_tmp=delete_remote_tmp)
module_executed = True
else:
@@ -246,7 +223,7 @@ class ActionModule(ActionBase):
if raw:
# Continue to next iteration if raw is defined.
- # self._remove_tmp_path(tmp)
+ self._remove_tmp_path(tmp)
continue
# Build temporary module_args.
@@ -260,7 +237,7 @@ class ActionModule(ActionBase):
)
# Execute the file module.
- module_return = self._execute_module(module_name='file', module_args=new_module_args, delete_remote_tmp=delete_remote_tmp)
+ module_return = self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, delete_remote_tmp=delete_remote_tmp)
module_executed = True
if not module_return.get('checksum'):
@@ -304,44 +281,6 @@ class ActionModule(ActionBase):
f.close()
return content_tempfile
- def _get_diff_data(self, tmp, destination, source):
- peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), persist_files=True)
- if 'failed' in peek_result and peek_result['failed'] or peek_result.get('rc', 0) != 0:
- return {}
-
- diff = {}
- if peek_result['state'] == 'absent':
- diff['before'] = ''
- elif peek_result['appears_binary']:
- diff['dst_binary'] = 1
- # FIXME: this should not be in utils..
- #elif peek_result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF:
- # diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), tmp=tmp, persist_files=True)
- if 'content' in dest_result:
- dest_contents = dest_result['content']
- if dest_result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result)
- diff['before_header'] = destination
- diff['before'] = dest_contents
-
- src = open(source)
- src_contents = src.read(8192)
- st = os.stat(source)
- if "\x00" in src_contents:
- diff['src_binary'] = 1
- # FIXME: this should not be in utils
- #elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF:
- # diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- src.seek(0)
- diff['after_header'] = source
- diff['after'] = src.read()
-
- return diff
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
if content is not None:
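
The copy action's reinstated argument checks above boil down to three rules; the sketch below isolates just that validation (the helper name is ours, not part of the module):

def validate_copy_args(source=None, content=None, dest=None, first_available_file=None):
    # mirrors the early checks in the copy action's run() above
    if (source is None and content is None and first_available_file is None) or dest is None:
        return dict(failed=True, msg="src (or content) and dest are required")
    if (source is not None or first_available_file is not None) and content is not None:
        return dict(failed=True, msg="src and content are mutually exclusive")
    if content is not None and dest.endswith("/"):
        return dict(failed=True, msg="dest must be a file if content is defined")
    return dict(failed=False)

# validate_copy_args(content='x=1\n', dest='/etc/app/')            -> failed: dest must be a file
# validate_copy_args(source='app.conf', dest='/etc/app/app.conf')  -> {'failed': False}
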
diff --git a/v2/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py
similarity index 85%
rename from v2/ansible/plugins/action/debug.py
rename to lib/ansible/plugins/action/debug.py
index 04db3c9cc1b..01a59d1ad2d 100644
--- a/v2/ansible/plugins/action/debug.py
+++ b/lib/ansible/plugins/action/debug.py
@@ -19,7 +19,6 @@ __metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
-from ansible.template import Templar
class ActionModule(ActionBase):
''' Print statements during execution '''
@@ -35,14 +34,15 @@ class ActionModule(ActionBase):
result = dict(msg=self._task.args['msg'])
# FIXME: move the LOOKUP_REGEX somewhere else
elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']):
- templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=task_vars)
- results = templar.template(self._task.args['var'], convert_bare=True)
+ results = self._templar.template(self._task.args['var'], convert_bare=True)
+ if results == self._task.args['var']:
+ results = "VARIABLE IS NOT DEFINED!"
result = dict()
result[self._task.args['var']] = results
else:
result = dict(msg='here we are')
# force flag to make debug output module always verbose
- result['verbose_always'] = True
+ result['_ansible_verbose_always'] = True
return result
diff --git a/v2/ansible/plugins/action/fail.py b/lib/ansible/plugins/action/fail.py
similarity index 100%
rename from v2/ansible/plugins/action/fail.py
rename to lib/ansible/plugins/action/fail.py
diff --git a/v2/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py
similarity index 88%
rename from v2/ansible/plugins/action/fetch.py
rename to lib/ansible/plugins/action/fetch.py
index c242c8739d0..81edf65ef14 100644
--- a/v2/ansible/plugins/action/fetch.py
+++ b/lib/ansible/plugins/action/fetch.py
@@ -29,15 +29,15 @@ from ansible.errors import *
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
+from ansible.utils.path import makedirs_safe
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=dict()):
''' handler for fetch operations '''
- # FIXME: is this even required anymore?
- #if self.runner.noop_on_check(inject):
- # return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not (yet) supported for this module'))
+ if self._play_context.check_mode:
+ return dict(skipped=True, msg='check mode not (yet) supported for this module')
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
@@ -51,7 +51,7 @@ class ActionModule(ActionBase):
if source is None or dest is None:
return dict(failed=True, msg="src and dest are required")
- source = self._shell.join_path(source)
+ source = self._connection._shell.join_path(source)
source = self._remote_expand_user(source, tmp)
# calculate checksum for the remote file
@@ -59,8 +59,8 @@ class ActionModule(ActionBase):
# use slurp if sudo and permissions are lacking
remote_data = None
- if remote_checksum in ('1', '2') or self._connection_info.become:
- slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), tmp=tmp)
+ if remote_checksum in ('1', '2') or self._play_context.become:
+ slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp)
if slurpres.get('rc') == 0:
if slurpres['encoding'] == 'base64':
remote_data = base64.b64decode(slurpres['content'])
@@ -77,7 +77,8 @@ class ActionModule(ActionBase):
pass
# calculate the destination name
- if os.path.sep not in self._shell.join_path('a', ''):
+ if os.path.sep not in self._connection._shell.join_path('a', ''):
+ source = self._connection._shell._unquote(source)
source_local = source.replace('\\', '/')
else:
source_local = source
@@ -97,7 +98,7 @@ class ActionModule(ActionBase):
if 'inventory_hostname' in task_vars:
target_name = task_vars['inventory_hostname']
else:
- target_name = self._connection_info.remote_addr
+ target_name = self._play_context.remote_addr
dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)
dest = dest.replace("//","/")
@@ -125,16 +126,18 @@ class ActionModule(ActionBase):
if remote_checksum != local_checksum:
# create the containing directories, if needed
- if not os.path.isdir(os.path.dirname(dest)):
- os.makedirs(os.path.dirname(dest))
+ makedirs_safe(os.path.dirname(dest))
# fetch the file and check for changes
if remote_data is None:
self._connection.fetch_file(source, dest)
else:
- f = open(dest, 'w')
- f.write(remote_data)
- f.close()
+ try:
+ f = open(dest, 'w')
+ f.write(remote_data)
+ f.close()
+ except (IOError, OSError) as e:
+ raise AnsibleError("Failed to fetch the file: %s" % e)
new_checksum = secure_hash(dest)
# For backwards compatibility. We'll return None on FIPS enabled
# systems
diff --git a/v2/ansible/plugins/action/group_by.py b/lib/ansible/plugins/action/group_by.py
similarity index 100%
rename from v2/ansible/plugins/action/group_by.py
rename to lib/ansible/plugins/action/group_by.py
diff --git a/v2/ansible/plugins/action/include_vars.py b/lib/ansible/plugins/action/include_vars.py
similarity index 88%
rename from v2/ansible/plugins/action/include_vars.py
rename to lib/ansible/plugins/action/include_vars.py
index 8a7a74d8705..37b4bff1d3e 100644
--- a/v2/ansible/plugins/action/include_vars.py
+++ b/lib/ansible/plugins/action/include_vars.py
@@ -39,12 +39,13 @@ class ActionModule(ActionBase):
source = self._loader.path_dwim(source)
if os.path.exists(source):
- data = self._loader.load_from_file(source)
+ (data, show_content) = self._loader._get_file_contents(source)
+ data = self._loader.load(data, show_content)
if data is None:
data = {}
if not isinstance(data, dict):
raise AnsibleError("%s must be stored as a dictionary/hash" % source)
- return dict(ansible_facts=data)
+ return dict(ansible_facts=data, _ansible_no_log=not show_content)
else:
return dict(failed=True, msg="Source file not found.", file=source)
diff --git a/v2/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py
similarity index 69%
rename from v2/ansible/plugins/action/normal.py
rename to lib/ansible/plugins/action/normal.py
index 431d9b0eebe..9ea962a240f 100644
--- a/v2/ansible/plugins/action/normal.py
+++ b/lib/ansible/plugins/action/normal.py
@@ -23,7 +23,13 @@ class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=dict()):
- #vv("REMOTE_MODULE %s %s" % (module_name, module_args), host=conn.host)
- return self._execute_module(tmp)
+ results = self._execute_module(tmp=tmp, task_vars=task_vars)
+ # Remove special fields from the result, which can only be set
+ # internally by the executor engine. We do this only here in
+ # the 'normal' action, as other action plugins may set this.
+ for field in ('ansible_notify',):
+ if field in results:
+ results.pop(field)
+ return results
diff --git a/lib/ansible/plugins/action/package.py b/lib/ansible/plugins/action/package.py
new file mode 100644
index 00000000000..fa412d45db3
--- /dev/null
+++ b/lib/ansible/plugins/action/package.py
@@ -0,0 +1,53 @@
+# (c) 2015, Ansible Inc,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.plugins.action import ActionBase
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = False
+
+ def run(self, tmp=None, task_vars=dict()):
+ ''' handler for package operations '''
+
+ name = self._task.args.get('name', None)
+ state = self._task.args.get('state', None)
+ module = self._task.args.get('use', 'auto')
+
+ if module == 'auto':
+ try:
+ module = self._templar.template('{{ansible_pkg_mgr}}')
+ except:
+ pass # could not get it from template!
+
+ if module == 'auto':
+            facts = self._execute_module(module_name='setup', module_args=dict(filter='ansible_pkg_mgr'), task_vars=task_vars)
+            module = facts.get('ansible_facts', {}).get('ansible_pkg_mgr', 'auto')
+
+ if module != 'auto':
+ # run the 'package' module
+ new_module_args = self._task.args.copy()
+ if 'use' in new_module_args:
+ del new_module_args['use']
+
+ return self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars)
+
+ else:
+
+ return {'failed': True, 'msg': 'Could not detect which package manager to use. Try gathering facts or setting the "use" option.'}
diff --git a/v2/ansible/plugins/action/patch.py b/lib/ansible/plugins/action/patch.py
similarity index 82%
rename from v2/ansible/plugins/action/patch.py
rename to lib/ansible/plugins/action/patch.py
index bf2af1be1ec..65417e50c3a 100644
--- a/v2/ansible/plugins/action/patch.py
+++ b/lib/ansible/plugins/action/patch.py
@@ -36,7 +36,7 @@ class ActionModule(ActionBase):
elif remote_src:
# everything is remote, so we just execute the module
# without changing any of the module arguments
- return self._execute_module()
+ return self._execute_module(task_vars=task_vars)
if self._task._role is not None:
src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src)
@@ -47,14 +47,12 @@ class ActionModule(ActionBase):
if tmp is None or "-tmp-" not in tmp:
tmp = self._make_tmp_path()
- tmp_src = self._shell.join_path(tmp, os.path.basename(src))
+ tmp_src = self._connection._shell.join_path(tmp, os.path.basename(src))
self._connection.put_file(src, tmp_src)
- if self._connection_info.become and self._connection_info.become_user != 'root':
- # FIXME: noop stuff here
- #if not self.runner.noop_on_check(inject):
- # self._remote_chmod('a+r', tmp_src, tmp)
- self._remote_chmod('a+r', tmp_src, tmp)
+ if self._play_context.become and self._play_context.become_user != 'root':
+ if not self._play_context.check_mode:
+ self._remote_chmod('a+r', tmp_src, tmp)
new_module_args = self._task.args.copy()
new_module_args.update(
@@ -63,4 +61,4 @@ class ActionModule(ActionBase):
)
)
- return self._execute_module('patch', module_args=new_module_args)
+ return self._execute_module('patch', module_args=new_module_args, task_vars=task_vars)
diff --git a/v2/ansible/plugins/action/pause.py b/lib/ansible/plugins/action/pause.py
similarity index 58%
rename from v2/ansible/plugins/action/pause.py
rename to lib/ansible/plugins/action/pause.py
index c5a97d53666..f57cee3811d 100644
--- a/v2/ansible/plugins/action/pause.py
+++ b/lib/ansible/plugins/action/pause.py
@@ -18,14 +18,21 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
+import signal
import sys
+import termios
import time
-
-from termios import tcflush, TCIFLUSH
+import tty
from ansible.errors import *
from ansible.plugins.action import ActionBase
+class AnsibleTimeoutExceeded(Exception):
+ pass
+
+def timeout_handler(signum, frame):
+ raise AnsibleTimeoutExceeded
+
class ActionModule(ActionBase):
''' pauses execution for a length or time, or until input is received '''
@@ -48,14 +55,10 @@ class ActionModule(ActionBase):
delta = None,
)
- # FIXME: not sure if we can get this info directly like this anymore?
- #hosts = ', '.join(self.runner.host_set)
-
# Is 'args' empty, then this is the default prompted pause
if self._task.args is None or len(self._task.args.keys()) == 0:
pause_type = 'prompt'
- #prompt = "[%s]\nPress enter to continue:\n" % hosts
- prompt = "[%s]\nPress enter to continue:\n" % self._task.get_name().strip()
+ prompt = "[%s]\nPress enter to continue:" % self._task.get_name().strip()
# Are 'minutes' or 'seconds' keys that exist in 'args'?
elif 'minutes' in self._task.args or 'seconds' in self._task.args:
@@ -76,51 +79,68 @@ class ActionModule(ActionBase):
# Is 'prompt' a key in 'args'?
elif 'prompt' in self._task.args:
pause_type = 'prompt'
- #prompt = "[%s]\n%s:\n" % (hosts, self._task.args['prompt'])
- prompt = "[%s]\n%s:\n" % (self._task.get_name().strip(), self._task.args['prompt'])
+ prompt = "[%s]\n%s:" % (self._task.get_name().strip(), self._task.args['prompt'])
- # I have no idea what you're trying to do. But it's so wrong.
else:
+ # I have no idea what you're trying to do. But it's so wrong.
return dict(failed=True, msg="invalid pause type given. must be one of: %s" % ", ".join(self.PAUSE_TYPES))
- #vv("created 'pause' ActionModule: pause_type=%s, duration_unit=%s, calculated_seconds=%s, prompt=%s" % \
- # (self.pause_type, self.duration_unit, self.seconds, self.prompt))
-
########################################################################
# Begin the hard work!
start = time.time()
result['start'] = str(datetime.datetime.now())
-
- # FIXME: this is all very broken right now, as prompting from the worker side
- # is not really going to be supported, and actions marked as BYPASS_HOST_LOOP
- # probably should not be run through the executor engine at all. Also, ctrl+c
- # is now captured on the parent thread, so it can't be caught here via the
- # KeyboardInterrupt exception.
-
try:
- if not pause_type == 'prompt':
- print("(^C-c = continue early, ^C-a = abort)")
- #print("[%s]\nPausing for %s seconds" % (hosts, seconds))
- print("[%s]\nPausing for %s seconds" % (self._task.get_name().strip(), seconds))
- time.sleep(seconds)
+ if seconds is not None:
+ # setup the alarm handler
+ signal.signal(signal.SIGALRM, timeout_handler)
+ signal.alarm(seconds)
+ # show the prompt
+ print("Pausing for %d seconds" % seconds)
+ print("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"),
else:
- # Clear out any unflushed buffered input which would
- # otherwise be consumed by raw_input() prematurely.
- #tcflush(sys.stdin, TCIFLUSH)
- result['user_input'] = raw_input(prompt.encode(sys.stdout.encoding))
- except KeyboardInterrupt:
+ print(prompt)
+
+ # save the attributes on the existing (duped) stdin so
+ # that we can restore them later after we set raw mode
+ fd = self._connection._new_stdin.fileno()
+ old_settings = termios.tcgetattr(fd)
+ tty.setraw(fd)
+
+ # flush the buffer to make sure no previous key presses
+ # are read in below
+ termios.tcflush(self._connection._new_stdin, termios.TCIFLUSH)
+
+ # read key presses and act accordingly
while True:
- print('\nAction? (a)bort/(c)ontinue: ')
- c = getch()
- if c == 'c':
- # continue playbook evaluation
- break
- elif c == 'a':
- # abort further playbook evaluation
- raise ae('user requested abort!')
+ key_pressed = self._connection._new_stdin.read(1)
+ if pause_type in ('minutes', 'seconds'):
+ if key_pressed == '\x03':
+ key_pressed = self._connection._new_stdin.read(1)
+ if key_pressed == 'a':
+ raise KeyboardInterrupt
+ elif key_pressed == 'c':
+ break
+ else:
+ if key_pressed == '\x03':
+ raise KeyboardInterrupt
+ elif key_pressed == '\r':
+ break
+ except KeyboardInterrupt:
+ # cancel the previously set alarm signal
+ if seconds is not None:
+ signal.alarm(0)
+ raise AnsibleError('user requested abort!')
+ except AnsibleTimeoutExceeded:
+ # this is the exception we expect when the alarm signal
+ # fires, so we simply ignore it to move into the cleanup
+ pass
finally:
+ # cleanup and save some information
+ # restore the old settings for the duped stdin fd
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+
duration = time.time() - start
result['stop'] = str(datetime.datetime.now())
result['delta'] = int(duration)
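As a stand-alone illustration of the timeout technique used above (an alarm signal paired with a private exception class), here is a minimal sketch using only the standard library; it assumes a POSIX system and the five-second figure is arbitrary:

    import signal
    import time

    class TimeoutExceeded(Exception):
        pass

    def _on_alarm(signum, frame):
        raise TimeoutExceeded()

    signal.signal(signal.SIGALRM, _on_alarm)   # install the handler
    signal.alarm(5)                            # deliver SIGALRM in 5 seconds
    try:
        time.sleep(60)                         # stands in for waiting on key presses
    except TimeoutExceeded:
        pass                                   # expected path once the alarm fires
    finally:
        signal.alarm(0)                        # always cancel any pending alarm
    print("continuing after at most five seconds")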
diff --git a/v2/ansible/plugins/action/raw.py b/lib/ansible/plugins/action/raw.py
similarity index 76%
rename from v2/ansible/plugins/action/raw.py
rename to lib/ansible/plugins/action/raw.py
index f9cd56572b1..4d862d9ebb9 100644
--- a/v2/ansible/plugins/action/raw.py
+++ b/lib/ansible/plugins/action/raw.py
@@ -19,15 +19,16 @@ __metaclass__ = type
from ansible.plugins.action import ActionBase
+import re
+
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=dict()):
- # FIXME: need to rework the noop stuff still
- #if self.runner.noop_on_check(inject):
- # # in --check mode, always skip this module execution
- # return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True))
+ if self._play_context.check_mode:
+ # in --check mode, always skip this module execution
+ return dict(skipped=True)
executable = self._task.args.get('executable')
result = self._low_level_execute_command(self._task.args.get('_raw_params'), tmp=tmp, executable=executable)
@@ -35,7 +36,7 @@ class ActionModule(ActionBase):
# for some modules (script, raw), the sudo success key
# may leak into the stdout due to the way the sudo/su
# command is constructed, so we filter that out here
- if result.get('stdout','').strip().startswith('SUDO-SUCCESS-'):
- result['stdout'] = re.sub(r'^((\r)?\n)?SUDO-SUCCESS.*(\r)?\n', '', result['stdout'])
+ if result.get('stdout','').strip().startswith('BECOME-SUCCESS-'):
+ result['stdout'] = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', result['stdout'])
return result
diff --git a/v2/ansible/plugins/action/script.py b/lib/ansible/plugins/action/script.py
similarity index 85%
rename from v2/ansible/plugins/action/script.py
rename to lib/ansible/plugins/action/script.py
index 3ca7dc6a342..2392851110d 100644
--- a/v2/ansible/plugins/action/script.py
+++ b/lib/ansible/plugins/action/script.py
@@ -28,11 +28,8 @@ class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
''' handler for file transfer operations '''
- # FIXME: noop stuff still needs to be sorted out
- #if self.runner.noop_on_check(inject):
- # # in check mode, always skip this module
- # return ReturnData(conn=conn, comm_ok=True,
- # result=dict(skipped=True, msg='check mode not supported for this module'))
+ if self._play_context.check_mode:
+ return dict(skipped=True, msg='check mode not supported for this module')
if not tmp:
tmp = self._make_tmp_path()
@@ -42,7 +39,7 @@ class ActionModule(ActionBase):
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
- result = self._execute_module(module_name='stat', module_args=dict(path=creates), tmp=tmp, persist_files=True)
+ result = self._execute_module(module_name='stat', module_args=dict(path=creates), task_vars=task_vars, tmp=tmp, persist_files=True)
stat = result.get('stat', None)
if stat and stat.get('exists', False):
return dict(skipped=True, msg=("skipped, since %s exists" % creates))
@@ -52,7 +49,7 @@ class ActionModule(ActionBase):
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
- result = self._execute_module(module_name='stat', module_args=dict(path=removes), tmp=tmp, persist_files=True)
+ result = self._execute_module(module_name='stat', module_args=dict(path=removes), task_vars=task_vars, tmp=tmp, persist_files=True)
stat = result.get('stat', None)
if stat and not stat.get('exists', False):
return dict(skipped=True, msg=("skipped, since %s does not exist" % removes))
@@ -71,12 +68,12 @@ class ActionModule(ActionBase):
source = self._loader.path_dwim(source)
# transfer the file to a remote tmp location
- tmp_src = self._shell.join_path(tmp, os.path.basename(source))
+ tmp_src = self._connection._shell.join_path(tmp, os.path.basename(source))
self._connection.put_file(source, tmp_src)
sudoable = True
# set file permissions, more permissive when the copy is done as a different user
- if self._connection_info.become and self._connection_info.become_user != 'root':
+ if self._play_context.become and self._play_context.become_user != 'root':
chmod_mode = 'a+rx'
sudoable = False
else:
diff --git a/v2/ansible/plugins/action/set_fact.py b/lib/ansible/plugins/action/set_fact.py
similarity index 57%
rename from v2/ansible/plugins/action/set_fact.py
rename to lib/ansible/plugins/action/set_fact.py
index 6086ee6e8b2..5822fb3f083 100644
--- a/v2/ansible/plugins/action/set_fact.py
+++ b/lib/ansible/plugins/action/set_fact.py
@@ -17,21 +17,58 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import ast
+
+from six import string_types
+
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
-from ansible.template import Templar
from ansible.utils.boolean import boolean
+def isidentifier(ident):
+ """
+    Determines whether a string is a valid Python identifier, using the ast module.
+    Originally posted at: http://stackoverflow.com/a/29586366
+ """
+
+ if not isinstance(ident, string_types):
+ return False
+
+ try:
+ root = ast.parse(ident)
+ except SyntaxError:
+ return False
+
+ if not isinstance(root, ast.Module):
+ return False
+
+ if len(root.body) != 1:
+ return False
+
+ if not isinstance(root.body[0], ast.Expr):
+ return False
+
+ if not isinstance(root.body[0].value, ast.Name):
+ return False
+
+ if root.body[0].value.id != ident:
+ return False
+
+ return True
+
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=dict()):
- templar = Templar(loader=self._loader, variables=task_vars)
facts = dict()
if self._task.args:
for (k, v) in self._task.args.iteritems():
- k = templar.template(k)
+ k = self._templar.template(k)
+
+ if not isidentifier(k):
+ return dict(failed=True, msg="The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only letters, numbers and underscores." % k)
+
if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
v = boolean(v)
facts[k] = v
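For illustration, a short sketch of how the ast-based check above classifies candidate variable names; the sample names are made up:

    import ast

    # each candidate must parse to a module whose single statement is a bare Name
    for candidate in ('my_var', '_ok_2', '2cows', 'a-b', 'import'):
        try:
            body = ast.parse(candidate).body
            valid = (len(body) == 1 and isinstance(body[0], ast.Expr)
                     and isinstance(body[0].value, ast.Name)
                     and body[0].value.id == candidate)
        except SyntaxError:
            valid = False
        print('%-8s -> %s' % (candidate, valid))
    # my_var and _ok_2 are accepted; 2cows, a-b and import are rejected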
diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py
new file mode 100644
index 00000000000..e22ea11600c
--- /dev/null
+++ b/lib/ansible/plugins/action/synchronize.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012-2013, Timothy Appnel
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import os.path
+
+from ansible.plugins.action import ActionBase
+from ansible.plugins import connection_loader
+from ansible.utils.boolean import boolean
+from ansible import constants
+
+
+class ActionModule(ActionBase):
+
+ def _get_absolute_path(self, path):
+ if self._task._role is not None:
+ original_path = path
+ path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path)
+ if original_path and original_path[-1] == '/' and path[-1] != '/':
+ # make sure the dwim'd path ends in a trailing "/"
+ # if the original path did
+ path += '/'
+
+ return path
+
+ def _process_origin(self, host, path, user):
+
+ if host not in ('127.0.0.1', 'localhost', '::1'):
+ if user:
+ return '%s@%s:%s' % (user, host, path)
+ else:
+ return '%s:%s' % (host, path)
+
+ if ':' not in path and not path.startswith('/'):
+ path = self._get_absolute_path(path=path)
+ return path
+
+ def _process_remote(self, host, path, user):
+ transport = self._play_context.connection
+ if host not in ('127.0.0.1', 'localhost', '::1') or transport != "local":
+ if user:
+ return '%s@%s:%s' % (user, host, path)
+ else:
+ return '%s:%s' % (host, path)
+
+ if ':' not in path and not path.startswith('/'):
+ path = self._get_absolute_path(path=path)
+ return path
+
+ def _override_module_replaced_vars(self, task_vars):
+ """ Some vars are substituted into the modules. Have to make sure
+ that those are correct for localhost when synchronize creates its own
+ connection to localhost."""
+
+ # Clear the current definition of these variables as they came from the
+ # connection to the remote host
+ if 'ansible_syslog_facility' in task_vars:
+ del task_vars['ansible_syslog_facility']
+        for key in list(task_vars.keys()):
+ if key.startswith("ansible_") and key.endswith("_interpreter"):
+ del task_vars[key]
+
+ # Add the definition from localhost
+ localhost = task_vars['hostvars']['localhost']
+ if 'ansible_syslog_facility' in localhost:
+ task_vars['ansible_syslog_facility'] = localhost['ansible_syslog_facility']
+ for key in localhost:
+ if key.startswith("ansible_") and key.endswith("_interpreter"):
+ task_vars[key] = localhost[key]
+
+ def run(self, tmp=None, task_vars=dict()):
+ ''' generates params and passes them on to the rsync module '''
+
+ original_transport = task_vars.get('ansible_connection') or self._play_context.connection
+ transport_overridden = False
+ try:
+ delegate_to = self._play_context.delegate_to
+ except (AttributeError, KeyError):
+ delegate_to = None
+
+ use_ssh_args = self._task.args.pop('use_ssh_args', None)
+
+ # Parameter name needed by the ansible module
+ self._task.args['_local_rsync_path'] = task_vars.get('ansible_rsync_path') or 'rsync'
+
+ # from the perspective of the rsync call the delegate is the localhost
+ src_host = '127.0.0.1'
+ dest_host = task_vars.get('ansible_ssh_host') or task_vars.get('inventory_hostname')
+
+ ### FIXME: do we still need to explicitly template ansible_ssh_host here in v2?
+
+ dest_is_local = dest_host in ['127.0.0.1', 'localhost']
+
+
+ # CHECK FOR NON-DEFAULT SSH PORT
+ dest_port = task_vars.get('ansible_ssh_port') or self._task.args.get('dest_port') or 22
+
+ # CHECK DELEGATE HOST INFO
+ use_delegate = False
+
+ if dest_host == delegate_to:
+ # edge case: explicit delegate and dest_host are the same
+            # so we run rsync on the remote machine targeting its localhost
+ # (itself)
+ dest_host = '127.0.0.1'
+ use_delegate = True
+ else:
+ if 'hostvars' in task_vars:
+ if delegate_to in task_vars['hostvars'] and original_transport != 'local':
+ # use a delegate host instead of localhost
+ use_delegate = True
+
+ # COMPARE DELEGATE, HOST AND TRANSPORT
+ process_args = False
+ if dest_host != src_host and original_transport != 'local':
+        # interpret and insert remote host info from task_vars into src or dest
+ process_args = True
+
+ # SWITCH SRC AND DEST PER MODE
+ if self._task.args.get('mode', 'push') == 'pull':
+ (dest_host, src_host) = (src_host, dest_host)
+
+ # Delegate to localhost as the source of the rsync unless we've been
+ # told (via delegate_to) that a different host is the source of the
+ # rsync
+ transport_overridden = False
+ if not use_delegate and original_transport != 'local':
+ # Create a connection to localhost to run rsync on
+ new_stdin = self._connection._new_stdin
+ new_connection = connection_loader.get('local', self._play_context, new_stdin)
+ self._connection = new_connection
+ transport_overridden = True
+ self._override_module_replaced_vars(task_vars)
+ ### FIXME: We think that this was here for v1 because the local
+ # connection didn't support sudo. In v2 it does so we think it's
+ # safe to remove this now.
+
+ # Also disable sudo
+ #self._play_context.become = False
+
+ # MUNGE SRC AND DEST PER REMOTE_HOST INFO
+ src = self._task.args.get('src', None)
+ dest = self._task.args.get('dest', None)
+ if process_args or use_delegate:
+
+ user = None
+ if boolean(task_vars.get('set_remote_user', 'yes')):
+ if use_delegate:
+ user = task_vars['hostvars'][delegate_to].get('ansible_ssh_user')
+
+ if not use_delegate or not user:
+ user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
+
+            private_key = task_vars.get('ansible_ssh_private_key_file') or self._play_context.private_key_file
+
+ if private_key is not None:
+ private_key = os.path.expanduser(private_key)
+ self._task.args['private_key'] = private_key
+
+ # use the mode to define src and dest's url
+ if self._task.args.get('mode', 'push') == 'pull':
+            # src is a remote path: <user>@<host>:<path>, dest is a local path
+ src = self._process_remote(src_host, src, user)
+ dest = self._process_origin(dest_host, dest, user)
+ else:
+            # src is a local path, dest is a remote path: <user>@<host>:<path>
+ src = self._process_origin(src_host, src, user)
+ dest = self._process_remote(dest_host, dest, user)
+
+ self._task.args['src'] = src
+ self._task.args['dest'] = dest
+
+ # Remove mode as it is handled purely in this action module
+ if 'mode' in self._task.args:
+ del self._task.args['mode']
+
+ # Allow custom rsync path argument.
+ rsync_path = self._task.args.get('rsync_path', None)
+
+ # If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument.
+ if not rsync_path and transport_overridden and self._play_context.become and self._play_context.become_method == 'sudo' and not dest_is_local:
+ rsync_path = 'sudo rsync'
+
+ # make sure rsync path is quoted.
+ if rsync_path:
+ self._task.args['rsync_path'] = '"%s"' % rsync_path
+
+ if use_ssh_args:
+ self._task.args['ssh_args'] = constants.ANSIBLE_SSH_ARGS
+
+ # run the module and store the result
+ result = self._execute_module('synchronize', task_vars=task_vars)
+
+    if 'SyntaxError' in result.get('msg', ''):
+ # Emit a warning about using python3 because synchronize is
+ # somewhat unique in running on localhost
+ result['traceback'] = result['msg']
+ result['msg'] = 'SyntaxError parsing module. Perhaps invoking "python" on your local (or delegate_to) machine invokes python3. You can set ansible_python_interpreter for localhost (or the delegate_to machine) to the location of python2 to fix this'
+ return result
diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py
new file mode 100644
index 00000000000..358d7d31648
--- /dev/null
+++ b/lib/ansible/plugins/action/template.py
@@ -0,0 +1,184 @@
+# (c) 2015, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import datetime
+import os
+import pwd
+import time
+
+from ansible import constants as C
+from ansible.plugins.action import ActionBase
+from ansible.utils.hashing import checksum_s
+from ansible.utils.unicode import to_bytes, to_unicode
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ def get_checksum(self, tmp, dest, try_directory=False, source=None):
+ remote_checksum = self._remote_checksum(tmp, dest)
+
+ if remote_checksum in ('0', '2', '3', '4'):
+ # Note: 1 means the file is not present which is fine; template
+ # will create it. 3 means directory was specified instead of file
+ if try_directory and remote_checksum == '3' and source:
+ base = os.path.basename(source)
+ dest = os.path.join(dest, base)
+ remote_checksum = self.get_checksum(tmp, dest, try_directory=False)
+ if remote_checksum not in ('0', '2', '3', '4'):
+ return remote_checksum
+
+ result = dict(failed=True, msg="failed to checksum remote file."
+ " Checksum error code: %s" % remote_checksum)
+ return result
+
+ return remote_checksum
+
+ def run(self, tmp=None, task_vars=dict()):
+ ''' handler for template operations '''
+
+ source = self._task.args.get('src', None)
+ dest = self._task.args.get('dest', None)
+ faf = self._task.first_available_file
+
+        if (source is None and faf is None) or dest is None:
+ return dict(failed=True, msg="src and dest are required")
+
+ if tmp is None:
+ tmp = self._make_tmp_path()
+
+ if faf:
+            source = self._get_first_available_file(faf, task_vars.get('_original_file', None), 'templates')
+ if source is None:
+ return dict(failed=True, msg="could not find src in first_available_file list")
+ else:
+ if self._task._role is not None:
+ source = self._loader.path_dwim_relative(self._task._role._role_path, 'templates', source)
+ else:
+ source = self._loader.path_dwim(source)
+
+ # Expand any user home dir specification
+ dest = self._remote_expand_user(dest, tmp)
+
+ directory_prepended = False
+ if dest.endswith(os.sep):
+ directory_prepended = True
+ base = os.path.basename(source)
+ dest = os.path.join(dest, base)
+
+ # template the source data locally & get ready to transfer
+ try:
+ with open(source, 'r') as f:
+ template_data = to_unicode(f.read())
+
+ try:
+ template_uid = pwd.getpwuid(os.stat(source).st_uid).pw_name
+ except:
+ template_uid = os.stat(source).st_uid
+
+ temp_vars = task_vars.copy()
+ temp_vars['template_host'] = os.uname()[1]
+ temp_vars['template_path'] = source
+ temp_vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(source))
+ temp_vars['template_uid'] = template_uid
+ temp_vars['template_fullpath'] = os.path.abspath(source)
+ temp_vars['template_run_date'] = datetime.datetime.now()
+
+ managed_default = C.DEFAULT_MANAGED_STR
+ managed_str = managed_default.format(
+ host = temp_vars['template_host'],
+ uid = temp_vars['template_uid'],
+ file = to_bytes(temp_vars['template_path'])
+ )
+ temp_vars['ansible_managed'] = time.strftime(
+ managed_str,
+ time.localtime(os.path.getmtime(source))
+ )
+
+ self._templar.environment.searchpath = [self._loader._basedir, os.path.dirname(source)]
+ if self._task._role is not None:
+ self._templar.environment.searchpath.insert(1, C.DEFAULT_ROLES_PATH)
+ self._templar.environment.searchpath.insert(1, self._task._role._role_path)
+
+ old_vars = self._templar._available_variables
+ self._templar.set_available_variables(temp_vars)
+ resultant = self._templar.template(template_data, preserve_trailing_newlines=True, convert_data=False)
+ self._templar.set_available_variables(old_vars)
+ except Exception as e:
+ return dict(failed=True, msg=type(e).__name__ + ": " + str(e))
+
+ local_checksum = checksum_s(resultant)
+ remote_checksum = self.get_checksum(tmp, dest, not directory_prepended, source=source)
+ if isinstance(remote_checksum, dict):
+ # Error from remote_checksum is a dict. Valid return is a str
+ return remote_checksum
+
+ diff = {}
+ new_module_args = self._task.args.copy()
+
+ if local_checksum != remote_checksum:
+ dest_contents = ''
+
+ # if showing diffs, we need to get the remote value
+ if self._play_context.diff:
+ diff = self._get_diff_data(tmp, dest, resultant, task_vars, source_file=False)
+
+            if not self._play_context.check_mode: # do actual work through copy
+ xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant)
+
+ # fix file permissions when the copy is done as a different user
+ if self._play_context.become and self._play_context.become_user != 'root':
+ self._remote_chmod('a+r', xfered, tmp)
+
+ # run the copy module
+ new_module_args.update(
+ dict(
+ src=xfered,
+ dest=dest,
+ original_basename=os.path.basename(source),
+ follow=True,
+ ),
+ )
+ result = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars)
+ else:
+                result = dict(changed=True)
+
+ if result.get('changed', False) and self._play_context.diff:
+ result['diff'] = diff
+ # result['diff'] = dict(before=dest_contents, after=resultant, before_header=dest, after_header=source)
+
+ return result
+
+ else:
+ # when running the file module based on the template data, we do
+ # not want the source filename (the name of the template) to be used,
+ # since this would mess up links, so we clear the src param and tell
+ # the module to follow links. When doing that, we have to set
+ # original_basename to the template just in case the dest is
+ # a directory.
+ new_module_args.update(
+ dict(
+ src=None,
+ original_basename=os.path.basename(source),
+ follow=True,
+ ),
+ )
+
+ return self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars)
+
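To make the two-stage expansion of ansible_managed above concrete, here is a small sketch; the managed string and the path are made-up stand-ins for C.DEFAULT_MANAGED_STR and the real template file:

    import time

    # stand-ins; the real values come from ansible.cfg and the template on disk
    managed_default = "Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}"
    template_path = "/tmp/example.j2"

    # first pass: str.format() fills the {file}/{uid}/{host} placeholders
    managed_str = managed_default.format(host="build01", uid="deploy", file=template_path)

    # second pass: strftime() fills the %-style date fields (the action plugin
    # uses the template's mtime; plain localtime() is close enough here)
    ansible_managed = time.strftime(managed_str, time.localtime())
    print(ansible_managed)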
diff --git a/v2/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py
similarity index 78%
rename from v2/ansible/plugins/action/unarchive.py
rename to lib/ansible/plugins/action/unarchive.py
index b7601ed9107..6a43259763e 100644
--- a/v2/ansible/plugins/action/unarchive.py
+++ b/lib/ansible/plugins/action/unarchive.py
@@ -22,6 +22,7 @@ import os
import pipes
from ansible.plugins.action import ActionBase
+from ansible.utils.boolean import boolean
class ActionModule(ActionBase):
@@ -33,7 +34,7 @@ class ActionModule(ActionBase):
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
- copy = self._task.args.get('copy', True)
+ copy = boolean(self._task.args.get('copy', True))
creates = self._task.args.get('creates', None)
if source is None or dest is None:
@@ -47,7 +48,7 @@ class ActionModule(ActionBase):
# and the filename already exists. This allows idempotence
# of command executions.
module_args_tmp = "path=%s" % creates
- result = self._execute_module(module_name='stat', module_args=dict(path=creates))
+ result = self._execute_module(module_name='stat', module_args=dict(path=creates), task_vars=task_vars)
stat = result.get('stat', None)
if stat and stat.get('exists', False):
return dict(skipped=True, msg=("skipped, since %s exists" % creates))
@@ -77,11 +78,9 @@ class ActionModule(ActionBase):
# handle check mode client side
# fix file permissions when the copy is done as a different user
if copy:
- if self._connection_info.become and self._connection_info.become_user != 'root':
- # FIXME: noop stuff needs to be reworked
- #if not self.runner.noop_on_check(task_vars):
- # self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
- self._remote_chmod(tmp, 'a+r', tmp_src)
+ if self._play_context.become and self._play_context.become_user != 'root':
+ if not self._play_context.check_mode:
+ self._remote_chmod(tmp, 'a+r', tmp_src)
# Build temporary module_args.
new_module_args = self._task.args.copy()
@@ -92,11 +91,6 @@ class ActionModule(ActionBase):
),
)
- # make sure checkmod is passed on correctly
- # FIXME: noop again, probably doesn't need to be done here anymore?
- #if self.runner.noop_on_check(task_vars):
- # new_module_args['CHECKMODE'] = True
-
else:
new_module_args = self._task.args.copy()
new_module_args.update(
@@ -104,11 +98,7 @@ class ActionModule(ActionBase):
original_basename=os.path.basename(source),
),
)
- # make sure checkmod is passed on correctly
- # FIXME: noop again, probably doesn't need to be done here anymore?
- #if self.runner.noop_on_check(task_vars):
- # module_args += " CHECKMODE=True"
# execute the unarchive module now, with the updated args
- return self._execute_module(module_args=new_module_args)
+ return self._execute_module(module_args=new_module_args, task_vars=task_vars)
diff --git a/lib/ansible/plugins/action/win_copy.py b/lib/ansible/plugins/action/win_copy.py
new file mode 100644
index 00000000000..54d94e12e66
--- /dev/null
+++ b/lib/ansible/plugins/action/win_copy.py
@@ -0,0 +1,28 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+from ansible.plugins.action.copy import ActionModule as CopyActionModule
+
+# Even though CopyActionModule inherits from ActionBase, we still need to
+# directly inherit from ActionBase to appease the plugin loader.
+class ActionModule(CopyActionModule, ActionBase):
+ pass
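The dual-inheritance trick above leans on Python's method resolution order: behaviour comes from the copy action, while the class remains a direct subclass of the base the plugin loader checks for. A rough sketch with stand-in classes:

    class ActionBase(object):
        def run(self):
            return "base run"

    class CopyActionModule(ActionBase):
        def run(self):
            return "copy run"

    # same shape as win_copy's ActionModule
    class WinCopyActionModule(CopyActionModule, ActionBase):
        pass

    print(WinCopyActionModule().run())                  # -> "copy run"
    print(issubclass(WinCopyActionModule, ActionBase))  # -> True
    print([c.__name__ for c in WinCopyActionModule.__mro__])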
diff --git a/lib/ansible/plugins/action/win_template.py b/lib/ansible/plugins/action/win_template.py
new file mode 100644
index 00000000000..03091d494f5
--- /dev/null
+++ b/lib/ansible/plugins/action/win_template.py
@@ -0,0 +1,28 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+from ansible.plugins.action.template import ActionModule as TemplateActionModule
+
+# Even though TemplateActionModule inherits from ActionBase, we still need to
+# directly inherit from ActionBase to appease the plugin loader.
+class ActionModule(TemplateActionModule, ActionBase):
+ pass
diff --git a/v2/ansible/plugins/cache/__init__.py b/lib/ansible/plugins/cache/__init__.py
similarity index 89%
rename from v2/ansible/plugins/cache/__init__.py
rename to lib/ansible/plugins/cache/__init__.py
index 8ffe554cc63..323d8c5ca35 100644
--- a/v2/ansible/plugins/cache/__init__.py
+++ b/lib/ansible/plugins/cache/__init__.py
@@ -22,12 +22,20 @@ from collections import MutableMapping
from ansible import constants as C
from ansible.plugins import cache_loader
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
class FactCache(MutableMapping):
def __init__(self, *args, **kwargs):
self._plugin = cache_loader.get(C.CACHE_PLUGIN)
+ self._display = display
+
if self._plugin is None:
- # FIXME: this should be an exception
+ self._display.warning("Failed to load fact cache plugins")
return
def __getitem__(self, key):
diff --git a/v2/ansible/plugins/cache/base.py b/lib/ansible/plugins/cache/base.py
similarity index 83%
rename from v2/ansible/plugins/cache/base.py
rename to lib/ansible/plugins/cache/base.py
index 051f02d0b00..b3b6ece9002 100644
--- a/v2/ansible/plugins/cache/base.py
+++ b/lib/ansible/plugins/cache/base.py
@@ -20,11 +20,18 @@ __metaclass__ = type
from abc import ABCMeta, abstractmethod
-from six import add_metaclass
+from six import with_metaclass
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
-@add_metaclass(ABCMeta)
-class BaseCacheModule:
+
+class BaseCacheModule(with_metaclass(ABCMeta, object)):
+
+    _display = display
@abstractmethod
def get(self, key):
@@ -53,3 +60,4 @@ class BaseCacheModule:
@abstractmethod
def copy(self):
pass
+
diff --git a/lib/ansible/plugins/cache/jsonfile.py b/lib/ansible/plugins/cache/jsonfile.py
new file mode 100644
index 00000000000..04e05f9b0ce
--- /dev/null
+++ b/lib/ansible/plugins/cache/jsonfile.py
@@ -0,0 +1,152 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import time
+import errno
+import codecs
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from ansible import constants as C
+from ansible.errors import *
+from ansible.parsing.utils.jsonify import jsonify
+from ansible.plugins.cache.base import BaseCacheModule
+
+class CacheModule(BaseCacheModule):
+ """
+ A caching module backed by json files.
+ """
+ def __init__(self, *args, **kwargs):
+
+ self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
+ self._cache = {}
+ self._cache_dir = C.CACHE_PLUGIN_CONNECTION # expects a dir path
+ if not self._cache_dir:
+ raise AnsibleError("error, fact_caching_connection is not set, cannot use fact cache")
+
+ if not os.path.exists(self._cache_dir):
+ try:
+ os.makedirs(self._cache_dir)
+ except (OSError,IOError), e:
+ self._display.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e)))
+ return None
+
+ def get(self, key):
+
+ if key in self._cache:
+ return self._cache.get(key)
+
+ if self.has_expired(key):
+ raise KeyError
+
+ cachefile = "%s/%s" % (self._cache_dir, key)
+ try:
+ f = codecs.open(cachefile, 'r', encoding='utf-8')
+ except (OSError,IOError), e:
+ self._display.warning("error while trying to read %s : %s" % (cachefile, str(e)))
+ pass
+ else:
+ try:
+ value = json.load(f)
+ self._cache[key] = value
+ return value
+            except ValueError, e:
+                self._display.warning("error while trying to read %s : %s" % (cachefile, str(e)))
+ raise KeyError
+ finally:
+ f.close()
+
+ def set(self, key, value):
+
+ self._cache[key] = value
+
+ cachefile = "%s/%s" % (self._cache_dir, key)
+ try:
+ f = codecs.open(cachefile, 'w', encoding='utf-8')
+ except (OSError,IOError), e:
+ self._display.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
+ pass
+ else:
+ f.write(jsonify(value))
+            f.close()
+
+ def has_expired(self, key):
+
+ cachefile = "%s/%s" % (self._cache_dir, key)
+ try:
+ st = os.stat(cachefile)
+ except (OSError,IOError), e:
+ if e.errno == errno.ENOENT:
+ return False
+ else:
+ self._display.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
+ pass
+
+ if time.time() - st.st_mtime <= self._timeout:
+ return False
+
+ if key in self._cache:
+ del self._cache[key]
+ return True
+
+ def keys(self):
+ keys = []
+ for k in os.listdir(self._cache_dir):
+ if not (k.startswith('.') or self.has_expired(k)):
+ keys.append(k)
+ return keys
+
+ def contains(self, key):
+ cachefile = "%s/%s" % (self._cache_dir, key)
+
+ if key in self._cache:
+ return True
+
+ if self.has_expired(key):
+ return False
+ try:
+ st = os.stat(cachefile)
+ return True
+ except (OSError,IOError), e:
+ if e.errno == errno.ENOENT:
+ return False
+ else:
+ self._display.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
+ pass
+
+ def delete(self, key):
+ del self._cache[key]
+ try:
+ os.remove("%s/%s" % (self._cache_dir, key))
+ except (OSError,IOError), e:
+ pass #TODO: only pass on non existing?
+
+ def flush(self):
+ self._cache = {}
+ for key in self.keys():
+ self.delete(key)
+
+ def copy(self):
+ ret = dict()
+ for key in self.keys():
+ ret[key] = self.get(key)
+ return ret
diff --git a/v2/ansible/plugins/cache/memcached.py b/lib/ansible/plugins/cache/memcached.py
similarity index 98%
rename from v2/ansible/plugins/cache/memcached.py
rename to lib/ansible/plugins/cache/memcached.py
index e7321a5a6b5..a34855bafc4 100644
--- a/v2/ansible/plugins/cache/memcached.py
+++ b/lib/ansible/plugins/cache/memcached.py
@@ -191,3 +191,9 @@ class CacheModule(BaseCacheModule):
def copy(self):
return self._keys.copy()
+
+ def __getstate__(self):
+ return dict()
+
+ def __setstate__(self, data):
+ self.__init__()
diff --git a/v2/ansible/plugins/cache/memory.py b/lib/ansible/plugins/cache/memory.py
similarity index 91%
rename from v2/ansible/plugins/cache/memory.py
rename to lib/ansible/plugins/cache/memory.py
index 15628361513..417ef20e0ed 100644
--- a/v2/ansible/plugins/cache/memory.py
+++ b/lib/ansible/plugins/cache/memory.py
@@ -44,3 +44,9 @@ class CacheModule(BaseCacheModule):
def copy(self):
return self._cache.copy()
+
+ def __getstate__(self):
+ return self.copy()
+
+ def __setstate__(self, data):
+ self._cache = data
diff --git a/v2/ansible/plugins/cache/redis.py b/lib/ansible/plugins/cache/redis.py
similarity index 93%
rename from v2/ansible/plugins/cache/redis.py
rename to lib/ansible/plugins/cache/redis.py
index 287c14bd2a2..99ecbffcb4b 100644
--- a/v2/ansible/plugins/cache/redis.py
+++ b/lib/ansible/plugins/cache/redis.py
@@ -28,8 +28,7 @@ from ansible.plugins.cache.base import BaseCacheModule
try:
from redis import StrictRedis
except ImportError:
- print("The 'redis' python module is required, 'pip install redis'")
- sys.exit(1)
+ raise AnsibleError("The 'redis' python module is required for the redis fact cache, 'pip install redis'")
class CacheModule(BaseCacheModule):
"""
@@ -100,3 +99,9 @@ class CacheModule(BaseCacheModule):
for key in self.keys():
ret[key] = self.get(key)
return ret
+
+ def __getstate__(self):
+ return dict()
+
+ def __setstate__(self, data):
+ self.__init__()
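The __getstate__/__setstate__ pair added to the memcached and redis backends keeps live client connections out of the pickled state and simply re-initialises on restore; a rough stand-alone sketch of that pattern, with a dummy object in place of a real client:

    import pickle

    class CacheModule(object):
        def __init__(self):
            # stands in for opening a memcached/redis connection
            self._conn = object()

        def __getstate__(self):
            # carry nothing across process boundaries
            return dict()

        def __setstate__(self, data):
            # re-run __init__ so the restored copy opens its own connection
            self.__init__()

    original = CacheModule()
    restored = pickle.loads(pickle.dumps(original))
    print(restored._conn is not original._conn)   # True: a fresh connection object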
diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py
new file mode 100644
index 00000000000..734db24d66c
--- /dev/null
+++ b/lib/ansible/plugins/callback/__init__.py
@@ -0,0 +1,283 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division)
+__metaclass__ = type
+
+import json
+import difflib
+import warnings
+from copy import deepcopy
+
+from six import string_types
+
+from ansible import constants as C
+from ansible.utils.unicode import to_unicode
+
+__all__ = ["CallbackBase"]
+
+
+class CallbackBase:
+
+ '''
+ This is a base ansible callback class that does nothing. New callbacks should
+ use this class as a base and override any callback methods they wish to execute
+ custom actions.
+ '''
+
+ # FIXME: the list of functions here needs to be updated once we have
+ # finalized the list of callback methods used in the default callback
+
+ def __init__(self, display):
+ self._display = display
+ if self._display.verbosity >= 4:
+ name = getattr(self, 'CALLBACK_NAME', 'unnamed')
+ ctype = getattr(self, 'CALLBACK_TYPE', 'old')
+ version = getattr(self, 'CALLBACK_VERSION', '1.0')
+ self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version))
+
+ def _dump_results(self, result, indent=None, sort_keys=True):
+
+ if result.get('_ansible_no_log', False):
+ return json.dumps(dict(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result"))
+
+ if not indent and '_ansible_verbose_always' in result and result['_ansible_verbose_always']:
+ indent = 4
+
+        # All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
+ for k in result.keys():
+ if isinstance(k, string_types) and k.startswith('_ansible_'):
+ del result[k]
+
+ return json.dumps(result, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
+
+ def _handle_warnings(self, res):
+ ''' display warnings, if enabled and any exist in the result '''
+ if C.COMMAND_WARNINGS and 'warnings' in res and res['warnings']:
+ for warning in res['warnings']:
+ self._display.warning(warning)
+
+ def _get_diff(self, diff):
+ try:
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore')
+ ret = []
+ if 'dst_binary' in diff:
+ ret.append("diff skipped: destination file appears to be binary\n")
+ if 'src_binary' in diff:
+ ret.append("diff skipped: source file appears to be binary\n")
+ if 'dst_larger' in diff:
+ ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
+ if 'src_larger' in diff:
+ ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
+ if 'before' in diff and 'after' in diff:
+ if 'before_header' in diff:
+ before_header = "before: %s" % diff['before_header']
+ else:
+ before_header = 'before'
+ if 'after_header' in diff:
+ after_header = "after: %s" % diff['after_header']
+ else:
+ after_header = 'after'
+ differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
+ for line in list(differ):
+ ret.append(line)
+ return u"".join(ret)
+ except UnicodeDecodeError:
+ return ">> the files are different, but the diff library cannot compare unicode strings"
+
+ def _process_items(self, result):
+
+ for res in result._result['results']:
+ newres = deepcopy(result)
+ newres._result = res
+ if 'failed' in res and res['failed']:
+ self.v2_playbook_item_on_failed(newres)
+ elif 'skipped' in res and res['skipped']:
+ self.v2_playbook_item_on_skipped(newres)
+ else:
+ self.v2_playbook_item_on_ok(newres)
+
+ del result._result['results']
+
+ def set_play_context(self, play_context):
+ pass
+
+ def on_any(self, *args, **kwargs):
+ pass
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ pass
+
+ def runner_on_ok(self, host, res):
+ pass
+
+ def runner_on_skipped(self, host, item=None):
+ pass
+
+ def runner_on_unreachable(self, host, res):
+ pass
+
+ def runner_on_no_hosts(self):
+ pass
+
+ def runner_on_async_poll(self, host, res, jid, clock):
+ pass
+
+ def runner_on_async_ok(self, host, res, jid):
+ pass
+
+ def runner_on_async_failed(self, host, res, jid):
+ pass
+
+ def playbook_on_start(self):
+ pass
+
+ def playbook_on_notify(self, host, handler):
+ pass
+
+ def playbook_on_no_hosts_matched(self):
+ pass
+
+ def playbook_on_no_hosts_remaining(self):
+ pass
+
+ def playbook_on_task_start(self, name, is_conditional):
+ pass
+
+ def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+ pass
+
+ def playbook_on_setup(self):
+ pass
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ pass
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ pass
+
+ def playbook_on_play_start(self, name):
+ pass
+
+ def playbook_on_stats(self, stats):
+ pass
+
+ def on_file_diff(self, host, diff):
+ pass
+
+ ####### V2 METHODS, by default they call v1 counterparts if possible ######
+ def v2_on_any(self, *args, **kwargs):
+ self.on_any(args, kwargs)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ host = result._host.get_name()
+ self.runner_on_failed(host, result._result, ignore_errors)
+
+ def v2_runner_on_ok(self, result):
+ host = result._host.get_name()
+ self.runner_on_ok(host, result._result)
+
+ def v2_runner_on_skipped(self, result):
+ host = result._host.get_name()
+ #FIXME, get item to pass through
+ item = None
+ self.runner_on_skipped(host, item)
+
+ def v2_runner_on_unreachable(self, result):
+ host = result._host.get_name()
+ self.runner_on_unreachable(host, result._result)
+
+ def v2_runner_on_no_hosts(self, task):
+ self.runner_on_no_hosts()
+
+ def v2_runner_on_async_poll(self, result):
+ host = result._host.get_name()
+ jid = result._result.get('ansible_job_id')
+ #FIXME, get real clock
+ clock = 0
+ self.runner_on_async_poll(host, result._result, jid, clock)
+
+ def v2_runner_on_async_ok(self, result):
+ host = result._host.get_name()
+ jid = result._result.get('ansible_job_id')
+ self.runner_on_async_ok(host, result._result, jid)
+
+ def v2_runner_on_async_failed(self, result):
+ host = result._host.get_name()
+ jid = result._result.get('ansible_job_id')
+ self.runner_on_async_failed(host, result._result, jid)
+
+ def v2_runner_on_file_diff(self, result, diff):
+        pass # no v1 correspondence
+
+ def v2_playbook_on_start(self):
+ self.playbook_on_start()
+
+ def v2_playbook_on_notify(self, result, handler):
+ host = result._host.get_name()
+ self.playbook_on_notify(host, handler)
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self.playbook_on_no_hosts_matched()
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self.playbook_on_no_hosts_remaining()
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.playbook_on_task_start(task, is_conditional)
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ pass #no v1 correspondance
+
+ def v2_playbook_on_handler_task_start(self, task):
+ pass # no v1 correspondence
+
+ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+ self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default)
+
+ def v2_playbook_on_setup(self):
+ self.playbook_on_setup()
+
+ def v2_playbook_on_import_for_host(self, result, imported_file):
+ host = result._host.get_name()
+ self.playbook_on_import_for_host(host, imported_file)
+
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
+ host = result._host.get_name()
+ self.playbook_on_not_import_for_host(host, missing_file)
+
+ def v2_playbook_on_play_start(self, play):
+ self.playbook_on_play_start(play.name)
+
+ def v2_playbook_on_stats(self, stats):
+ self.playbook_on_stats(stats)
+
+ def v2_on_file_diff(self, result):
+ host = result._host.get_name()
+ if 'diff' in result._result:
+ self.on_file_diff(host, result._result['diff'])
+
+ def v2_playbook_on_item_ok(self, result):
+ pass # no v1
+
+ def v2_playbook_on_item_failed(self, result):
+ pass # no v1
+
+ def v2_playbook_on_item_skipped(self, result):
+ pass # no v1
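
# --- Illustrative sketch, not part of this patch -----------------------------
# A minimal v2 callback plugin built on the CallbackBase adapter above: only
# the v2_* hooks it cares about are overridden, everything else falls back to
# the v1 bridge/no-op methods defined in CallbackBase.  The plugin name and
# message text below are invented for illustration.
from ansible.plugins.callback import CallbackBase

class CallbackModule(CallbackBase):

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'changed_hosts'   # hypothetical name

    def v2_runner_on_ok(self, result):
        # result is a TaskResult; _host and _result are used the same way as
        # in the bundled plugins below
        if result._result.get('changed', False):
            self._display.display("changed: [%s]" % result._host.get_name())
# -----------------------------------------------------------------------------
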
diff --git a/plugins/callbacks/context_demo.py b/lib/ansible/plugins/callback/context_demo.py
similarity index 63%
rename from plugins/callbacks/context_demo.py
rename to lib/ansible/plugins/callback/context_demo.py
index 5c3015d85f6..ad22ead07df 100644
--- a/plugins/callbacks/context_demo.py
+++ b/lib/ansible/plugins/callback/context_demo.py
@@ -15,17 +15,24 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-import os
-import time
-import json
+from ansible.plugins.callback import CallbackBase
-class CallbackModule(object):
+class CallbackModule(CallbackBase):
"""
This is a very trivial example of how any callback function can get at play and task objects.
play will be 'None' for runner invocations, and task will be None for 'setup' invocations.
"""
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'context_demo'
- def on_any(self, *args, **kwargs):
- play = getattr(self, 'play', None)
- task = getattr(self, 'task', None)
- print "play = %s, task = %s, args = %s, kwargs = %s" % (play,task,args,kwargs)
+ def v2_on_any(self, *args, **kwargs):
+ i = 0
+ self._display.display(" --- ARGS ")
+ for a in args:
+ self._display.display(' %s: %s' % (i, a))
+ i += 1
+
+ self._display.display(" --- KWARGS ")
+ for k in kwargs:
+ self._display.display(' %s: %s' % (k, kwargs[k]))
diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py
new file mode 100644
index 00000000000..92cf68a4e0c
--- /dev/null
+++ b/lib/ansible/plugins/callback/default.py
@@ -0,0 +1,155 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.callback import CallbackBase
+
+class CallbackModule(CallbackBase):
+
+ '''
+ This is the default callback interface, which simply prints messages
+ to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'default'
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if 'exception' in result._result:
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = result._result['exception'].strip().split('\n')[-1]
+ msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
+
+ self._display.display(msg, color='red')
+
+ # finally, remove the exception from the result so it's not shown every time
+ del result._result['exception']
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
+
+ if result._task.ignore_errors:
+ self._display.display("...ignoring", color='cyan')
+
+ def v2_runner_on_ok(self, result):
+
+ if result._task.action == 'include':
+ msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
+ color = 'cyan'
+ elif result._result.get('changed', False):
+ msg = "changed: [%s]" % result._host.get_name()
+ color = 'yellow'
+ else:
+ msg = "ok: [%s]" % result._host.get_name()
+ color = 'green'
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result and result._task.action != 'include':
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=color)
+
+ self._handle_warnings(result._result)
+
+ def v2_runner_on_skipped(self, result):
+ msg = "skipping: [%s]" % result._host.get_name()
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color='cyan')
+
+ def v2_runner_on_unreachable(self, result):
+ self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self._display.display("skipping: no hosts matched", color='cyan')
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self._display.banner("NO MORE HOSTS LEFT")
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._display.banner("TASK [%s]" % task.get_name().strip())
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
+
+ def v2_playbook_on_play_start(self, play):
+ name = play.get_name().strip()
+ if not name:
+ msg = "PLAY"
+ else:
+ msg = "PLAY [%s]" % name
+
+ self._display.banner(msg)
+
+ def v2_on_file_diff(self, result):
+ if 'diff' in result._result and result._result['diff'] != {}:
+ self._display.display(self._get_diff(result._result['diff']))
+
+ def v2_playbook_item_on_ok(self, result):
+
+ if result._task.action == 'include':
+ msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
+ color = 'cyan'
+ elif result._result.get('changed', False):
+ msg = "changed: [%s]" % result._host.get_name()
+ color = 'yellow'
+ else:
+ msg = "ok: [%s]" % result._host.get_name()
+ color = 'green'
+
+ msg += " => (item=%s)" % result._result['item']
+
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result and result._task.action != 'include':
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=color)
+
+ def v2_playbook_item_on_failed(self, result):
+ if 'exception' in result._result:
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = result._result['exception'].strip().split('\n')[-1]
+ msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
+
+ self._display.display(msg, color='red')
+
+ # finally, remove the exception from the result so it's not shown every time
+ del result._result['exception']
+
+ self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red')
+ self._handle_warnings(result._result)
+
+ def v2_playbook_item_on_skipped(self, result):
+ msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), result._result['item'])
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color='cyan')
+
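
# --- Illustrative sketch, not part of this patch -----------------------------
# The verbosity gate repeated throughout default.py above decides whether the
# full result dict gets appended to the one-line status.  Restated as a small
# standalone helper (the function name is mine, for illustration only):
def wants_verbose_dump(verbosity, result, action):
    """True when the callback should append the JSON dump of the result."""
    if '_ansible_verbose_override' in result:
        return False          # e.g. fact gathering asks to be summarized
    if action == 'include':
        return False          # includes only print the 'included:' line
    return verbosity > 0 or '_ansible_verbose_always' in result

# wants_verbose_dump(0, {'_ansible_verbose_always': True}, 'debug')  -> True
# wants_verbose_dump(0, {'changed': True}, 'command')                -> False
# -----------------------------------------------------------------------------
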
diff --git a/plugins/callbacks/hipchat.py b/lib/ansible/plugins/callback/hipchat.py
similarity index 77%
rename from plugins/callbacks/hipchat.py
rename to lib/ansible/plugins/callback/hipchat.py
index 45c2e2c8197..b0d1bfb67e6 100644
--- a/plugins/callbacks/hipchat.py
+++ b/lib/ansible/plugins/callback/hipchat.py
@@ -19,16 +19,15 @@ import os
import urllib
import urllib2
-from ansible import utils
-
try:
import prettytable
HAS_PRETTYTABLE = True
except ImportError:
HAS_PRETTYTABLE = False
+from ansible.plugins.callback import CallbackBase
-class CallbackModule(object):
+class CallbackModule(CallbackBase):
"""This is an example ansible callback plugin that sends status
updates to a HipChat channel during playbook execution.
@@ -42,11 +41,17 @@ class CallbackModule(object):
prettytable
"""
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'hipchat'
+
+ def __init__(self, display):
+
+ super(CallbackModule, self).__init__(display)
- def __init__(self):
if not HAS_PRETTYTABLE:
self.disabled = True
- utils.warning('The `prettytable` python module is not installed. '
+ self._display.warning('The `prettytable` python module is not installed. '
'Disabling the HipChat callback plugin.')
self.msg_uri = 'https://api.hipchat.com/v1/rooms/message'
@@ -57,7 +62,7 @@ class CallbackModule(object):
if self.token is None:
self.disabled = True
- utils.warning('HipChat token could not be loaded. The HipChat '
+ self._display.warning('HipChat token could not be loaded. The HipChat '
'token can be provided using the `HIPCHAT_TOKEN` '
'environment variable.')
@@ -80,63 +85,8 @@ class CallbackModule(object):
response = urllib2.urlopen(url, urllib.urlencode(params))
return response.read()
except:
- utils.warning('Could not submit message to hipchat')
-
- def on_any(self, *args, **kwargs):
- pass
-
- def runner_on_failed(self, host, res, ignore_errors=False):
- pass
-
- def runner_on_ok(self, host, res):
- pass
-
- def runner_on_skipped(self, host, item=None):
- pass
-
- def runner_on_unreachable(self, host, res):
- pass
-
- def runner_on_no_hosts(self):
- pass
-
- def runner_on_async_poll(self, host, res, jid, clock):
- pass
-
- def runner_on_async_ok(self, host, res, jid):
- pass
-
- def runner_on_async_failed(self, host, res, jid):
- pass
-
- def playbook_on_start(self):
- pass
-
- def playbook_on_notify(self, host, handler):
- pass
-
- def playbook_on_no_hosts_matched(self):
- pass
-
- def playbook_on_no_hosts_remaining(self):
- pass
-
- def playbook_on_task_start(self, name, is_conditional):
- pass
-
- def playbook_on_vars_prompt(self, varname, private=True, prompt=None,
- encrypt=None, confirm=False, salt_size=None,
- salt=None, default=None):
- pass
-
- def playbook_on_setup(self):
- pass
-
- def playbook_on_import_for_host(self, host, imported_file):
- pass
+ self._display.warning('Could not submit message to hipchat')
- def playbook_on_not_import_for_host(self, host, missing_file):
- pass
def playbook_on_play_start(self, name):
"""Display Playbook and play start messages"""
diff --git a/lib/ansible/plugins/callback/log_plays.py b/lib/ansible/plugins/callback/log_plays.py
new file mode 100644
index 00000000000..11e0accea5f
--- /dev/null
+++ b/lib/ansible/plugins/callback/log_plays.py
@@ -0,0 +1,86 @@
+# (C) 2012, Michael DeHaan,
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import time
+import json
+
+from ansible.plugins.callback import CallbackBase
+
+# NOTE: in Ansible 1.2 or later general logging is available without
+# this plugin, just set ANSIBLE_LOG_PATH as an environment variable
+# or log_path in the DEFAULTS section of your ansible configuration
+# file. This callback is an example of per hosts logging for those
+# that want it.
+
+
+class CallbackModule(CallbackBase):
+ """
+ logs playbook results, per host, in /var/log/ansible/hosts
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'log_plays'
+
+ TIME_FORMAT="%b %d %Y %H:%M:%S"
+ MSG_FORMAT="%(now)s - %(category)s - %(data)s\n\n"
+
+ def __init__(self, display):
+
+ super(CallbackModule, self).__init__(display)
+
+ if not os.path.exists("/var/log/ansible/hosts"):
+ os.makedirs("/var/log/ansible/hosts")
+
+ def log(self, host, category, data):
+ if type(data) == dict:
+ if '_ansible_verbose_override' in data:
+ # avoid logging extraneous data
+ data = 'omitted'
+ else:
+ data = data.copy()
+ invocation = data.pop('invocation', None)
+ data = json.dumps(data)
+ if invocation is not None:
+ data = json.dumps(invocation) + " => %s " % data
+
+ path = os.path.join("/var/log/ansible/hosts", host)
+ now = time.strftime(self.TIME_FORMAT, time.localtime())
+ fd = open(path, "a")
+ fd.write(self.MSG_FORMAT % dict(now=now, category=category, data=data))
+ fd.close()
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ self.log(host, 'FAILED', res)
+
+ def runner_on_ok(self, host, res):
+ self.log(host, 'OK', res)
+
+ def runner_on_skipped(self, host, item=None):
+ self.log(host, 'SKIPPED', '...')
+
+ def runner_on_unreachable(self, host, res):
+ self.log(host, 'UNREACHABLE', res)
+
+ def runner_on_async_failed(self, host, res, jid):
+ self.log(host, 'ASYNC_FAILED', res)
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ self.log(host, 'IMPORTED', imported_file)
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ self.log(host, 'NOTIMPORTED', missing_file)
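
# --- Illustrative sketch, not part of this patch -----------------------------
# Each event handled by log_plays above appends one MSG_FORMAT record to a
# per-host file under /var/log/ansible/hosts.  The snippet below just
# reproduces the record layout on stdout so the format is easy to see
# (category and data values are invented):
import json
import time

TIME_FORMAT = "%b %d %Y %H:%M:%S"
MSG_FORMAT = "%(now)s - %(category)s - %(data)s\n\n"

now = time.strftime(TIME_FORMAT, time.localtime())
record = MSG_FORMAT % dict(now=now, category='OK',
                           data=json.dumps({'changed': False, 'ping': 'pong'}))
print(record)   # e.g. 'Jun 01 2015 12:00:00 - OK - {"changed": false, "ping": "pong"}'
# -----------------------------------------------------------------------------
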
diff --git a/plugins/callbacks/mail.py b/lib/ansible/plugins/callback/mail.py
similarity index 61%
rename from plugins/callbacks/mail.py
rename to lib/ansible/plugins/callback/mail.py
index e21961079cd..3357e014093 100644
--- a/plugins/callbacks/mail.py
+++ b/lib/ansible/plugins/callback/mail.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers
#
# This file is part of Ansible
@@ -15,13 +16,24 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+import os
import smtplib
+import json
+from ansible.plugins.callback import CallbackBase
-def mail(subject='Ansible error mail', sender='', to='root', cc=None, bcc=None, body=None):
- if not body:
+def mail(subject='Ansible error mail', sender=None, to=None, cc=None, bcc=None, body=None, smtphost=None):
+
+ if sender is None:
+ sender=''
+ if to is None:
+ to='root'
+ if smtphost is None:
+ smtphost=os.getenv('SMTPHOST', 'localhost')
+
+ if body is None:
body = subject
- smtp = smtplib.SMTP('localhost')
+ smtp = smtplib.SMTP(smtphost)
content = 'From: %s\n' % sender
content += 'To: %s\n' % to
@@ -42,31 +54,45 @@ def mail(subject='Ansible error mail', sender='', to='root', cc=None, bcc=
smtp.quit()
-class CallbackModule(object):
-
+class CallbackModule(CallbackBase):
"""
This Ansible callback plugin mails errors to interested parties.
"""
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'mail'
+
+ def v2_runner_on_failed(self, res, ignore_errors=False):
+
+ host = res._host.get_name()
- def runner_on_failed(self, host, res, ignore_errors=False):
if ignore_errors:
return
sender = '"Ansible: %s" ' % host
- subject = 'Failed: %(module_name)s %(module_args)s' % res['invocation']
- body = 'The following task failed for host ' + host + ':\n\n%(module_name)s %(module_args)s\n\n' % res['invocation']
- if 'stdout' in res.keys() and res['stdout']:
- subject = res['stdout'].strip('\r\n').split('\n')[-1]
- body += 'with the following output in standard output:\n\n' + res['stdout'] + '\n\n'
- if 'stderr' in res.keys() and res['stderr']:
+ attach = res._task.action
+ if 'invocation' in res._result:
+ attach = "%s: %s" % (res._result['invocation']['module_name'], json.dumps(res._result['invocation']['module_args']))
+
+ subject = 'Failed: %s' % attach
+ body = 'The following task failed for host ' + host + ':\n\n%s\n\n' % attach
+
+ if 'stdout' in res._result.keys() and res._result['stdout']:
+ subject = res._result['stdout'].strip('\r\n').split('\n')[-1]
+ body += 'with the following output in standard output:\n\n' + res._result['stdout'] + '\n\n'
+ if 'stderr' in res._result.keys() and res._result['stderr']:
- subject = res['stderr'].strip('\r\n').split('\n')[-1]
+ subject = res._result['stderr'].strip('\r\n').split('\n')[-1]
- body += 'with the following output in standard error:\n\n' + res['stderr'] + '\n\n'
- if 'msg' in res.keys() and res['msg']:
- subject = res['msg'].strip('\r\n').split('\n')[0]
- body += 'with the following message:\n\n' + res['msg'] + '\n\n'
- body += 'A complete dump of the error:\n\n' + str(res)
+ body += 'with the following output in standard error:\n\n' + res._result['stderr'] + '\n\n'
+ if 'msg' in res._result.keys() and res._result['msg']:
+ subject = res._result['msg'].strip('\r\n').split('\n')[0]
+ body += 'with the following message:\n\n' + res._result['msg'] + '\n\n'
+ body += 'A complete dump of the error:\n\n' + self._dump_results(res._result)
mail(sender=sender, subject=subject, body=body)
-
- def runner_on_unreachable(self, host, res):
+
+ def v2_runner_on_unreachable(self, result):
+
+ host = result._host.get_name()
+ res = result._result
+
sender = '"Ansible: %s" ' % host
if isinstance(res, basestring):
subject = 'Unreachable: %s' % res.strip('\r\n').split('\n')[-1]
@@ -77,7 +103,11 @@ class CallbackModule(object):
res['msg'] + '\n\nA complete dump of the error:\n\n' + str(res)
mail(sender=sender, subject=subject, body=body)
- def runner_on_async_failed(self, host, res, jid):
+ def v2_runner_on_async_failed(self, result):
+
+ host = result._host.get_name()
+ res = result._result
+
sender = '"Ansible: %s" ' % host
if isinstance(res, basestring):
subject = 'Async failure: %s' % res.strip('\r\n').split('\n')[-1]
diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py
new file mode 100644
index 00000000000..d03a023605e
--- /dev/null
+++ b/lib/ansible/plugins/callback/minimal.py
@@ -0,0 +1,81 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.callback import CallbackBase
+from ansible import constants as C
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+ Minimal stdout callback: prints one condensed result block per host as
+ events are received (the style used for ad-hoc ansible runs).
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'minimal'
+
+ def _command_generic_msg(self, host, result, caption):
+ ''' output the result of a command run '''
+
+ buf = "%s | %s | rc=%s >>\n" % (host, caption, result.get('rc',0))
+ buf += result.get('stdout','')
+ buf += result.get('stderr','')
+ buf += result.get('msg','')
+
+ return buf + "\n"
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if 'exception' in result._result:
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = result._result['exception'].strip().split('\n')[-1]
+ msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
+
+ self._display.display(msg, color='red')
+
+ # finally, remove the exception from the result so it's not shown every time
+ del result._result['exception']
+
+ if result._task.action in C.MODULE_NO_JSON:
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result,"FAILED"), color='red')
+ else:
+ self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='red')
+
+ def v2_runner_on_ok(self, result):
+ if result._task.action in C.MODULE_NO_JSON:
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result,"SUCCESS"), color='green')
+ else:
+ self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='green')
+ self._handle_warnings(result._result)
+
+ def v2_runner_on_skipped(self, result):
+ self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan')
+
+ def v2_runner_on_unreachable(self, result):
+ self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow')
+
+ def v2_on_file_diff(self, result):
+ if 'diff' in result._result and result._result['diff'] != {}:
+ self._display.display(self._get_diff(result._result['diff']))
diff --git a/lib/ansible/plugins/callback/oneline.py b/lib/ansible/plugins/callback/oneline.py
new file mode 100644
index 00000000000..a99b680c05c
--- /dev/null
+++ b/lib/ansible/plugins/callback/oneline.py
@@ -0,0 +1,75 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.callback import CallbackBase
+from ansible import constants as C
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+ Stdout callback that prints one line per host, matching the condensed
+ -o/--one-line ad-hoc output.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'oneline'
+
+ def _command_generic_msg(self, hostname, result, caption):
+ stdout = result.get('stdout','').replace('\n', '\\n')
+ if 'stderr' in result and result['stderr']:
+ stderr = result.get('stderr','').replace('\n', '\\n')
+ return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, result.get('rc',0), stdout, stderr)
+ else:
+ return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, result.get('rc',0), stdout)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if 'exception' in result._result:
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = result._result['exception'].strip().split('\n')[-1]
+ msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n','')
+
+ if result._task.action in C.MODULE_NO_JSON:
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color='red')
+ else:
+ self._display.display(msg, color='red')
+
+ # finally, remove the exception from the result so it's not shown every time
+ del result._result['exception']
+
+ self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='red')
+
+ def v2_runner_on_ok(self, result):
+ if result._task.action in C.MODULE_NO_JSON:
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color='green')
+ else:
+ self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='green')
+
+
+ def v2_runner_on_unreachable(self, result):
+ self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow')
+
+ def v2_runner_on_skipped(self, result):
+ self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan')
diff --git a/plugins/callbacks/osx_say.py b/lib/ansible/plugins/callback/osx_say.py
similarity index 54%
rename from plugins/callbacks/osx_say.py
rename to lib/ansible/plugins/callback/osx_say.py
index 174a03300f1..36b053026e2 100644
--- a/plugins/callbacks/osx_say.py
+++ b/lib/ansible/plugins/callback/osx_say.py
@@ -19,87 +19,70 @@
import subprocess
import os
+from ansible.plugins.callback import CallbackBase
+
FAILED_VOICE="Zarvox"
REGULAR_VOICE="Trinoids"
HAPPY_VOICE="Cellos"
LASER_VOICE="Princess"
SAY_CMD="/usr/bin/say"
-def say(msg, voice):
- subprocess.call([SAY_CMD, msg, "--voice=%s" % (voice)])
-
-class CallbackModule(object):
+class CallbackModule(CallbackBase):
"""
makes Ansible much more exciting on OS X.
"""
- def __init__(self):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'osx_say'
+
+ def __init__(self, display):
+
+ super(CallbackModule, self).__init__(display)
+
# plugin disable itself if say is not present
# ansible will not call any callback if disabled is set to True
if not os.path.exists(SAY_CMD):
self.disabled = True
- print "%s does not exist, plugin %s disabled" % \
- (SAY_CMD, os.path.basename(__file__))
+ self._display.warning("%s does not exist, plugin %s disabled" % (SAY_CMD, os.path.basename(__file__)) )
- def on_any(self, *args, **kwargs):
- pass
+ def say(self, msg, voice):
+ subprocess.call([SAY_CMD, msg, "--voice=%s" % (voice)])
def runner_on_failed(self, host, res, ignore_errors=False):
- say("Failure on host %s" % host, FAILED_VOICE)
+ self.say("Failure on host %s" % host, FAILED_VOICE)
def runner_on_ok(self, host, res):
- say("pew", LASER_VOICE)
+ self.say("pew", LASER_VOICE)
def runner_on_skipped(self, host, item=None):
- say("pew", LASER_VOICE)
+ self.say("pew", LASER_VOICE)
def runner_on_unreachable(self, host, res):
- say("Failure on host %s" % host, FAILED_VOICE)
-
- def runner_on_no_hosts(self):
- pass
-
- def runner_on_async_poll(self, host, res, jid, clock):
- pass
+ self.say("Failure on host %s" % host, FAILED_VOICE)
def runner_on_async_ok(self, host, res, jid):
- say("pew", LASER_VOICE)
+ self.say("pew", LASER_VOICE)
def runner_on_async_failed(self, host, res, jid):
- say("Failure on host %s" % host, FAILED_VOICE)
+ self.say("Failure on host %s" % host, FAILED_VOICE)
def playbook_on_start(self):
- say("Running Playbook", REGULAR_VOICE)
+ self.say("Running Playbook", REGULAR_VOICE)
def playbook_on_notify(self, host, handler):
- say("pew", LASER_VOICE)
-
- def playbook_on_no_hosts_matched(self):
- pass
-
- def playbook_on_no_hosts_remaining(self):
- pass
+ self.say("pew", LASER_VOICE)
def playbook_on_task_start(self, name, is_conditional):
if not is_conditional:
- say("Starting task: %s" % name, REGULAR_VOICE)
+ self.say("Starting task: %s" % name, REGULAR_VOICE)
else:
- say("Notifying task: %s" % name, REGULAR_VOICE)
-
- def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
- pass
+ self.say("Notifying task: %s" % name, REGULAR_VOICE)
def playbook_on_setup(self):
- say("Gathering facts", REGULAR_VOICE)
-
- def playbook_on_import_for_host(self, host, imported_file):
- pass
-
- def playbook_on_not_import_for_host(self, host, missing_file):
- pass
+ self.say("Gathering facts", REGULAR_VOICE)
def playbook_on_play_start(self, name):
- say("Starting play: %s" % name, HAPPY_VOICE)
+ self.say("Starting play: %s" % name, HAPPY_VOICE)
def playbook_on_stats(self, stats):
- say("Play complete", HAPPY_VOICE)
-
+ self.say("Play complete", HAPPY_VOICE)
diff --git a/lib/ansible/plugins/callback/profile_tasks.py b/lib/ansible/plugins/callback/profile_tasks.py
new file mode 100644
index 00000000000..f873b75ead0
--- /dev/null
+++ b/lib/ansible/plugins/callback/profile_tasks.py
@@ -0,0 +1,113 @@
+# (C) 2015, Tom Paine,
+# (C) 2014, Jharrod LaFon, @JharrodLaFon
+# (C) 2012-2013, Michael DeHaan,
+#
+# This file is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# File is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# See <http://www.gnu.org/licenses/> for a copy of the
+# GNU General Public License
+
+# Provides per-task timing, ongoing playbook elapsed time and
+# ordered list of top 20 longest running tasks at end
+
+import time
+
+from ansible.plugins.callback import CallbackBase
+
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
+# define start time
+t0 = tn = time.time()
+
+def secondsToStr(t):
+ # http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds
+ rediv = lambda ll, b: list(divmod(ll[0], b)) + ll[1:]
+ return "%d:%02d:%02d.%03d" % tuple(reduce(rediv, [[t * 1000, ], 1000, 60, 60]))
+
+
+def filled(msg, fchar="*"):
+ if len(msg) == 0:
+ width = 79
+ else:
+ msg = "%s " % msg
+ width = 79 - len(msg)
+ if width < 3:
+ width = 3
+ filler = fchar * width
+ return "%s%s " % (msg, filler)
+
+
+def timestamp(self):
+ if self.current is not None:
+ self.stats[self.current] = time.time() - self.stats[self.current]
+
+
+def tasktime():
+ global tn
+ time_current = time.strftime('%A %d %B %Y %H:%M:%S %z')
+ time_elapsed = secondsToStr(time.time() - tn)
+ time_total_elapsed = secondsToStr(time.time() - t0)
+ display.display(filled('%s (%s)%s%s' % (time_current, time_elapsed, ' ' * 7, time_total_elapsed)))
+ tn = time.time()
+
+
+class CallbackModule(CallbackBase):
+ """
+ This callback module provides per-task timing, ongoing playbook elapsed time
+ and ordered list of top 20 longest running tasks at end.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'profile_tasks'
+
+ def __init__(self, display):
+ self.stats = {}
+ self.current = None
+
+ super(CallbackModule, self).__init__(display)
+
+
+ def playbook_on_task_start(self, name, is_conditional):
+ """
+ Logs the start of each task
+ """
+ tasktime()
+ timestamp(self)
+
+ # Record the start time of the current task
+ self.current = name
+ self.stats[self.current] = time.time()
+
+ def playbook_on_setup(self):
+ tasktime()
+
+ def playbook_on_stats(self, stats):
+ tasktime()
+ display.display(filled("", fchar="="))
+
+ timestamp(self)
+
+ # Sort the tasks by their running time
+ results = sorted(
+ self.stats.items(),
+ key=lambda value: value[1],
+ reverse=True,
+ )
+
+ # Just keep the top 20
+ results = results[:20]
+
+ # Print the timings
+ for name, elapsed in results:
+ self._display.display(
+ "{0:-<70}{1:->9}".format(
+ '{0} '.format(name),
+ ' {0:.02f}s'.format(elapsed),
+ )
+ )
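
# --- Illustrative sketch, not part of this patch -----------------------------
# playbook_on_stats above sorts the recorded per-task durations and prints the
# twenty slowest with dot-fill alignment.  The same sort/format in isolation,
# with invented task names and timings:
stats = {'install packages': 42.31, 'gather facts': 3.07, 'template config': 0.84}

slowest = sorted(stats.items(), key=lambda value: value[1], reverse=True)[:20]
for name, elapsed in slowest:
    # prints e.g. "install packages ------------------------------  42.31s"
    print("{0:-<70}{1:->9}".format('{0} '.format(name), ' {0:.02f}s'.format(elapsed)))
# -----------------------------------------------------------------------------
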
diff --git a/lib/ansible/plugins/callback/skippy.py b/lib/ansible/plugins/callback/skippy.py
new file mode 100644
index 00000000000..4307298b495
--- /dev/null
+++ b/lib/ansible/plugins/callback/skippy.py
@@ -0,0 +1,149 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division)
+__metaclass__ = type
+
+from ansible.plugins.callback import CallbackBase
+
+class CallbackModule(CallbackBase):
+
+ '''
+ Like the default stdout callback, but stays silent about skipped tasks.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'skippy'
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if 'exception' in result._result:
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = result._result['exception'].strip().split('\n')[-1]
+ msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
+
+ self._display.display(msg, color='red')
+
+ # finally, remove the exception from the result so it's not shown every time
+ del result._result['exception']
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
+
+ if result._task.ignore_errors:
+ self._display.display("...ignoring", color='cyan')
+
+ def v2_runner_on_ok(self, result):
+
+ if result._task.action == 'include':
+ msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
+ color = 'cyan'
+ elif result._result.get('changed', False):
+ msg = "changed: [%s]" % result._host.get_name()
+ color = 'yellow'
+ else:
+ msg = "ok: [%s]" % result._host.get_name()
+ color = 'green'
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result and result._task.action != 'include':
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=color)
+
+ self._handle_warnings(result._result)
+
+ def v2_runner_on_unreachable(self, result):
+ self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self._display.display("skipping: no hosts matched", color='cyan')
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self._display.banner("NO MORE HOSTS LEFT")
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._display.banner("TASK [%s]" % task.get_name().strip())
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
+
+ def v2_playbook_on_play_start(self, play):
+ name = play.get_name().strip()
+ if not name:
+ msg = "PLAY"
+ else:
+ msg = "PLAY [%s]" % name
+
+ self._display.banner(msg)
+
+ def v2_on_file_diff(self, result):
+ if 'diff' in result._result and result._result['diff'] != {}:
+ self._display.display(self._get_diff(result._result['diff']))
+
+ def v2_playbook_item_on_ok(self, result):
+
+ if result._task.action == 'include':
+ msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
+ color = 'cyan'
+ elif result._result.get('changed', False):
+ msg = "changed: [%s]" % result._host.get_name()
+ color = 'yellow'
+ else:
+ msg = "ok: [%s]" % result._host.get_name()
+ color = 'green'
+
+ msg += " => (item=%s)" % result._result['item']
+
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result and result._task.action != 'include':
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=color)
+
+ def v2_playbook_item_on_failed(self, result):
+ if 'exception' in result._result:
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = result._result['exception'].strip().split('\n')[-1]
+ msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
+
+ self._display.display(msg, color='red')
+
+ # finally, remove the exception from the result so it's not shown every time
+ del result._result['exception']
+
+ self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red')
+ self._handle_warnings(result._result)
+
+ def v2_playbook_item_on_skipped(self, result):
+ msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), result._result['item'])
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color='cyan')
+
diff --git a/lib/ansible/plugins/callback/syslog_json.py b/lib/ansible/plugins/callback/syslog_json.py
new file mode 100644
index 00000000000..991a94dd31b
--- /dev/null
+++ b/lib/ansible/plugins/callback/syslog_json.py
@@ -0,0 +1,61 @@
+import os
+import json
+
+import logging
+import logging.handlers
+
+import socket
+
+from ansible.plugins.callback import CallbackBase
+
+class CallbackModule(CallbackBase):
+ """
+ logs ansible-playbook and ansible runs to a syslog server in json format
+ make sure you have in ansible.cfg:
+ callback_plugins = <path_to_callback_plugins_folder>
+ and put the plugin in <path_to_callback_plugins_folder>
+
+ This plugin makes use of the following environment variables:
+ SYSLOG_SERVER (optional): defaults to localhost
+ SYSLOG_PORT (optional): defaults to 514
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'syslog_json'
+
+ def __init__(self, display):
+
+ super(CallbackModule, self).__init__(display)
+
+ self.logger = logging.getLogger('ansible logger')
+ self.logger.setLevel(logging.DEBUG)
+
+ self.handler = logging.handlers.SysLogHandler(
+ address = (os.getenv('SYSLOG_SERVER','localhost'),
+ int(os.getenv('SYSLOG_PORT', 514))),
+ facility=logging.handlers.SysLogHandler.LOG_USER
+ )
+ self.logger.addHandler(self.handler)
+ self.hostname = socket.gethostname()
+
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))
+
+ def runner_on_ok(self, host, res):
+ self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))
+
+ def runner_on_skipped(self, host, item=None):
+ self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s' % (self.hostname,host, 'skipped'))
+
+ def runner_on_unreachable(self, host, res):
+ self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))
+
+ def runner_on_async_failed(self, host, res, jid):
+ self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: %s' % (self.hostname, host, imported_file))
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: %s' % (self.hostname, host, missing_file))
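
# --- Illustrative sketch, not part of this patch -----------------------------
# The handler wiring used by syslog_json above, reduced to a standalone
# snippet: point it at a remote collector by exporting SYSLOG_SERVER and
# SYSLOG_PORT before the run (the host name in the sample message is invented).
import logging
import logging.handlers
import os
import socket

logger = logging.getLogger('ansible logger')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.handlers.SysLogHandler(
    address=(os.getenv('SYSLOG_SERVER', 'localhost'),
             int(os.getenv('SYSLOG_PORT', 514))),
    facility=logging.handlers.SysLogHandler.LOG_USER))

hostname = socket.gethostname()
logger.info('%s ansible-command: task execution OK; host: %s; message: %s'
            % (hostname, 'web01.example.com', '{"changed": false}'))
# -----------------------------------------------------------------------------
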
diff --git a/lib/ansible/plugins/callback/timer.py b/lib/ansible/plugins/callback/timer.py
new file mode 100644
index 00000000000..f75b55e4be6
--- /dev/null
+++ b/lib/ansible/plugins/callback/timer.py
@@ -0,0 +1,33 @@
+from datetime import datetime
+
+from ansible.plugins.callback import CallbackBase
+
+class CallbackModule(CallbackBase):
+ """
+ This callback module tells you how long your plays ran for.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'timer'
+
+ def __init__(self, display):
+
+ super(CallbackModule, self).__init__(display)
+
+ self.start_time = datetime.now()
+
+ def days_hours_minutes_seconds(self, timedelta):
+ minutes = (timedelta.seconds//60)%60
+ r_seconds = timedelta.seconds % 60
+ return timedelta.days, timedelta.seconds//3600, minutes, r_seconds
+
+ def playbook_on_stats(self, stats):
+ self.v2_playbook_on_stats(stats)
+
+ def v2_playbook_on_stats(self, stats):
+ end_time = datetime.now()
+ timedelta = end_time - self.start_time
+ self._display.display("Playbook run took %s days, %s hours, %s minutes, %s seconds" % (self.days_hours_minutes_seconds(timedelta)))
+
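
# --- Illustrative sketch, not part of this patch -----------------------------
# The split performed by days_hours_minutes_seconds() above, written out for
# an invented duration of 1 day, 2 hours, 3 minutes and 4 seconds:
from datetime import timedelta

delta = timedelta(days=1, hours=2, minutes=3, seconds=4)
minutes = (delta.seconds // 60) % 60
print("%sd %sh %sm %ss" % (delta.days, delta.seconds // 3600, minutes, delta.seconds % 60))
# -> "1d 2h 3m 4s"
# -----------------------------------------------------------------------------
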
diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py
new file mode 100644
index 00000000000..1ad28763817
--- /dev/null
+++ b/lib/ansible/plugins/connections/__init__.py
@@ -0,0 +1,157 @@
+# (c) 2012-2014, Michael DeHaan
+# (c) 2015 Toshio Kuratomi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import fcntl
+import gettext
+import select
+import os
+from abc import ABCMeta, abstractmethod, abstractproperty
+
+from functools import wraps
+from six import with_metaclass
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.plugins import shell_loader
+
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
+__all__ = ['ConnectionBase', 'ensure_connect']
+
+
+def ensure_connect(func):
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ self._connect()
+ return func(self, *args, **kwargs)
+ return wrapped
+
+
+class ConnectionBase(with_metaclass(ABCMeta, object)):
+ '''
+ A base class for connections to contain common code.
+ '''
+
+ has_pipelining = False
+ become_methods = C.BECOME_METHODS
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ # All these hasattrs allow subclasses to override these parameters
+ if not hasattr(self, '_play_context'):
+ self._play_context = play_context
+ if not hasattr(self, '_new_stdin'):
+ self._new_stdin = new_stdin
+ if not hasattr(self, '_display'):
+ self._display = display
+ if not hasattr(self, '_connected'):
+ self._connected = False
+
+ self.success_key = None
+ self.prompt = None
+
+ # load the shell plugin for this action/connection
+ if play_context.shell:
+ shell_type = play_context.shell
+ elif hasattr(self, '_shell_type'):
+ shell_type = getattr(self, '_shell_type')
+ else:
+ shell_type = os.path.basename(C.DEFAULT_EXECUTABLE)
+
+ self._shell = shell_loader.get(shell_type)
+ if not self._shell:
+ raise AnsibleError("Invalid shell type specified (%s), or the plugin for that shell type is missing." % shell_type)
+
+ def _become_method_supported(self):
+ ''' Checks if the current class supports this privilege escalation method '''
+
+ if self._play_context.become_method in self.become_methods:
+ return True
+
+ raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % become_method)
+
+ def set_host_overrides(self, host):
+ '''
+ An optional method, which can be used to set connection plugin parameters
+ from variables set on the host (or groups to which the host belongs)
+
+ Any connection plugin using this should first initialize its attributes in
+ an overridden `def __init__(self):`, and then use `host.get_vars()` to find
+ variables which may be used to set those attributes in this method.
+ '''
+ pass
+
+ @abstractproperty
+ def transport(self):
+ """String used to identify this Connection class from other classes"""
+ pass
+
+ @abstractmethod
+ def _connect(self):
+ """Connect to the host we've been initialized with"""
+
+ # Check if PE is supported
+ if self._play_context.become:
+ self._become_method_supported()
+
+ @ensure_connect
+ @abstractmethod
+ def exec_command(self, cmd, tmp_path, in_data=None, executable=None, sudoable=True):
+ """Run a command on the remote host"""
+ pass
+
+ @ensure_connect
+ @abstractmethod
+ def put_file(self, in_path, out_path):
+ """Transfer a file from local to remote"""
+ pass
+
+ @ensure_connect
+ @abstractmethod
+ def fetch_file(self, in_path, out_path):
+ """Fetch a file from remote to local"""
+ pass
+
+ @abstractmethod
+ def close(self):
+ """Terminate the connection"""
+ pass
+
+ def check_become_success(self, output):
+ return self._play_context.success_key in output
+
+ def check_password_prompt(self, output):
+ if self._play_context.prompt is None:
+ return False
+ elif isinstance(self._play_context.prompt, basestring):
+ return output.endswith(self._play_context.prompt)
+ else:
+ return self._play_context.prompt(output)
+
+ def check_incorrect_password(self, output):
+ incorrect_password = gettext.dgettext(self._play_context.become_method, C.BECOME_ERROR_STRINGS[self._play_context.become_method])
+ if incorrect_password in output:
+ raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
+
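
# --- Illustrative sketch, not part of this patch -----------------------------
# ConnectionBase above is an ABC: a concrete connection plugin has to provide
# transport, _connect, exec_command, put_file, fetch_file and close.  A
# minimal local-subprocess plugin under those rules could look like the
# following (module placement, class body and error handling are assumptions,
# not part of this patch):
import shutil
import subprocess

from ansible.plugins.connections import ConnectionBase

class Connection(ConnectionBase):
    ''' toy connection that runs everything on the control machine '''

    transport = 'local_sketch'   # identifies this plugin to the loader

    def _connect(self):
        # nothing to establish for a purely local connection
        self._connected = True
        return self

    def exec_command(self, cmd, tmp_path, in_data=None, executable=None, sudoable=True):
        executable = executable or '/bin/sh'
        p = subprocess.Popen([executable, '-c', cmd],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate(in_data)
        return (p.returncode, '', stdout, stderr)

    def put_file(self, in_path, out_path):
        shutil.copyfile(in_path, out_path)

    def fetch_file(self, in_path, out_path):
        shutil.copyfile(in_path, out_path)

    def close(self):
        self._connected = False
# -----------------------------------------------------------------------------
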
diff --git a/v2/ansible/plugins/connections/accelerate.py b/lib/ansible/plugins/connections/accelerate.py
similarity index 100%
rename from v2/ansible/plugins/connections/accelerate.py
rename to lib/ansible/plugins/connections/accelerate.py
diff --git a/v2/ansible/plugins/connections/chroot.py b/lib/ansible/plugins/connections/chroot.py
similarity index 56%
rename from v2/ansible/plugins/connections/chroot.py
rename to lib/ansible/plugins/connections/chroot.py
index 3ecc0f70301..cc5cee7803d 100644
--- a/v2/ansible/plugins/connections/chroot.py
+++ b/lib/ansible/plugins/connections/chroot.py
@@ -1,5 +1,6 @@
# Based on local.py (c) 2012, Michael DeHaan
# (c) 2013, Maykel Moya
+# (c) 2015, Toshio Kuratomi
#
# This file is part of Ansible
#
@@ -21,13 +22,16 @@ __metaclass__ = type
import distutils.spawn
import traceback
import os
-import shutil
+import shlex
import subprocess
from ansible import errors
from ansible import utils
+from ansible.utils.unicode import to_bytes
from ansible.callbacks import vvv
import ansible.constants as C
+BUFSIZE = 65536
+
class Connection(object):
''' Local chroot based connections '''
@@ -64,8 +68,25 @@ class Connection(object):
return self
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the chroot '''
+ def _generate_cmd(self, executable, cmd):
+ if executable:
+ local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
+ else:
+ # Prior to Python 2.7.3, shlex couldn't handle unicode type strings
+ cmd = to_bytes(cmd)
+ cmd = shlex.split(cmd)
+ local_cmd = [self.chroot_cmd, self.chroot]
+ local_cmd += cmd
+ return local_cmd
+
+ def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None, stdin=subprocess.PIPE):
+ ''' run a command on the chroot. This is only needed for implementing
+ put_file() get_file() so that we don't have to read the whole file
+ into memory.
+
+ compared to exec_command() it loses some niceties like being able to
+ return the process's exit code immediately.
+ '''
if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
@@ -73,61 +94,68 @@ class Connection(object):
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
- # We enter chroot as root so we ignore privlege escalation?
-
- if executable:
- local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
- else:
- local_cmd = '%s "%s" %s' % (self.chroot_cmd, self.chroot, cmd)
+ # We enter the chroot as root, so privilege escalation is ignored for now (may need fixing if we have to become a specific user, e.g. a postgres admin)
+ local_cmd = self._generate_cmd(executable, cmd)
vvv("EXEC %s" % (local_cmd), host=self.chroot)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
+ p = subprocess.Popen(local_cmd, shell=False,
cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
+ stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ return p
+
+ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
+ ''' run a command on the chroot '''
+
+ p = self._buffered_exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data)
+
stdout, stderr = p.communicate()
return (p.returncode, '', stdout, stderr)
def put_file(self, in_path, out_path):
''' transfer a file from local to chroot '''
- if not out_path.startswith(os.path.sep):
- out_path = os.path.join(os.path.sep, out_path)
- normpath = os.path.normpath(out_path)
- out_path = os.path.join(self.chroot, normpath[1:])
-
vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
+
try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
+ with open(in_path, 'rb') as in_file:
+ try:
+ p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), None, stdin=in_file)
+ except OSError:
+ raise errors.AnsibleError("chroot connection requires dd command in the chroot")
+ try:
+ stdout, stderr = p.communicate()
+ except:
+ traceback.print_exc()
+ raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ if p.returncode != 0:
+ raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
+ raise errors.AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from chroot to local '''
- if not in_path.startswith(os.path.sep):
- in_path = os.path.join(os.path.sep, in_path)
- normpath = os.path.normpath(in_path)
- in_path = os.path.join(self.chroot, normpath[1:])
-
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
+
try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None)
+ except OSError:
+ raise errors.AnsibleError("chroot connection requires dd command in the chroot")
+
+ with open(out_path, 'wb+') as out_file:
+ try:
+ chunk = p.stdout.read(BUFSIZE)
+ while chunk:
+ out_file.write(chunk)
+ chunk = p.stdout.read(BUFSIZE)
+ except:
+ traceback.print_exc()
+ raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self):
''' terminate the connection; nothing to do here '''
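
# --- Illustrative sketch, not part of this patch -----------------------------
# put_file()/fetch_file() above stream files through dd inside the chroot so
# the whole file never has to be read into memory.  The core of that pattern,
# outside of Ansible (chroot path, command spelling and file names invented):
import subprocess

BUFSIZE = 65536

def stream_into_chroot(chroot, in_path, out_path):
    # feed the local file to dd's stdin; dd writes it out inside the chroot
    with open(in_path, 'rb') as in_file:
        p = subprocess.Popen(['chroot', chroot, 'dd', 'of=%s' % out_path, 'bs=%s' % BUFSIZE],
                             stdin=in_file,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            raise RuntimeError("failed to transfer %s: %s" % (in_path, stderr))
# -----------------------------------------------------------------------------
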
diff --git a/v2/ansible/plugins/connections/funcd.py b/lib/ansible/plugins/connections/funcd.py
similarity index 100%
rename from v2/ansible/plugins/connections/funcd.py
rename to lib/ansible/plugins/connections/funcd.py
diff --git a/v2/ansible/plugins/connections/jail.py b/lib/ansible/plugins/connections/jail.py
similarity index 57%
rename from v2/ansible/plugins/connections/jail.py
rename to lib/ansible/plugins/connections/jail.py
index f7623b39382..d12318391ce 100644
--- a/v2/ansible/plugins/connections/jail.py
+++ b/lib/ansible/plugins/connections/jail.py
@@ -1,6 +1,7 @@
# Based on local.py (c) 2012, Michael DeHaan
# and chroot.py (c) 2013, Maykel Moya
# (c) 2013, Michael Scherer
+# (c) 2015, Toshio Kuratomi
#
# This file is part of Ansible
#
@@ -22,14 +23,17 @@ __metaclass__ = type
import distutils.spawn
import traceback
import os
-import shutil
+import shlex
import subprocess
from ansible import errors
+from ansible.utils.unicode import to_bytes
from ansible.callbacks import vvv
import ansible.constants as C
+BUFSIZE = 65536
+
class Connection(object):
- ''' Local chroot based connections '''
+ ''' Local BSD Jail based connections '''
def _search_executable(self, executable):
cmd = distutils.spawn.find_executable(executable)
@@ -57,8 +61,6 @@ class Connection(object):
# remove \n
return stdout[:-1]
-
-
def __init__(self, runner, host, port, *args, **kwargs):
self.jail = host
self.runner = runner
@@ -71,7 +73,7 @@ class Connection(object):
self.jls_cmd = self._search_executable('jls')
self.jexec_cmd = self._search_executable('jexec')
-
+
if not self.jail in self.list_jails():
raise errors.AnsibleError("incorrect jail name %s" % self.jail)
@@ -81,9 +83,9 @@ class Connection(object):
self.port = port
def connect(self, port=None):
- ''' connect to the chroot; nothing to do here '''
+ ''' connect to the jail; nothing to do here '''
- vvv("THIS IS A LOCAL CHROOT DIR", host=self.jail)
+ vvv("THIS IS A LOCAL JAIL DIR", host=self.jail)
return self
@@ -92,11 +94,21 @@ class Connection(object):
if executable:
local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd]
else:
- local_cmd = '%s "%s" %s' % (self.jexec_cmd, self.jail, cmd)
+ # Prior to Python 2.7.3, shlex couldn't handle unicode type strings
+ cmd = to_bytes(cmd)
+ cmd = shlex.split(cmd)
+ local_cmd = [self.jexec_cmd, self.jail]
+ local_cmd += cmd
return local_cmd
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the chroot '''
+ def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None, stdin=subprocess.PIPE):
+ ''' run a command on the jail. This is only needed for implementing
+ put_file() get_file() so that we don't have to read the whole file
+ into memory.
+
+ compared to exec_command() it loses some niceties like being able to
+ return the process's exit code immediately.
+ '''
if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
@@ -104,51 +116,68 @@ class Connection(object):
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
- # Ignores privilege escalation
+ # We enter the jail as root so we ignore privilege escalation (probably need to fix this in case we have to become a specific user [ex: postgres admin])
local_cmd = self._generate_cmd(executable, cmd)
vvv("EXEC %s" % (local_cmd), host=self.jail)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
+ p = subprocess.Popen(local_cmd, shell=False,
cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
+ stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdout, stderr = p.communicate()
- return (p.returncode, '', stdout, stderr)
+ return p
+
+ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
+ ''' run a command on the jail '''
- def _normalize_path(self, path, prefix):
- if not path.startswith(os.path.sep):
- path = os.path.join(os.path.sep, path)
- normpath = os.path.normpath(path)
- return os.path.join(prefix, normpath[1:])
+ p = self._buffered_exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data)
- def _copy_file(self, in_path, out_path):
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
+ stdout, stderr = p.communicate()
+ return (p.returncode, '', stdout, stderr)
def put_file(self, in_path, out_path):
- ''' transfer a file from local to chroot '''
+ ''' transfer a file from local to jail '''
- out_path = self._normalize_path(out_path, self.get_jail_path())
vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
- self._copy_file(in_path, out_path)
+ try:
+ with open(in_path, 'rb') as in_file:
+ try:
+ p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), None, stdin=in_file)
+ except OSError:
+ raise errors.AnsibleError("jail connection requires dd command in the jail")
+ try:
+ stdout, stderr = p.communicate()
+ except:
+ traceback.print_exc()
+ raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ if p.returncode != 0:
+ raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ except IOError:
+ raise errors.AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path):
- ''' fetch a file from chroot to local '''
+ ''' fetch a file from jail to local '''
- in_path = self._normalize_path(in_path, self.get_jail_path())
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
- self._copy_file(in_path, out_path)
+ try:
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None)
+ except OSError:
+ raise errors.AnsibleError("jail connection requires dd command in the jail")
+
+ with open(out_path, 'wb+') as out_file:
+ try:
+ chunk = p.stdout.read(BUFSIZE)
+ while chunk:
+ out_file.write(chunk)
+ chunk = p.stdout.read(BUFSIZE)
+ except:
+ traceback.print_exc()
+ raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self):
''' terminate the connection; nothing to do here '''
diff --git a/v2/ansible/plugins/connections/libvirt_lxc.py b/lib/ansible/plugins/connections/libvirt_lxc.py
similarity index 100%
rename from v2/ansible/plugins/connections/libvirt_lxc.py
rename to lib/ansible/plugins/connections/libvirt_lxc.py
diff --git a/lib/ansible/plugins/connections/local.py b/lib/ansible/plugins/connections/local.py
new file mode 100644
index 00000000000..e4eddbd4cba
--- /dev/null
+++ b/lib/ansible/plugins/connections/local.py
@@ -0,0 +1,128 @@
+# (c) 2012, Michael DeHaan
+# (c) 2015 Toshio Kuratomi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+import os
+import shutil
+import subprocess
+import select
+import fcntl
+
+import ansible.constants as C
+
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.plugins.connections import ConnectionBase
+
+class Connection(ConnectionBase):
+ ''' Local based connections '''
+
+ @property
+ def transport(self):
+ ''' used to identify this connection object '''
+ return 'local'
+
+ def _connect(self, port=None):
+ ''' connect to the local host; nothing to do here '''
+
+ if not self._connected:
+ self._display.vvv("ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user, host=self._play_context.remote_addr))
+ self._connected = True
+ return self
+
+ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True):
+ ''' run a command on the local host '''
+
+ super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable)
+
+ self._display.debug("in local.exec_command()")
+
+ if in_data:
+ raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
+ executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None
+
+ self._display.vvv("{0} EXEC {1}".format(self._play_context.remote_addr, cmd))
+ # FIXME: cwd= needs to be set to the basedir of the playbook
+ self._display.debug("opening command with Popen()")
+ p = subprocess.Popen(
+ cmd,
+ shell=isinstance(cmd, basestring),
+ executable=executable, #cwd=...
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ self._display.debug("done running command with Popen()")
+
+ if self._play_context.prompt and self._play_context.become_pass and sudoable:
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+ become_output = ''
+ while not self.check_become_success(become_output) and not self.check_password_prompt(become_output):
+
+ rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], self._play_context.timeout)
+ if p.stdout in rfd:
+ chunk = p.stdout.read()
+ elif p.stderr in rfd:
+ chunk = p.stderr.read()
+ else:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + become_output)
+ if not chunk:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('privilege output closed while waiting for password prompt:\n' + become_output)
+ become_output += chunk
+ if not self.check_become_success(become_output):
+ p.stdin.write(self._play_context.become_pass + '\n')
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+
+ self._display.debug("getting output with communicate()")
+ stdout, stderr = p.communicate()
+ self._display.debug("done communicating")
+
+ self._display.debug("done with local.exec_command()")
+ return (p.returncode, '', stdout, stderr)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to local '''
+
+ super(Connection, self).put_file(in_path, out_path)
+
+ self._display.vvv("{0} PUT {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
+ if not os.path.exists(in_path):
+ raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path))
+ try:
+ shutil.copyfile(in_path, out_path)
+ except shutil.Error:
+ raise AnsibleError("failed to copy: {0} and {1} are the same".format(in_path, out_path))
+ except IOError as e:
+ raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, e))
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from local to local -- for compatibility '''
+
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ self._display.vvv("{0} FETCH {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
+ self.put_file(in_path, out_path)
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ self._connected = False
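local.py above answers a privilege escalation prompt by switching the child's pipes to non-blocking mode and polling them with select() until either the success marker or the password prompt shows up. A reduced sketch of that polling loop, separated from the plugin; the marker handling and timeout are simplified assumptions.

import fcntl
import os
import select

def wait_for_marker(p, markers, timeout=10):
    # Switch the child's pipes to non-blocking so a silent child cannot hang us.
    for pipe in (p.stdout, p.stderr):
        fcntl.fcntl(pipe, fcntl.F_SETFL, fcntl.fcntl(pipe, fcntl.F_GETFL) | os.O_NONBLOCK)
    output = b''
    while not any(m in output for m in markers):
        rfd, _, _ = select.select([p.stdout, p.stderr], [], [], timeout)
        if not rfd:
            raise RuntimeError('timeout waiting for prompt: %r' % output)
        chunk = rfd[0].read()
        if not chunk:
            raise RuntimeError('output closed while waiting for prompt: %r' % output)
        output += chunk
    # Back to blocking mode before handing the process over to communicate().
    for pipe in (p.stdout, p.stderr):
        fcntl.fcntl(pipe, fcntl.F_SETFL, fcntl.fcntl(pipe, fcntl.F_GETFL) & ~os.O_NONBLOCK)
    return output
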
diff --git a/v2/ansible/plugins/connections/paramiko_ssh.py b/lib/ansible/plugins/connections/paramiko_ssh.py
similarity index 78%
rename from v2/ansible/plugins/connections/paramiko_ssh.py
rename to lib/ansible/plugins/connections/paramiko_ssh.py
index 01e95451b80..ab3253d868b 100644
--- a/v2/ansible/plugins/connections/paramiko_ssh.py
+++ b/lib/ansible/plugins/connections/paramiko_ssh.py
@@ -42,6 +42,7 @@ from binascii import hexlify
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.plugins.connections import ConnectionBase
+from ansible.utils.path import makedirs_safe
AUTHENTICITY_MSG="""
paramiko: The authenticity of host '%s' can't be established.
@@ -60,6 +61,7 @@ with warnings.catch_warnings():
except ImportError:
pass
+
class MyAddPolicy(object):
"""
Based on AutoAddPolicy in paramiko so we can determine when keys are added
@@ -126,7 +128,7 @@ class Connection(ConnectionBase):
return 'paramiko'
def _cache_key(self):
- return "%s__%s__" % (self._connection_info.remote_addr, self._connection_info.remote_user)
+ return "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
def _connect(self):
cache_key = self._cache_key()
@@ -142,36 +144,42 @@ class Connection(ConnectionBase):
if not HAVE_PARAMIKO:
raise AnsibleError("paramiko is not installed")
- port = self._connection_info.port or 22
- self._display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._connection_info.remote_user, port, self._connection_info.remote_addr), host=self._connection_info.remote_addr)
+ port = self._play_context.port or 22
+ self._display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._play_context.remote_user, port, self._play_context.remote_addr), host=self._play_context.remote_addr)
ssh = paramiko.SSHClient()
self.keyfile = os.path.expanduser("~/.ssh/known_hosts")
if C.HOST_KEY_CHECKING:
+ try:
+ #TODO: check if we need to look at several possible locations, possible for loop
+ ssh.load_system_host_keys("/etc/ssh/ssh_known_hosts")
+ except IOError:
+ pass # file was not found, but not required to function
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin))
allow_agent = True
- if self._connection_info.password is not None:
+ if self._play_context.password is not None:
allow_agent = False
try:
key_filename = None
- if self._connection_info.private_key_file:
- key_filename = os.path.expanduser(self._connection_info.private_key_file)
+ if self._play_context.private_key_file:
+ key_filename = os.path.expanduser(self._play_context.private_key_file)
ssh.connect(
- self._connection_info.remote_addr,
- username=self._connection_info.remote_user,
+ self._play_context.remote_addr,
+ username=self._play_context.remote_user,
allow_agent=allow_agent,
look_for_keys=True,
key_filename=key_filename,
- password=self._connection_info.password,
- timeout=self._connection_info.timeout,
+ password=self._play_context.password,
+ timeout=self._play_context.timeout,
+ compress=True,
port=port,
)
except Exception as e:
@@ -180,16 +188,18 @@ class Connection(ConnectionBase):
raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
elif "Private key file is encrypted" in msg:
msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
- self._connection_info.remote_user, self._connection_info.remote_addr, port, msg)
+ self._play_context.remote_user, self._play_context.remote_addr, port, msg)
raise AnsibleConnectionFailure(msg)
else:
raise AnsibleConnectionFailure(msg)
return ssh
- def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
+ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True):
''' run a command on the remote host '''
+ super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable)
+
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
@@ -210,7 +220,7 @@ class Connection(ConnectionBase):
if C.PARAMIKO_PTY:
chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0)))
- self._display.vvv("EXEC %s" % cmd, host=self._connection_info.remote_addr)
+ self._display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr)
no_prompt_out = ''
no_prompt_err = ''
@@ -218,27 +228,28 @@ class Connection(ConnectionBase):
try:
chan.exec_command(cmd)
- if self._connection_info.become_pass:
- while True:
- if success_key in become_output or \
- (prompt and become_output.endswith(prompt)) or \
- utils.su_prompts.check_su_prompt(become_output):
- break
- chunk = chan.recv(bufsize)
- if not chunk:
- if 'unknown user' in become_output:
- raise AnsibleError(
- 'user %s does not exist' % become_user)
- else:
- raise AnsibleError('ssh connection ' +
- 'closed waiting for password prompt')
- become_output += chunk
- if success_key not in become_output:
- if self._connection_info.become:
- chan.sendall(self._connection_info.become_pass + '\n')
- else:
- no_prompt_out += become_output
- no_prompt_err += become_output
+ if self._play_context.prompt:
+ if self._play_context.become and self._play_context.become_pass:
+ while True:
+ self._display.debug('Waiting for Privilege Escalation input')
+ if self.check_become_success(become_output) or self.check_password_prompt(become_output):
+ break
+ chunk = chan.recv(bufsize)
+ print("chunk is: %s" % chunk)
+ if not chunk:
+ if 'unknown user' in become_output:
+ raise AnsibleError(
+ 'user %s does not exist' % become_user)
+ else:
+ raise AnsibleError('ssh connection ' +
+ 'closed waiting for password prompt')
+ become_output += chunk
+ if not self.check_become_success(become_output):
+ if self._play_context.become:
+ chan.sendall(self._play_context.become_pass + '\n')
+ else:
+ no_prompt_out += become_output
+ no_prompt_err += become_output
except socket.timeout:
raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output)
@@ -250,7 +261,9 @@ class Connection(ConnectionBase):
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
- self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr)
+ super(Connection, self).put_file(in_path, out_path)
+
+ self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
if not os.path.exists(in_path):
raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
@@ -267,17 +280,19 @@ class Connection(ConnectionBase):
def _connect_sftp(self):
- cache_key = "%s__%s__" % (self._connection_info.remote_addr, self._connection_info.remote_user)
+ cache_key = "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
if cache_key in SFTP_CONNECTION_CACHE:
return SFTP_CONNECTION_CACHE[cache_key]
else:
- result = SFTP_CONNECTION_CACHE[cache_key] = self.connect().ssh.open_sftp()
+ result = SFTP_CONNECTION_CACHE[cache_key] = self._connect().ssh.open_sftp()
return result
def fetch_file(self, in_path, out_path):
''' save a remote file to the specified path '''
- self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr)
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
try:
self.sftp = self._connect_sftp()
@@ -309,8 +324,7 @@ class Connection(ConnectionBase):
return False
path = os.path.expanduser("~/.ssh")
- if not os.path.exists(path):
- os.makedirs(path)
+ makedirs_safe(path)
f = open(filename, 'w')
@@ -347,8 +361,7 @@ class Connection(ConnectionBase):
# add any new SSH host keys -- warning -- this could be slow
lockfile = self.keyfile.replace("known_hosts",".known_hosts.lock")
dirname = os.path.dirname(self.keyfile)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
+ makedirs_safe(dirname)
KEY_LOCK = open(lockfile, 'w')
fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)
@@ -370,7 +383,7 @@ class Connection(ConnectionBase):
# the file will be moved into place rather than cleaned up.
tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
- os.chmod(tmp_keyfile.name, key_stat.st_mode & 07777)
+ os.chmod(tmp_keyfile.name, key_stat.st_mode & 0o7777)
os.chown(tmp_keyfile.name, key_stat.st_uid, key_stat.st_gid)
self._save_ssh_host_keys(tmp_keyfile.name)
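Both the paramiko and winrm plugins in this patch replace the bare os.path.exists()/os.makedirs() pairs with makedirs_safe() from ansible.utils.path. The helper's body is not shown in this diff; the sketch below is only a plausible reading of what such a helper does (create the tree and tolerate a concurrent creator, removing the check-then-create race), not the real implementation.

import errno
import os

def makedirs_safe(path, mode=None):
    # Create the directory tree; ignore the error if another process created it
    # between our check and the mkdir call. (Assumed behaviour, not Ansible's code.)
    if not os.path.exists(path):
        try:
            if mode is None:
                os.makedirs(path)
            else:
                os.makedirs(path, mode)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
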
diff --git a/v2/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py
similarity index 58%
rename from v2/ansible/plugins/connections/ssh.py
rename to lib/ansible/plugins/connections/ssh.py
index 49e1e3b9660..5231c8ae8c0 100644
--- a/v2/ansible/plugins/connections/ssh.py
+++ b/lib/ansible/plugins/connections/ssh.py
@@ -18,18 +18,20 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import gettext
+import fcntl
+import hmac
import os
-import re
-import subprocess
-import shlex
import pipes
+import pty
+import pwd
import random
+import re
import select
-import fcntl
-import hmac
-import pwd
-import gettext
-import pty
+import shlex
+import subprocess
+import time
+
from hashlib import sha1
from ansible import constants as C
@@ -39,10 +41,13 @@ from ansible.plugins.connections import ConnectionBase
class Connection(ConnectionBase):
''' ssh based connections '''
+ has_pipelining = True
+ become_methods = frozenset(C.BECOME_METHODS).difference(['runas'])
+
def __init__(self, *args, **kwargs):
# SSH connection specific init stuff
+ self._common_args = []
self.HASHED_KEY_MAGIC = "|1|"
- self._has_pipelining = True
# FIXME: move the lockfile locations to ActionBase?
#fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
@@ -52,6 +57,8 @@ class Connection(ConnectionBase):
super(Connection, self).__init__(*args, **kwargs)
+ self.host = self._play_context.remote_addr
+
@property
def transport(self):
''' used to identify this connection object from other classes '''
@@ -60,12 +67,11 @@ class Connection(ConnectionBase):
def _connect(self):
''' connect to the remote host '''
- self._display.vvv("ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._connection_info.remote_user), host=self._connection_info.remote_addr)
+ self._display.vvv("ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
if self._connected:
return self
- self._common_args = []
extra_args = C.ANSIBLE_SSH_ARGS
if extra_args is not None:
# make sure there is no empty string added as this can produce weird errors
@@ -93,25 +99,20 @@ class Connection(ConnectionBase):
if not C.HOST_KEY_CHECKING:
self._common_args += ("-o", "StrictHostKeyChecking=no")
- if self._connection_info.port is not None:
- self._common_args += ("-o", "Port={0}".format(self._connection_info.port))
- # FIXME: need to get this from connection info
- #if self.private_key_file is not None:
- # self._common_args += ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(self.private_key_file)))
- #elif self.runner.private_key_file is not None:
- # self._common_args += ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(self.runner.private_key_file)))
- if self._connection_info.password:
+ if self._play_context.port is not None:
+ self._common_args += ("-o", "Port={0}".format(self._play_context.port))
+ if self._play_context.private_key_file is not None:
+ self._common_args += ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(self._play_context.private_key_file)))
+ if self._play_context.password:
self._common_args += ("-o", "GSSAPIAuthentication=no",
"-o", "PubkeyAuthentication=no")
else:
self._common_args += ("-o", "KbdInteractiveAuthentication=no",
"-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
"-o", "PasswordAuthentication=no")
- if self._connection_info.remote_user is not None and self._connection_info.remote_user != pwd.getpwuid(os.geteuid())[0]:
- self._common_args += ("-o", "User={0}".format(self._connection_info.remote_user))
- # FIXME: figure out where this goes
- #self._common_args += ("-o", "ConnectTimeout={0}".format(self.runner.timeout))
- self._common_args += ("-o", "ConnectTimeout=15")
+ if self._play_context.remote_user is not None and self._play_context.remote_user != pwd.getpwuid(os.geteuid())[0]:
+ self._common_args += ("-o", "User={0}".format(self._play_context.remote_user))
+ self._common_args += ("-o", "ConnectTimeout={0}".format(self._play_context.timeout))
self._connected = True
@@ -137,23 +138,23 @@ class Connection(ConnectionBase):
return (p, stdin)
def _password_cmd(self):
- if self._connection_info.password:
+ if self._play_context.password:
try:
p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
except OSError:
raise AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
(self.rfd, self.wfd) = os.pipe()
- return ("sshpass", "-d{0}".format(self.rfd))
+ return ["sshpass", "-d{0}".format(self.rfd)]
return []
def _send_password(self):
- if self._connection_info.password:
+ if self._play_context.password:
os.close(self.rfd)
- os.write(self.wfd, "{0}\n".format(self._connection_info.password))
+ os.write(self.wfd, "{0}\n".format(self._play_context.password))
os.close(self.wfd)
- def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None):
+ def _communicate(self, p, stdin, indata, sudoable=True):
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
# We can't use p.communicate here because the ControlMaster may have stdout open as well
@@ -170,35 +171,24 @@ class Connection(ConnectionBase):
while True:
rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
- # FIXME: su/sudo stuff
- # fail early if the sudo/su password is wrong
- #if self.runner.sudo and sudoable:
- # if self.runner.sudo_pass:
- # incorrect_password = gettext.dgettext(
- # "sudo", "Sorry, try again.")
- # if stdout.endswith("%s\r\n%s" % (incorrect_password,
- # prompt)):
- # raise AnsibleError('Incorrect sudo password')
- #
- # if stdout.endswith(prompt):
- # raise AnsibleError('Missing sudo password')
- #
- #if self.runner.su and su and self.runner.su_pass:
- # incorrect_password = gettext.dgettext(
- # "su", "Sorry")
- # if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
- # raise AnsibleError('Incorrect su password')
-
- if p.stdout in rfd:
- dat = os.read(p.stdout.fileno(), 9000)
- stdout += dat
- if dat == '':
- rpipes.remove(p.stdout)
+ # fail early if the become password is wrong
+ if self._play_context.become and sudoable:
+ if self._play_context.become_pass:
+ self.check_incorrect_password(stdout)
+ elif self.check_password_prompt(stdout):
+ raise AnsibleError('Missing %s password' % self._play_context.become_method)
+
if p.stderr in rfd:
dat = os.read(p.stderr.fileno(), 9000)
stderr += dat
if dat == '':
rpipes.remove(p.stderr)
+ elif p.stdout in rfd:
+ dat = os.read(p.stdout.fileno(), 9000)
+ stdout += dat
+ if dat == '':
+ rpipes.remove(p.stdout)
+
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
@@ -249,29 +239,92 @@ class Connection(ConnectionBase):
tokens = line.split()
if not tokens:
continue
- if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
- # this is a hashed known host entry
- try:
- (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
- hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
- hash.update(host)
- if hash.digest() == kn_host.decode('base64'):
+
+ if isinstance(tokens, list) and tokens: # skip invalid hostlines
+ if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
+ # this is a hashed known host entry
+ try:
+ (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
+ hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
+ hash.update(host)
+ if hash.digest() == kn_host.decode('base64'):
+ return False
+ except:
+ # invalid hashed host key, skip it
+ continue
+ else:
+ # standard host file entry
+ if host in tokens[0]:
return False
- except:
- # invalid hashed host key, skip it
- continue
- else:
- # standard host file entry
- if host in tokens[0]:
- return False
if (hfiles_not_found == len(host_file_list)):
self._display.vvv("EXEC previous known host file not found for {0}".format(host))
return True
- def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
+ def lock_host_keys(self, lock):
+
+ if C.HOST_KEY_CHECKING and self.not_in_host_file(self.host):
+ if lock:
+ action = fcntl.LOCK_EX
+ else:
+ action = fcntl.LOCK_UN
+
+ # lock around the initial SSH connectivity so the user prompt about whether to add
+ # the host to known hosts is not intermingled with multiprocess output.
+ # FIXME: move the locations of these lock files, same as init above, these came from runner, probably need to be in task_executor
+ # fcntl.lockf(self.process_lockfile, action)
+ # fcntl.lockf(self.output_lockfile, action)
+
+ def exec_command(self, *args, **kwargs):
+ """
+ Wrapper around _exec_command to retry in the case of an ssh failure
+
+ Will retry if:
+ * an exception is caught
+ * ssh returns 255
+ Will not retry if:
+ * remaining_tries is <2
+ * retries limit reached
+ """
+
+ remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1
+ cmd_summary = "%s..." % args[0]
+ for attempt in xrange(remaining_tries):
+ try:
+ return_tuple = self._exec_command(*args, **kwargs)
+ # 0 = success
+ # 1-254 = remote command return code
+ # 255 = failure from the ssh command itself
+ if return_tuple[0] != 255 or attempt == (remaining_tries - 1):
+ break
+ else:
+ raise AnsibleConnectionFailure("Failed to connect to the host via ssh.")
+ except (AnsibleConnectionFailure, Exception) as e:
+ if attempt == remaining_tries - 1:
+ raise e
+ else:
+ pause = 2 ** attempt - 1
+ if pause > 30:
+ pause = 30
+
+ if isinstance(e, AnsibleConnectionFailure):
+ msg = "ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt, cmd_summary, pause)
+ else:
+ msg = "ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds" % (attempt, e, cmd_summary, pause)
+
+ self._display.vv(msg)
+
+ time.sleep(pause)
+ continue
+
+
+ return return_tuple
+
+ def _exec_command(self, cmd, tmp_path, in_data=None, sudoable=True):
''' run a command on the remote host '''
+ super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable)
+
ssh_cmd = self._password_cmd()
ssh_cmd += ("ssh", "-C")
if not in_data:
@@ -279,29 +332,18 @@ class Connection(ConnectionBase):
# inside a tty automatically invokes the python interactive-mode but the modules are not
# compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
ssh_cmd.append("-tt")
- if self._connection_info.verbosity > 3:
+ if self._play_context.verbosity > 3:
ssh_cmd.append("-vvv")
else:
ssh_cmd.append("-q")
ssh_cmd += self._common_args
- # FIXME: ipv6 stuff needs to be figured out. It's in the connection info, however
- # not sure if it's all working yet so this remains commented out
- #if self._ipv6:
- # ssh_cmd += ['-6']
- ssh_cmd.append(self._connection_info.remote_addr)
+ ssh_cmd.append(self.host)
ssh_cmd.append(cmd)
- self._display.vvv("EXEC {0}".format(' '.join(ssh_cmd)), host=self._connection_info.remote_addr)
+ self._display.vvv("EXEC {0}".format(' '.join(ssh_cmd)), host=self.host)
- not_in_host_file = self.not_in_host_file(self._connection_info.remote_addr)
-
- # FIXME: move the locations of these lock files, same as init above
- #if C.HOST_KEY_CHECKING and not_in_host_file:
- # # lock around the initial SSH connectivity so the user prompt about whether to add
- # # the host to known hosts is not intermingled with multiprocess output.
- # fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- # fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
+ self.lock_host_keys(True)
# create process
(p, stdin) = self._run(ssh_cmd, in_data)
@@ -310,71 +352,60 @@ class Connection(ConnectionBase):
no_prompt_out = ''
no_prompt_err = ''
- # FIXME: su/sudo stuff
- #if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \
- # (self.runner.su and su and self.runner.su_pass):
- # # several cases are handled for sudo privileges with password
- # # * NOPASSWD (tty & no-tty): detect success_key on stdout
- # # * without NOPASSWD:
- # # * detect prompt on stdout (tty)
- # # * detect prompt on stderr (no-tty)
- # fcntl.fcntl(p.stdout, fcntl.F_SETFL,
- # fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
- # fcntl.fcntl(p.stderr, fcntl.F_SETFL,
- # fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
- # sudo_output = ''
- # sudo_errput = ''
- #
- # while True:
- # if success_key in sudo_output or \
- # (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
- # (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)):
- # break
- #
- # rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
- # [p.stdout], self.runner.timeout)
- # if p.stderr in rfd:
- # chunk = p.stderr.read()
- # if not chunk:
- # raise AnsibleError('ssh connection closed waiting for sudo or su password prompt')
- # sudo_errput += chunk
- # incorrect_password = gettext.dgettext(
- # "sudo", "Sorry, try again.")
- # if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
- # raise AnsibleError('Incorrect sudo password')
- # elif sudo_errput.endswith(prompt):
- # stdin.write(self.runner.sudo_pass + '\n')
- #
- # if p.stdout in rfd:
- # chunk = p.stdout.read()
- # if not chunk:
- # raise AnsibleError('ssh connection closed waiting for sudo or su password prompt')
- # sudo_output += chunk
- #
- # if not rfd:
- # # timeout. wrap up process communication
- # stdout = p.communicate()
- # raise AnsibleError('ssh connection error waiting for sudo or su password prompt')
- #
- # if success_key not in sudo_output:
- # if sudoable:
- # stdin.write(self.runner.sudo_pass + '\n')
- # elif su:
- # stdin.write(self.runner.su_pass + '\n')
- # else:
- # no_prompt_out += sudo_output
- # no_prompt_err += sudo_errput
-
- #(returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt)
- # FIXME: the prompt won't be here anymore
- prompt=""
- (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, prompt=prompt)
-
- #if C.HOST_KEY_CHECKING and not_in_host_file:
- # # lock around the initial SSH connectivity so the user prompt about whether to add
- # # the host to known hosts is not intermingled with multiprocess output.
- # fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
- # fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
+
+ if self._play_context.prompt:
+ '''
+ Several cases are handled for privileges with password
+ * NOPASSWD (tty & no-tty): detect success_key on stdout
+ * without NOPASSWD:
+ * detect prompt on stdout (tty)
+ * detect prompt on stderr (no-tty)
+ '''
+
+ self._display.debug("Handling privilege escalation password prompt.")
+
+ if self._play_context.become and self._play_context.become_pass:
+
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ become_output = ''
+ become_errput = ''
+ while True:
+ self._display.debug('Waiting for Privilege Escalation input')
+ if self.check_become_success(become_output + become_errput) or self.check_password_prompt(become_output + become_errput):
+ break
+
+ rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout], self._play_context.timeout)
+ if not rfd:
+ # timeout. wrap up process communication
+ stdout, stderr = p.communicate()
+ raise AnsibleError('Connection error waiting for privilege escalation password prompt: %s' % become_output)
+
+ elif p.stderr in rfd:
+ chunk = p.stderr.read()
+ become_errput += chunk
+ self.check_incorrect_password(become_errput)
+
+ elif p.stdout in rfd:
+ chunk = p.stdout.read()
+ become_output += chunk
+
+ if not chunk:
+ raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output)
+
+ if not self.check_become_success(become_output + become_errput):
+ self._display.debug("Sending privilege escalation password.")
+ stdin.write(self._play_context.become_pass + '\n')
+ else:
+ no_prompt_out = become_output
+ no_prompt_err = become_errput
+
+
+ (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable)
+
+ self.lock_host_keys(False)
+
controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr
if C.HOST_KEY_CHECKING:
@@ -392,27 +423,26 @@ class Connection(ConnectionBase):
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
- self._display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=self._connection_info.remote_addr)
+
+ super(Connection, self).put_file(in_path, out_path)
+
+ self._display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path))
cmd = self._password_cmd()
- # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH
- host = self._connection_info.remote_addr
-
- # FIXME: ipv6 stuff needs to be figured out. It's in the connection info, however
- # not sure if it's all working yet so this remains commented out
- #if self._ipv6:
- # host = '[%s]' % host
+ # scp and sftp require square brackets for IPv6 addresses, but
+ # accept them for hostnames and IPv4 addresses too.
+ host = '[%s]' % self.host
if C.DEFAULT_SCP_IF_SSH:
cmd.append('scp')
- cmd += self._common_args
- cmd.append(in_path,host + ":" + pipes.quote(out_path))
+ cmd.extend(self._common_args)
+ cmd.extend([in_path, '{0}:{1}'.format(host, pipes.quote(out_path))])
indata = None
else:
cmd.append('sftp')
- cmd += self._common_args
+ cmd.extend(self._common_args)
cmd.append(host)
indata = "put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
@@ -427,26 +457,28 @@ class Connection(ConnectionBase):
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
- self._display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self._connection_info.remote_addr)
- cmd = self._password_cmd()
- # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH
- host = self._connection_info.remote_addr
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ self._display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
+ cmd = self._password_cmd()
- # FIXME: ipv6 stuff needs to be figured out. It's in the connection info, however
- # not sure if it's all working yet so this remains commented out
- #if self._ipv6:
- # host = '[%s]' % self._connection_info.remote_addr
if C.DEFAULT_SCP_IF_SSH:
cmd.append('scp')
- cmd += self._common_args
- cmd += ('{0}:{1}'.format(host, in_path), out_path)
+ cmd.extend(self._common_args)
+ cmd.extend(['{0}:{1}'.format(self.host, in_path), out_path])
indata = None
else:
cmd.append('sftp')
- cmd += self._common_args
- cmd.append(host)
+ # sftp batch mode allows us to correctly catch failed transfers,
+ # but can be disabled if for some reason the client side doesn't
+ # support the option
+ if C.DEFAULT_SFTP_BATCH_MODE:
+ cmd.append('-b')
+ cmd.append('-')
+ cmd.extend(self._common_args)
+ cmd.append(self.host)
indata = "get {0} {1}\n".format(in_path, out_path)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -458,5 +490,16 @@ class Connection(ConnectionBase):
def close(self):
''' not applicable since we're executing openssh binaries '''
- self._connected = False
+
+ if self._connected:
+
+ if 'ControlMaster' in self._common_args:
+ cmd = ['ssh','-O','stop']
+ cmd.extend(self._common_args)
+ cmd.append(self._play_context.remote_addr)
+
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+
+ self._connected = False
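The new exec_command() wrapper in ssh.py above retries the underlying call whenever it raises or whenever ssh itself exits with 255, sleeping 2**attempt - 1 seconds (capped at 30) between attempts. A standalone sketch of that policy; run_once and retries are placeholders for illustration.

import time

def run_with_retries(run_once, retries):
    # run_once() is expected to return the usual (returncode, stdin, stdout, stderr) tuple.
    for attempt in range(retries + 1):
        try:
            result = run_once()
            # 0 = success, 1-254 = remote command's own return code,
            # 255 = failure from the ssh client itself.
            if result[0] != 255 or attempt == retries:
                return result
        except Exception:
            if attempt == retries:
                raise
        # Exponential backoff, capped at 30 seconds (0s, 1s, 3s, 7s, ...).
        time.sleep(min(2 ** attempt - 1, 30))
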
diff --git a/v2/ansible/plugins/connections/winrm.py b/lib/ansible/plugins/connections/winrm.py
similarity index 70%
rename from v2/ansible/plugins/connections/winrm.py
rename to lib/ansible/plugins/connections/winrm.py
index 8a42da2534b..0e19b93ac24 100644
--- a/v2/ansible/plugins/connections/winrm.py
+++ b/lib/ansible/plugins/connections/winrm.py
@@ -44,6 +44,8 @@ from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.plugins.connections import ConnectionBase
from ansible.plugins import shell_loader
+from ansible.utils.path import makedirs_safe
+from ansible.utils.unicode import to_bytes, to_unicode
class Connection(ConnectionBase):
'''WinRM connections over HTTP/HTTPS.'''
@@ -60,8 +62,7 @@ class Connection(ConnectionBase):
self.protocol = None
self.shell_id = None
self.delegate = None
-
- self._shell = shell_loader.get('powershell')
+ self._shell_type = 'powershell'
# TODO: Add runas support
self.become_methods_supported=[]
@@ -77,28 +78,28 @@ class Connection(ConnectionBase):
'''
Establish a WinRM connection over HTTP/HTTPS.
'''
- port = self._connection_info.port or 5986
+ port = self._play_context.port or 5986
self._display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \
- (self._connection_info.remote_user, port, self._connection_info.remote_addr), host=self._connection_info.remote_addr)
- netloc = '%s:%d' % (self._connection_info.remote_addr, port)
+ (self._play_context.remote_user, port, self._play_context.remote_addr), host=self._play_context.remote_addr)
+ netloc = '%s:%d' % (self._play_context.remote_addr, port)
exc = None
for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']:
- if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self._connection_info.remote_user):
+ if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self._play_context.remote_user):
continue
if transport == 'kerberos':
- realm = self._connection_info.remote_user.split('@', 1)[1].strip() or None
+ realm = self._play_context.remote_user.split('@', 1)[1].strip() or None
else:
realm = None
endpoint = parse.urlunsplit((scheme, netloc, '/wsman', '', ''))
- self._display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._connection_info.remote_addr)
+ self._display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._play_context.remote_addr)
protocol = Protocol(
endpoint,
transport=transport,
- username=self._connection_info.remote_user,
- password=self._connection_info.password,
+ username=self._play_context.remote_user,
+ password=self._play_context.password,
realm=realm
)
@@ -116,30 +117,30 @@ class Connection(ConnectionBase):
raise AnsibleError("the username/password specified for this server was incorrect")
elif code == 411:
return protocol
- self._display.vvvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self._connection_info.remote_addr)
+ self._display.vvvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self._play_context.remote_addr)
continue
if exc:
raise AnsibleError(str(exc))
def _winrm_exec(self, command, args=(), from_exec=False):
if from_exec:
- self._display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._connection_info.remote_addr)
+ self._display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._play_context.remote_addr)
else:
- self._display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._connection_info.remote_addr)
+ self._display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._play_context.remote_addr)
if not self.protocol:
self.protocol = self._winrm_connect()
if not self.shell_id:
- self.shell_id = self.protocol.open_shell()
+ self.shell_id = self.protocol.open_shell(codepage=65001) # UTF-8
command_id = None
try:
- command_id = self.protocol.run_command(self.shell_id, command, args)
+ command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args))
response = Response(self.protocol.get_command_output(self.shell_id, command_id))
if from_exec:
- self._display.vvvvv('WINRM RESULT %r' % response, host=self._connection_info.remote_addr)
+ self._display.vvvvv('WINRM RESULT %r' % to_unicode(response), host=self._play_context.remote_addr)
else:
- self._display.vvvvvv('WINRM RESULT %r' % response, host=self._connection_info.remote_addr)
- self._display.vvvvvv('WINRM STDOUT %s' % response.std_out, host=self._connection_info.remote_addr)
- self._display.vvvvvv('WINRM STDERR %s' % response.std_err, host=self._connection_info.remote_addr)
+ self._display.vvvvv('WINRM RESULT %r' % to_unicode(response), host=self._play_context.remote_addr)
+ self._display.vvvvvv('WINRM STDOUT %s' % to_unicode(response.std_out), host=self._play_context.remote_addr)
+ self._display.vvvvvv('WINRM STDERR %s' % to_unicode(response.std_err), host=self._play_context.remote_addr)
return response
finally:
if command_id:
@@ -150,31 +151,44 @@ class Connection(ConnectionBase):
self.protocol = self._winrm_connect()
return self
- def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
-
- cmd = cmd.encode('utf-8')
- cmd_parts = shlex.split(cmd, posix=False)
+ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True):
+ super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable)
+ cmd_parts = shlex.split(to_bytes(cmd), posix=False)
+ cmd_parts = map(to_unicode, cmd_parts)
+ script = None
+ cmd_ext = cmd_parts and self._shell._unquote(cmd_parts[0]).lower()[-4:] or ''
+ # Support running .ps1 files (via script/raw).
+ if cmd_ext == '.ps1':
+ script = ' '.join(['&'] + cmd_parts)
+ # Support running .bat/.cmd files; change back to the default system encoding instead of UTF-8.
+ elif cmd_ext in ('.bat', '.cmd'):
+ script = ' '.join(['[System.Console]::OutputEncoding = [System.Text.Encoding]::Default;', '&'] + cmd_parts)
+ # Encode the command if not already encoded; supports running simple PowerShell commands via raw.
+ elif '-EncodedCommand' not in cmd_parts:
+ script = ' '.join(cmd_parts)
+ if script:
+ cmd_parts = self._shell._encode_script(script, as_list=True)
if '-EncodedCommand' in cmd_parts:
encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
- decoded_cmd = base64.b64decode(encoded_cmd)
- self._display.vvv("EXEC %s" % decoded_cmd, host=self._connection_info.remote_addr)
+ decoded_cmd = to_unicode(base64.b64decode(encoded_cmd))
+ self._display.vvv("EXEC %s" % decoded_cmd, host=self._play_context.remote_addr)
else:
- self._display.vvv("EXEC %s" % cmd, host=self._connection_info.remote_addr)
- # For script/raw support.
- if cmd_parts and cmd_parts[0].lower().endswith('.ps1'):
- script = self._shell._build_file_cmd(cmd_parts, quote_args=False)
- cmd_parts = self._shell._encode_script(script, as_list=True)
+ self._display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr)
try:
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
except Exception as e:
traceback.print_exc()
raise AnsibleError("failed to exec cmd %s" % cmd)
- return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8'))
+ result.std_out = to_unicode(result.std_out)
+ result.std_err = to_unicode(result.std_err)
+ return (result.status_code, '', result.std_out, result.std_err)
def put_file(self, in_path, out_path):
- self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr)
+ super(Connection, self).put_file(in_path, out_path)
+ out_path = self._shell._unquote(out_path)
+ self._display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._play_context.remote_addr)
if not os.path.exists(in_path):
- raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
+ raise AnsibleFileNotFound('file or module does not exist: "%s"' % in_path)
with open(in_path) as in_file:
in_size = os.path.getsize(in_path)
script_template = '''
@@ -200,21 +214,22 @@ class Connection(ConnectionBase):
out_path = out_path + '.ps1'
b64_data = base64.b64encode(out_data)
script = script_template % (self._shell._escape(out_path), offset, b64_data, in_size)
- self._display.vvvvv("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self._connection_info.remote_addr)
+ self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._play_context.remote_addr)
cmd_parts = self._shell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
- raise IOError(result.std_err.encode('utf-8'))
+ raise IOError(to_unicode(result.std_err))
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file to %s" % out_path)
+ raise AnsibleError('failed to transfer file to "%s"' % out_path)
def fetch_file(self, in_path, out_path):
+ super(Connection, self).fetch_file(in_path, out_path)
+ in_path = self._shell._unquote(in_path)
out_path = out_path.replace('\\', '/')
- self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr)
+ self._display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._play_context.remote_addr)
buffer_size = 2**19 # 0.5MB chunks
- if not os.path.exists(os.path.dirname(out_path)):
- os.makedirs(os.path.dirname(out_path))
+ makedirs_safe(os.path.dirname(out_path))
out_file = None
try:
offset = 0
@@ -241,18 +256,17 @@ class Connection(ConnectionBase):
Exit 1;
}
''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
- self._display.vvvvv("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self._connection_info.remote_addr)
+ self._display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._play_context.remote_addr)
cmd_parts = self._shell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
- raise IOError(result.std_err.encode('utf-8'))
+ raise IOError(to_unicode(result.std_err))
if result.std_out.strip() == '[DIR]':
data = None
else:
data = base64.b64decode(result.std_out.strip())
if data is None:
- if not os.path.exists(out_path):
- os.makedirs(out_path)
+ makedirs_safe(out_path)
break
else:
if not out_file:
@@ -266,7 +280,7 @@ class Connection(ConnectionBase):
offset += len(data)
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file to %s" % out_path)
+ raise AnsibleError('failed to transfer file to "%s"' % out_path)
finally:
if out_file:
out_file.close()
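The winrm put_file() above uploads in fixed-size chunks: each chunk is base64-encoded and appended on the remote side at the current offset via a generated PowerShell snippet. The sketch below shows only the chunking/encoding half of that; remote_append stands in for the generated script and the default buffer size is an assumption.

import base64

def chunked_put(in_path, remote_append, buffer_size=1024):
    # Read the local file in buffer_size pieces and hand each piece, base64-encoded,
    # to the callable that performs the remote append at the given offset.
    offset = 0
    with open(in_path, 'rb') as in_file:
        while True:
            out_data = in_file.read(buffer_size)
            if not out_data:
                break
            remote_append(offset, base64.b64encode(out_data))
            offset += len(out_data)
    return offset
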
diff --git a/v2/ansible/plugins/connections/zone.py b/lib/ansible/plugins/connections/zone.py
similarity index 63%
rename from v2/ansible/plugins/connections/zone.py
rename to lib/ansible/plugins/connections/zone.py
index f7e19c3bb44..82256742a14 100644
--- a/v2/ansible/plugins/connections/zone.py
+++ b/lib/ansible/plugins/connections/zone.py
@@ -2,6 +2,7 @@
# and chroot.py (c) 2013, Maykel Moya
# and jail.py (c) 2013, Michael Scherer
# (c) 2015, Dagobert Michelsen
+# (c) 2015, Toshio Kuratomi
#
# This file is part of Ansible
#
@@ -23,13 +24,15 @@ __metaclass__ = type
import distutils.spawn
import traceback
import os
-import shutil
+import shlex
import subprocess
-from subprocess import Popen,PIPE
from ansible import errors
+from ansible.utils.unicode import to_bytes
from ansible.callbacks import vvv
import ansible.constants as C
+BUFSIZE = 65536
+
class Connection(object):
''' Local zone based connections '''
@@ -44,7 +47,7 @@ class Connection(object):
cwd=self.runner.basedir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- #stdout, stderr = p.communicate()
+
zones = []
for l in pipe.stdout.readlines():
# 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
@@ -97,13 +100,24 @@ class Connection(object):
# a modifier
def _generate_cmd(self, executable, cmd):
if executable:
+ ### TODO: Why was "-c" removed from here? (vs jail.py)
local_cmd = [self.zlogin_cmd, self.zone, executable, cmd]
else:
- local_cmd = '%s "%s" %s' % (self.zlogin_cmd, self.zone, cmd)
+ # Prior to python2.7.3, shlex couldn't handle unicode type strings
+ cmd = to_bytes(cmd)
+ cmd = shlex.split(cmd)
+ local_cmd = [self.zlogin_cmd, self.zone]
+ local_cmd += cmd
return local_cmd
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None):
- ''' run a command on the zone '''
+ def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None, stdin=subprocess.PIPE):
+ ''' run a command on the zone. This is only needed for implementing
+ put_file() and fetch_file() so that we don't have to read the whole file
+ into memory.
+
+ Compared to exec_command() it loses some niceties like being able to
+ return the process's exit code immediately.
+ '''
if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
@@ -111,53 +125,74 @@ class Connection(object):
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
- # We happily ignore privilege escalation
- if executable == '/bin/sh':
- executable = None
+ # We enter the zone as root so we ignore privilege escalation (probably need to fix this in case we have to become a specific user [ex: postgres admin])
local_cmd = self._generate_cmd(executable, cmd)
vvv("EXEC %s" % (local_cmd), host=self.zone)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
+ p = subprocess.Popen(local_cmd, shell=False,
cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
+ stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdout, stderr = p.communicate()
- return (p.returncode, '', stdout, stderr)
+ return p
- def _normalize_path(self, path, prefix):
- if not path.startswith(os.path.sep):
- path = os.path.join(os.path.sep, path)
- normpath = os.path.normpath(path)
- return os.path.join(prefix, normpath[1:])
+ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None):
+ ''' run a command on the zone '''
- def _copy_file(self, in_path, out_path):
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
+ ### TODO: Why all the precautions not to specify /bin/sh? (vs jail.py)
+ if executable == '/bin/sh':
+ executable = None
+
+ p = self._buffered_exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data)
+
+ stdout, stderr = p.communicate()
+ return (p.returncode, '', stdout, stderr)
def put_file(self, in_path, out_path):
''' transfer a file from local to zone '''
- out_path = self._normalize_path(out_path, self.get_zone_path())
vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)
- self._copy_file(in_path, out_path)
+ try:
+ with open(in_path, 'rb') as in_file:
+ try:
+ p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), None, stdin=in_file)
+ except OSError:
+ raise errors.AnsibleError("jail connection requires dd command in the jail")
+ try:
+ stdout, stderr = p.communicate()
+ except:
+ traceback.print_exc()
+ raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ if p.returncode != 0:
+ raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ except IOError:
+ raise errors.AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from zone to local '''
- in_path = self._normalize_path(in_path, self.get_zone_path())
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)
- self._copy_file(in_path, out_path)
+
+ try:
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None)
+ except OSError:
+ raise errors.AnsibleError("zone connection requires dd command in the zone")
+
+
+ with open(out_path, 'wb+') as out_file:
+ try:
+ chunk = p.stdout.read(BUFSIZE)
+ while chunk:
+ out_file.write(chunk)
+ chunk = p.stdout.read(BUFSIZE)
+ except:
+ traceback.print_exc()
+ raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self):
''' terminate the connection; nothing to do here '''
diff --git a/lib/ansible/runner/action_plugins/__init__.py b/lib/ansible/plugins/filter/__init__.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/__init__.py
rename to lib/ansible/plugins/filter/__init__.py
diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py
new file mode 100644
index 00000000000..fa8d0e5cbbd
--- /dev/null
+++ b/lib/ansible/plugins/filter/core.py
@@ -0,0 +1,289 @@
+# (c) 2012, Jeroen Hoekx
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import
+
+import sys
+import base64
+import json
+import os.path
+import types
+import pipes
+import glob
+import re
+import crypt
+import hashlib
+import string
+from functools import partial
+import operator as py_operator
+from random import SystemRandom, shuffle
+import uuid
+
+import yaml
+from jinja2.filters import environmentfilter
+from distutils.version import LooseVersion, StrictVersion
+
+from ansible import errors
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.utils.hashing import md5s, checksum_s
+from ansible.utils.unicode import unicode_wrap, to_unicode
+
+try:
+ import passlib.hash
+ HAS_PASSLIB = True
+except:
+ HAS_PASSLIB = False
+
+
+UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
+
+def to_yaml(a, *args, **kw):
+ '''Make verbose, human readable yaml'''
+ transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, **kw)
+ return to_unicode(transformed)
+
+def to_nice_yaml(a, *args, **kw):
+ '''Make verbose, human readable yaml'''
+ transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=4, allow_unicode=True, default_flow_style=False, **kw)
+ return to_unicode(transformed)
+
+def to_json(a, *args, **kw):
+ ''' Convert the value to JSON '''
+ return json.dumps(a, *args, **kw)
+
+def to_nice_json(a, *args, **kw):
+ '''Make verbose, human readable JSON'''
+ # python-2.6's json encoder is buggy (can't encode hostvars)
+ if sys.version_info < (2, 7):
+ try:
+ import simplejson
+ except ImportError:
+ pass
+ else:
+ try:
+ major = int(simplejson.__version__.split('.')[0])
+ except:
+ pass
+ else:
+ if major >= 2:
+ return simplejson.dumps(a, indent=4, sort_keys=True, *args, **kw)
+ # Fallback to the to_json filter
+ return to_json(a, *args, **kw)
+ return json.dumps(a, indent=4, sort_keys=True, *args, **kw)
+
+def bool(a):
+ ''' return a bool for the arg '''
+ if a is None or type(a) == bool:
+ return a
+ if type(a) in types.StringTypes:
+ a = a.lower()
+ if a in ['yes', 'on', '1', 'true', 1]:
+ return True
+ else:
+ return False
+
+def quote(a):
+ ''' return its argument quoted for shell usage '''
+ return pipes.quote(a)
+
+def fileglob(pathname):
+ ''' return list of matched files for glob '''
+ return glob.glob(pathname)
+
+def regex_replace(value='', pattern='', replacement='', ignorecase=False):
+ ''' Perform a `re.sub` returning a string '''
+
+ if not isinstance(value, basestring):
+ value = str(value)
+
+ if ignorecase:
+ flags = re.I
+ else:
+ flags = 0
+ _re = re.compile(pattern, flags=flags)
+ return _re.sub(replacement, value)
+
+def ternary(value, true_val, false_val):
+ ''' value ? true_val : false_val '''
+ if value:
+ return true_val
+ else:
+ return false_val
+
+
+def version_compare(value, version, operator='eq', strict=False):
+ ''' Perform a version comparison on a value '''
+ op_map = {
+ '==': 'eq', '=': 'eq', 'eq': 'eq',
+ '<': 'lt', 'lt': 'lt',
+ '<=': 'le', 'le': 'le',
+ '>': 'gt', 'gt': 'gt',
+ '>=': 'ge', 'ge': 'ge',
+ '!=': 'ne', '<>': 'ne', 'ne': 'ne'
+ }
+
+ if strict:
+ Version = StrictVersion
+ else:
+ Version = LooseVersion
+
+ if operator in op_map:
+ operator = op_map[operator]
+ else:
+ raise errors.AnsibleFilterError('Invalid operator type')
+
+ try:
+ method = getattr(py_operator, operator)
+ return method(Version(str(value)), Version(str(version)))
+ except Exception, e:
+ raise errors.AnsibleFilterError('Version comparison: %s' % e)
+
+def regex_escape(string):
+ '''Escape all regular expressions special characters from STRING.'''
+ return re.escape(string)
+
+@environmentfilter
+def rand(environment, end, start=None, step=None):
+ r = SystemRandom()
+ if isinstance(end, (int, long)):
+ if not start:
+ start = 0
+ if not step:
+ step = 1
+ return r.randrange(start, end, step)
+ elif hasattr(end, '__iter__'):
+ if start or step:
+ raise errors.AnsibleFilterError('start and step can only be used with integer values')
+ return r.choice(end)
+ else:
+ raise errors.AnsibleFilterError('random can only be used on sequences and integers')
+
+def randomize_list(mylist):
+ try:
+ mylist = list(mylist)
+ shuffle(mylist)
+ except:
+ pass
+ return mylist
+
+def get_hash(data, hashtype='sha1'):
+
+ try: # see if hash is supported
+ h = hashlib.new(hashtype)
+ except:
+ return None
+
+ h.update(data)
+ return h.hexdigest()
+
+def get_encrypted_password(password, hashtype='sha512', salt=None):
+
+ # TODO: find a way to construct dynamically from system
+ cryptmethod= {
+ 'md5': '1',
+ 'blowfish': '2a',
+ 'sha256': '5',
+ 'sha512': '6',
+ }
+
+ hashtype = hashtype.lower()
+ if hashtype in cryptmethod:
+ if salt is None:
+ r = SystemRandom()
+ salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(16)])
+
+ if not HAS_PASSLIB:
+ if sys.platform.startswith('darwin'):
+ raise errors.AnsibleFilterError('|password_hash requires the passlib python module to generate password hashes on Mac OS X/Darwin')
+ saltstring = "$%s$%s" % (cryptmethod[hashtype],salt)
+ encrypted = crypt.crypt(password, saltstring)
+ else:
+ cls = getattr(passlib.hash, '%s_crypt' % hashtype)
+ encrypted = cls.encrypt(password, salt=salt)
+
+ return encrypted
+
+ return None
+
+def to_uuid(string):
+ return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string)))
+
+class FilterModule(object):
+ ''' Ansible core jinja2 filters '''
+
+ def filters(self):
+ return {
+ # base 64
+ 'b64decode': partial(unicode_wrap, base64.b64decode),
+ 'b64encode': partial(unicode_wrap, base64.b64encode),
+
+ # uuid
+ 'to_uuid': to_uuid,
+
+ # json
+ 'to_json': to_json,
+ 'to_nice_json': to_nice_json,
+ 'from_json': json.loads,
+
+ # yaml
+ 'to_yaml': to_yaml,
+ 'to_nice_yaml': to_nice_yaml,
+ 'from_yaml': yaml.safe_load,
+
+ # path
+ 'basename': partial(unicode_wrap, os.path.basename),
+ 'dirname': partial(unicode_wrap, os.path.dirname),
+ 'expanduser': partial(unicode_wrap, os.path.expanduser),
+ 'realpath': partial(unicode_wrap, os.path.realpath),
+ 'relpath': partial(unicode_wrap, os.path.relpath),
+ 'splitext': partial(unicode_wrap, os.path.splitext),
+
+ # value as boolean
+ 'bool': bool,
+
+ # quote string for shell usage
+ 'quote': quote,
+
+ # hash filters
+ # md5 hex digest of string
+ 'md5': md5s,
+ # sha1 hex digest of string
+ 'sha1': checksum_s,
+ # checksum of string as used by ansible for checksumming files
+ 'checksum': checksum_s,
+ # generic hashing
+ 'password_hash': get_encrypted_password,
+ 'hash': get_hash,
+
+ # file glob
+ 'fileglob': fileglob,
+
+ # regex
+ 'regex_replace': regex_replace,
+ 'regex_escape': regex_escape,
+
+ # ? : ;
+ 'ternary': ternary,
+
+ # list
+ # version comparison
+ 'version_compare': version_compare,
+
+ # random stuff
+ 'random': rand,
+ 'shuffle': randomize_list,
+ }
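The filters registered above are plain functions, so their behaviour is easy to check directly from Python. A small sketch, assuming the module is importable under the path this diff adds (ansible.plugins.filter.core); the password hash additionally needs passlib or a platform crypt that supports the chosen scheme:

from ansible.plugins.filter.core import to_nice_json, version_compare, to_uuid, get_encrypted_password

print(to_nice_json({'a': 1, 'b': [2, 3]}))       # indented, keys sorted
print(version_compare('2.0.0', '1.9.4', '>'))    # True
print(to_uuid('ansible.example.com'))            # stable UUIDv5 in the Ansible namespace
print(get_encrypted_password('secret', 'sha512'))  # crypt()/passlib hash with a random salt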
diff --git a/lib/ansible/plugins/filter/ipaddr.py b/lib/ansible/plugins/filter/ipaddr.py
new file mode 100644
index 00000000000..b2de8759e12
--- /dev/null
+++ b/lib/ansible/plugins/filter/ipaddr.py
@@ -0,0 +1,693 @@
+# (c) 2014, Maciej Delmanowski
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from functools import partial
+import types
+
+try:
+ import netaddr
+except ImportError:
+ # in this case, we'll make the filters return error messages (see bottom)
+ netaddr = None
+else:
+ class mac_linux(netaddr.mac_unix):
+ pass
+ mac_linux.word_fmt = '%.2x'
+
+from ansible import errors
+
+
+# ---- IP address and network query helpers ----
+
+def _empty_ipaddr_query(v, vtype):
+ # We don't have any query to process, so just check what type the user
+ # expects, and return the IP address in a correct format
+ if v:
+ if vtype == 'address':
+ return str(v.ip)
+ elif vtype == 'network':
+ return str(v)
+
+def _6to4_query(v, vtype, value):
+ if v.version == 4:
+
+ if v.size == 1:
+ ipconv = str(v.ip)
+ elif v.size > 1:
+ if v.ip != v.network:
+ ipconv = str(v.ip)
+ else:
+ ipconv = False
+
+ if ipaddr(ipconv, 'public'):
+ numbers = list(map(int, ipconv.split('.')))
+
+ try:
+ return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
+ except:
+ return False
+
+ elif v.version == 6:
+ if vtype == 'address':
+ if ipaddr(str(v), '2002::/16'):
+ return value
+ elif vtype == 'network':
+ if v.ip != v.network:
+ if ipaddr(str(v.ip), '2002::/16'):
+ return value
+ else:
+ return False
+
+def _ip_query(v):
+ if v.size == 1:
+ return str(v.ip)
+ if v.size > 1:
+ if v.ip != v.network:
+ return str(v.ip)
+
+def _gateway_query(v):
+ if v.size > 1:
+ if v.ip != v.network:
+ return str(v.ip) + '/' + str(v.prefixlen)
+
+def _bool_ipaddr_query(v):
+ if v:
+ return True
+
+def _broadcast_query(v):
+ if v.size > 1:
+ return str(v.broadcast)
+
+def _cidr_query(v):
+ return str(v)
+
+def _cidr_lookup_query(v, iplist, value):
+ try:
+ if v in iplist:
+ return value
+ except:
+ return False
+
+def _host_query(v):
+ if v.size == 1:
+ return str(v)
+ elif v.size > 1:
+ if v.ip != v.network:
+ return str(v.ip) + '/' + str(v.prefixlen)
+
+def _hostmask_query(v):
+ return str(v.hostmask)
+
+def _int_query(v, vtype):
+ if vtype == 'address':
+ return int(v.ip)
+ elif vtype == 'network':
+ return str(int(v.ip)) + '/' + str(int(v.prefixlen))
+
+def _ipv4_query(v, value):
+ if v.version == 6:
+ try:
+ return str(v.ipv4())
+ except:
+ return False
+ else:
+ return value
+
+def _ipv6_query(v, value):
+ if v.version == 4:
+ return str(v.ipv6())
+ else:
+ return value
+
+def _link_local_query(v, value):
+ v_ip = netaddr.IPAddress(str(v.ip))
+ if v.version == 4:
+ if ipaddr(str(v_ip), '169.254.0.0/24'):
+ return value
+
+ elif v.version == 6:
+ if ipaddr(str(v_ip), 'fe80::/10'):
+ return value
+
+def _loopback_query(v, value):
+ v_ip = netaddr.IPAddress(str(v.ip))
+ if v_ip.is_loopback():
+ return value
+
+def _multicast_query(v, value):
+ if v.is_multicast():
+ return value
+
+def _net_query(v):
+ if v.size > 1:
+ if v.ip == v.network:
+ return str(v.network) + '/' + str(v.prefixlen)
+
+def _netmask_query(v):
+ if v.size > 1:
+ return str(v.netmask)
+
+def _network_query(v):
+ if v.size > 1:
+ return str(v.network)
+
+def _prefix_query(v):
+ return int(v.prefixlen)
+
+def _private_query(v, value):
+ if v.is_private():
+ return value
+
+def _public_query(v, value):
+ v_ip = netaddr.IPAddress(str(v.ip))
+ if v_ip.is_unicast() and not v_ip.is_private() and \
+ not v_ip.is_loopback() and not v_ip.is_netmask() and \
+ not v_ip.is_hostmask():
+ return value
+
+def _revdns_query(v):
+ v_ip = netaddr.IPAddress(str(v.ip))
+ return v_ip.reverse_dns
+
+def _size_query(v):
+ return v.size
+
+def _subnet_query(v):
+ return str(v.cidr)
+
+def _type_query(v):
+ if v.size == 1:
+ return 'address'
+ if v.size > 1:
+ if v.ip != v.network:
+ return 'address'
+ else:
+ return 'network'
+
+def _unicast_query(v, value):
+ if v.is_unicast():
+ return value
+
+def _version_query(v):
+ return v.version
+
+def _wrap_query(v, vtype, value):
+ if v.version == 6:
+ if vtype == 'address':
+ return '[' + str(v.ip) + ']'
+ elif vtype == 'network':
+ return '[' + str(v.ip) + ']/' + str(v.prefixlen)
+ else:
+ return value
+
+
+# ---- HWaddr query helpers ----
+def _bare_query(v):
+ v.dialect = netaddr.mac_bare
+ return str(v)
+
+def _bool_hwaddr_query(v):
+ if v:
+ return True
+
+def _cisco_query(v):
+ v.dialect = netaddr.mac_cisco
+ return str(v)
+
+def _empty_hwaddr_query(v, value):
+ if v:
+ return value
+
+def _linux_query(v):
+ v.dialect = mac_linux
+ return str(v)
+
+def _postgresql_query(v):
+ v.dialect = netaddr.mac_pgsql
+ return str(v)
+
+def _unix_query(v):
+ v.dialect = netaddr.mac_unix
+ return str(v)
+
+def _win_query(v):
+ v.dialect = netaddr.mac_eui48
+ return str(v)
+
+
+# ---- IP address and network filters ----
+
+def ipaddr(value, query = '', version = False, alias = 'ipaddr'):
+ ''' Check if string is an IP address or network and filter it '''
+
+ query_func_extra_args = {
+ '': ('vtype',),
+ '6to4': ('vtype', 'value'),
+ 'cidr_lookup': ('iplist', 'value'),
+ 'int': ('vtype',),
+ 'ipv4': ('value',),
+ 'ipv6': ('value',),
+ 'link-local': ('value',),
+ 'loopback': ('value',),
+ 'lo': ('value',),
+ 'multicast': ('value',),
+ 'private': ('value',),
+ 'public': ('value',),
+ 'unicast': ('value',),
+ 'wrap': ('vtype', 'value'),
+ }
+ query_func_map = {
+ '': _empty_ipaddr_query,
+ '6to4': _6to4_query,
+ 'address': _ip_query,
+ 'address/prefix': _gateway_query,
+ 'bool': _bool_ipaddr_query,
+ 'broadcast': _broadcast_query,
+ 'cidr': _cidr_query,
+ 'cidr_lookup': _cidr_lookup_query,
+ 'gateway': _gateway_query,
+ 'gw': _gateway_query,
+ 'host': _host_query,
+ 'host/prefix': _gateway_query,
+ 'hostmask': _hostmask_query,
+ 'hostnet': _gateway_query,
+ 'int': _int_query,
+ 'ip': _ip_query,
+ 'ipv4': _ipv4_query,
+ 'ipv6': _ipv6_query,
+ 'link-local': _link_local_query,
+ 'lo': _loopback_query,
+ 'loopback': _loopback_query,
+ 'multicast': _multicast_query,
+ 'net': _net_query,
+ 'netmask': _netmask_query,
+ 'network': _network_query,
+ 'prefix': _prefix_query,
+ 'private': _private_query,
+ 'public': _public_query,
+ 'revdns': _revdns_query,
+ 'router': _gateway_query,
+ 'size': _size_query,
+ 'subnet': _subnet_query,
+ 'type': _type_query,
+ 'unicast': _unicast_query,
+ 'v4': _ipv4_query,
+ 'v6': _ipv6_query,
+ 'version': _version_query,
+ 'wrap': _wrap_query,
+ }
+
+ vtype = None
+
+ if not value:
+ return False
+
+ elif value == True:
+ return False
+
+ # Check if value is a list and parse each element
+ elif isinstance(value, (list, tuple, types.GeneratorType)):
+
+ _ret = []
+ for element in value:
+ if ipaddr(element, str(query), version):
+ _ret.append(ipaddr(element, str(query), version))
+
+ if _ret:
+ return _ret
+ else:
+ return list()
+
+ # Check if value is a number and convert it to an IP address
+ elif str(value).isdigit():
+
+ # We don't know what IP version to assume, so let's check IPv4 first,
+ # then IPv6
+ try:
+ if ((not version) or (version and version == 4)):
+ v = netaddr.IPNetwork('0.0.0.0/0')
+ v.value = int(value)
+ v.prefixlen = 32
+ elif version and version == 6:
+ v = netaddr.IPNetwork('::/0')
+ v.value = int(value)
+ v.prefixlen = 128
+
+ # IPv4 didn't work the first time, so it definitely has to be IPv6
+ except:
+ try:
+ v = netaddr.IPNetwork('::/0')
+ v.value = int(value)
+ v.prefixlen = 128
+
+ # The value is too big for IPv6. Are you a nanobot?
+ except:
+ return False
+
+ # We got an IP address, let's mark it as such
+ value = str(v)
+ vtype = 'address'
+
+ # value has not been recognized, check if it's a valid IP string
+ else:
+ try:
+ v = netaddr.IPNetwork(value)
+
+ # value is a valid IP string, check if user specified
+ # CIDR prefix or just an IP address, this will indicate default
+ # output format
+ try:
+ address, prefix = value.split('/')
+ vtype = 'network'
+ except:
+ vtype = 'address'
+
+ # value hasn't been recognized, maybe it's a numerical CIDR?
+ except:
+ try:
+ address, prefix = value.split('/')
+ address.isdigit()
+ address = int(address)
+ prefix.isdigit()
+ prefix = int(prefix)
+
+ # It's not a numerical CIDR, give up
+ except:
+ return False
+
+ # It is something, so let's try and build a CIDR from the parts
+ try:
+ v = netaddr.IPNetwork('0.0.0.0/0')
+ v.value = address
+ v.prefixlen = prefix
+
+ # It's not a valid IPv4 CIDR
+ except:
+ try:
+ v = netaddr.IPNetwork('::/0')
+ v.value = address
+ v.prefixlen = prefix
+
+ # It's not a valid IPv6 CIDR. Give up.
+ except:
+ return False
+
+ # We have a valid CIDR, so let's write it in correct format
+ value = str(v)
+ vtype = 'network'
+
+ # We have a query string but it's not in the known query types. Check if
+ # that string is a valid subnet, if so, we can check later if given IP
+ # address/network is inside that specific subnet
+ try:
+ ### ?? 6to4 and link-local were True here before. Should they still?
+ if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
+ iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
+ query = 'cidr_lookup'
+ except:
+ pass
+
+# This code checks if value matches the IP version the user wants, i.e. if
+ # it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
+ # If version does not match, return False
+ if version and v.version != version:
+ return False
+
+ extras = []
+ for arg in query_func_extra_args.get(query, tuple()):
+ extras.append(locals()[arg])
+ try:
+ return query_func_map[query](v, *extras)
+ except KeyError:
+ try:
+ float(query)
+ if v.size == 1:
+ if vtype == 'address':
+ return str(v.ip)
+ elif vtype == 'network':
+ return str(v)
+
+ elif v.size > 1:
+ try:
+ return str(v[query]) + '/' + str(v.prefixlen)
+ except:
+ return False
+
+ else:
+ return value
+
+ except:
+ raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
+
+ return False
+
+
+def ipwrap(value, query = ''):
+ try:
+ if isinstance(value, (list, tuple, types.GeneratorType)):
+ _ret = []
+ for element in value:
+ if ipaddr(element, query, version = False, alias = 'ipwrap'):
+ _ret.append(ipaddr(element, 'wrap'))
+ else:
+ _ret.append(element)
+
+ return _ret
+ else:
+ _ret = ipaddr(value, query, version = False, alias = 'ipwrap')
+ if _ret:
+ return ipaddr(_ret, 'wrap')
+ else:
+ return value
+
+ except:
+ return value
+
+
+def ipv4(value, query = ''):
+ return ipaddr(value, query, version = 4, alias = 'ipv4')
+
+
+def ipv6(value, query = ''):
+ return ipaddr(value, query, version = 6, alias = 'ipv6')
+
+
+# Split given subnet into smaller subnets or find out the biggest subnet of
+# a given IP address with given CIDR prefix
+# Usage:
+#
+# - address or address/prefix | ipsubnet
+# returns CIDR subnet of a given input
+#
+# - address/prefix | ipsubnet(cidr)
+# returns number of possible subnets for given CIDR prefix
+#
+# - address/prefix | ipsubnet(cidr, index)
+# returns new subnet with given CIDR prefix
+#
+# - address | ipsubnet(cidr)
+# returns biggest subnet with given CIDR prefix that address belongs to
+#
+# - address | ipsubnet(cidr, index)
+# returns next indexed subnet which contains given address
+def ipsubnet(value, query = '', index = 'x'):
+ ''' Manipulate IPv4/IPv6 subnets '''
+
+ try:
+ vtype = ipaddr(value, 'type')
+ if vtype == 'address':
+ v = ipaddr(value, 'cidr')
+ elif vtype == 'network':
+ v = ipaddr(value, 'subnet')
+
+ value = netaddr.IPNetwork(v)
+ except:
+ return False
+
+ if not query:
+ return str(value)
+
+ elif str(query).isdigit():
+ vsize = ipaddr(v, 'size')
+ query = int(query)
+
+ try:
+ float(index)
+ index = int(index)
+
+ if vsize > 1:
+ try:
+ return str(list(value.subnet(query))[index])
+ except:
+ return False
+
+ elif vsize == 1:
+ try:
+ return str(value.supernet(query)[index])
+ except:
+ return False
+
+ except:
+ if vsize > 1:
+ try:
+ return str(len(list(value.subnet(query))))
+ except:
+ return False
+
+ elif vsize == 1:
+ try:
+ return str(value.supernet(query)[0])
+ except:
+ return False
+
+ return False
+
+# Returns the nth host within a network described by value.
+# Usage:
+#
+# - address or address/prefix | nthhost(nth)
+# returns the nth host within the given network
+def nthhost(value, query=''):
+ ''' Get the nth host within a given network '''
+ try:
+ vtype = ipaddr(value, 'type')
+ if vtype == 'address':
+ v = ipaddr(value, 'cidr')
+ elif vtype == 'network':
+ v = ipaddr(value, 'subnet')
+
+ value = netaddr.IPNetwork(v)
+ except:
+ return False
+
+ if not query:
+ return False
+
+ try:
+ vsize = ipaddr(v, 'size')
+ nth = int(query)
+ if value.size > nth:
+ return value[nth]
+
+ except ValueError:
+ return False
+
+ return False
+
+# Returns the SLAAC address within a network for a given HW/MAC address.
+# Usage:
+#
+# - prefix | slaac(mac)
+def slaac(value, query = ''):
+ ''' Get the SLAAC address within given network '''
+ try:
+ vtype = ipaddr(value, 'type')
+ if vtype == 'address':
+ v = ipaddr(value, 'cidr')
+ elif vtype == 'network':
+ v = ipaddr(value, 'subnet')
+
+ if v.version != 6:
+ return False
+
+ value = netaddr.IPNetwork(v)
+ except:
+ return False
+
+ if not query:
+ return False
+
+ try:
+ mac = hwaddr(query, alias = 'slaac')
+
+ eui = netaddr.EUI(mac)
+ except:
+ return False
+
+ return eui.ipv6(value.network)
+
+
+# ---- HWaddr / MAC address filters ----
+
+def hwaddr(value, query = '', alias = 'hwaddr'):
+ ''' Check if string is a HW/MAC address and filter it '''
+
+ query_func_extra_args = {
+ '': ('value',),
+ }
+ query_func_map = {
+ '': _empty_hwaddr_query,
+ 'bare': _bare_query,
+ 'bool': _bool_hwaddr_query,
+ 'cisco': _cisco_query,
+ 'eui48': _win_query,
+ 'linux': _linux_query,
+ 'pgsql': _postgresql_query,
+ 'postgresql': _postgresql_query,
+ 'psql': _postgresql_query,
+ 'unix': _unix_query,
+ 'win': _win_query,
+ }
+
+ try:
+ v = netaddr.EUI(value)
+ except:
+ if query and query != 'bool':
+ raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
+
+ extras = []
+ for arg in query_func_extra_args.get(query, tuple()):
+ extras.append(locals()[arg])
+ try:
+ return query_func_map[query](v, *extras)
+ except KeyError:
+ raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
+
+ return False
+
+def macaddr(value, query = ''):
+ return hwaddr(value, query, alias = 'macaddr')
+
+def _need_netaddr(f_name, *args, **kwargs):
+ raise errors.AnsibleFilterError('The {0} filter requires python-netaddr be'
+ ' installed on the ansible controller'.format(f_name))
+
+# ---- Ansible filters ----
+
+class FilterModule(object):
+ ''' IP address and network manipulation filters '''
+ filter_map = {
+ # IP addresses and networks
+ 'ipaddr': ipaddr,
+ 'ipwrap': ipwrap,
+ 'ipv4': ipv4,
+ 'ipv6': ipv6,
+ 'ipsubnet': ipsubnet,
+ 'nthhost': nthhost,
+ 'slaac': slaac,
+
+ # MAC / HW addresses
+ 'hwaddr': hwaddr,
+ 'macaddr': macaddr
+ }
+
+ def filters(self):
+ if netaddr:
+ return self.filter_map
+ else:
+ # Need to install python-netaddr for these filters to work
+ return dict((f, partial(_need_netaddr, f)) for f in self.filter_map)
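For a sense of what the ipaddr family returns, a few direct calls (a sketch; python-netaddr must be installed, and the import path is the one this diff creates):

from ansible.plugins.filter.ipaddr import ipaddr, ipsubnet, nthhost, hwaddr

print(ipaddr('192.168.1.10/24'))               # '192.168.1.10/24'  (valid, echoed back)
print(ipaddr('192.168.1.10/24', 'address'))    # '192.168.1.10'
print(ipaddr('192.168.1.10/24', 'network'))    # '192.168.1.0'
print(ipsubnet('192.168.1.0/24', 26))          # '4'  (number of /26 subnets that fit)
print(nthhost('192.168.1.0/24', 5))            # 192.168.1.5
print(hwaddr('08:00:27:0d:a5:93', 'cisco'))    # '0800.270d.a593'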
diff --git a/lib/ansible/plugins/filter/mathstuff.py b/lib/ansible/plugins/filter/mathstuff.py
new file mode 100644
index 00000000000..516ef1c6774
--- /dev/null
+++ b/lib/ansible/plugins/filter/mathstuff.py
@@ -0,0 +1,155 @@
+# (c) 2014, Brian Coca
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import
+
+import math
+import collections
+from ansible import errors
+
+def unique(a):
+ if isinstance(a,collections.Hashable):
+ c = set(a)
+ else:
+ c = []
+ for x in a:
+ if x not in c:
+ c.append(x)
+ return c
+
+def intersect(a, b):
+ if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
+ c = set(a) & set(b)
+ else:
+ c = unique(filter(lambda x: x in b, a))
+ return c
+
+def difference(a, b):
+ if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
+ c = set(a) - set(b)
+ else:
+ c = unique(filter(lambda x: x not in b, a))
+ return c
+
+def symmetric_difference(a, b):
+ if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
+ c = set(a) ^ set(b)
+ else:
+ c = unique(filter(lambda x: x not in intersect(a,b), union(a,b)))
+ return c
+
+def union(a, b):
+ if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
+ c = set(a) | set(b)
+ else:
+ c = unique(a + b)
+ return c
+
+def min(a):
+ _min = __builtins__.get('min')
+ return _min(a);
+
+def max(a):
+ _max = __builtins__.get('max')
+ return _max(a);
+
+def isnotanumber(x):
+ try:
+ return math.isnan(x)
+ except TypeError:
+ return False
+
+
+def logarithm(x, base=math.e):
+ try:
+ if base == 10:
+ return math.log10(x)
+ else:
+ return math.log(x, base)
+ except TypeError, e:
+ raise errors.AnsibleFilterError('log() can only be used on numbers: %s' % str(e))
+
+
+def power(x, y):
+ try:
+ return math.pow(x, y)
+ except TypeError, e:
+ raise errors.AnsibleFilterError('pow() can only be used on numbers: %s' % str(e))
+
+
+def inversepower(x, base=2):
+ try:
+ if base == 2:
+ return math.sqrt(x)
+ else:
+ return math.pow(x, 1.0/float(base))
+ except TypeError, e:
+ raise errors.AnsibleFilterError('root() can only be used on numbers: %s' % str(e))
+
+
+def human_readable(size, isbits=False, unit=None):
+
+ base = 'bits' if isbits else 'Bytes'
+ suffix = ''
+
+ ranges = (
+ (1<<70L, 'Z'),
+ (1<<60L, 'E'),
+ (1<<50L, 'P'),
+ (1<<40L, 'T'),
+ (1<<30L, 'G'),
+ (1<<20L, 'M'),
+ (1<<10L, 'K'),
+ (1, base)
+ )
+
+ for limit, suffix in ranges:
+ if (unit is None and size >= limit) or \
+ unit is not None and unit.upper() == suffix:
+ break
+
+ if limit != 1:
+ suffix += base[0]
+
+ return '%.2f %s' % (float(size)/ limit, suffix)
+
+class FilterModule(object):
+ ''' Ansible math jinja2 filters '''
+
+ def filters(self):
+ return {
+ # general math
+ 'isnan': isnotanumber,
+ 'min' : min,
+ 'max' : max,
+
+ # exponents and logarithms
+ 'log': logarithm,
+ 'pow': power,
+ 'root': inversepower,
+
+ # set theory
+ 'unique' : unique,
+ 'intersect': intersect,
+ 'difference': difference,
+ 'symmetric_difference': symmetric_difference,
+ 'union': union,
+
+ # computer theory
+ 'human_readable' : human_readable,
+
+ }
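As with the core filters, these are ordinary functions and can be sanity-checked directly (a sketch; note the module is Python 2 only as written, given the "except TypeError, e" syntax and long literals):

from ansible.plugins.filter.mathstuff import union, intersect, difference, logarithm, power, human_readable

print(union([1, 2, 3], [3, 4]))         # [1, 2, 3, 4]  (lists are de-duplicated, order preserved)
print(intersect([1, 2, 3], [2, 3, 4]))  # [2, 3]
print(difference([1, 2, 3], [2]))       # [1, 3]
print(logarithm(8, 2))                  # 3.0
print(power(2, 10))                     # 1024.0
print(human_readable(1048576))          # '1.00 MB'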
diff --git a/v2/ansible/plugins/inventory/__init__.py b/lib/ansible/plugins/inventory/__init__.py
similarity index 96%
rename from v2/ansible/plugins/inventory/__init__.py
rename to lib/ansible/plugins/inventory/__init__.py
index 03fd89429b4..74dbccc1bbc 100644
--- a/v2/ansible/plugins/inventory/__init__.py
+++ b/lib/ansible/plugins/inventory/__init__.py
@@ -23,10 +23,9 @@ __metaclass__ = type
from abc import ABCMeta, abstractmethod
-from six import add_metaclass
+from six import with_metaclass
-@add_metaclass(ABCMeta)
-class InventoryParser:
+class InventoryParser(with_metaclass(ABCMeta, object)):
'''Abstract Base Class for retrieving inventory information
Any InventoryParser functions by taking an inven_source. The caller then
diff --git a/v2/ansible/plugins/inventory/aggregate.py b/lib/ansible/plugins/inventory/aggregate.py
similarity index 100%
rename from v2/ansible/plugins/inventory/aggregate.py
rename to lib/ansible/plugins/inventory/aggregate.py
diff --git a/v2/ansible/plugins/inventory/directory.py b/lib/ansible/plugins/inventory/directory.py
similarity index 100%
rename from v2/ansible/plugins/inventory/directory.py
rename to lib/ansible/plugins/inventory/directory.py
diff --git a/v2/ansible/plugins/inventory/ini.py b/lib/ansible/plugins/inventory/ini.py
similarity index 100%
rename from v2/ansible/plugins/inventory/ini.py
rename to lib/ansible/plugins/inventory/ini.py
diff --git a/v2/ansible/plugins/lookup/__init__.py b/lib/ansible/plugins/lookup/__init__.py
similarity index 77%
rename from v2/ansible/plugins/lookup/__init__.py
rename to lib/ansible/plugins/lookup/__init__.py
index 8c841c81d28..5abca22ac7d 100644
--- a/v2/ansible/plugins/lookup/__init__.py
+++ b/lib/ansible/plugins/lookup/__init__.py
@@ -19,11 +19,25 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
__all__ = ['LookupBase']
class LookupBase:
- def __init__(self, loader=None, **kwargs):
+ def __init__(self, loader=None, templar=None, **kwargs):
self._loader = loader
+ self._templar = templar
+ self._display = display
+
+ def get_basedir(self, variables):
+ if 'role_path' in variables:
+ return variables['role_path']
+ else:
+ return self._loader.get_basedir()
def _flatten(self, terms):
ret = []
diff --git a/v2/ansible/plugins/lookup/cartesian.py b/lib/ansible/plugins/lookup/cartesian.py
similarity index 88%
rename from v2/ansible/plugins/lookup/cartesian.py
rename to lib/ansible/plugins/lookup/cartesian.py
index 7d8e08cb94d..9ae18587aeb 100644
--- a/v2/ansible/plugins/lookup/cartesian.py
+++ b/lib/ansible/plugins/lookup/cartesian.py
@@ -29,16 +29,16 @@ class LookupModule(LookupBase):
[1, 2, 3], [a, b] -> [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]
"""
- def __lookup_variabless(self, terms, variables):
+ def __lookup_variables(self, terms):
results = []
for x in terms:
- intermediate = listify_lookup_plugin_terms(x, variables, loader=self._loader)
+ intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
results.append(intermediate)
return results
def run(self, terms, variables=None, **kwargs):
- terms = self.__lookup_variabless(terms, variables)
+ terms = self.__lookup_variables(terms)
my_list = terms[:]
if len(my_list) == 0:
diff --git a/v2/ansible/plugins/lookup/csvfile.py b/lib/ansible/plugins/lookup/csvfile.py
similarity index 90%
rename from v2/ansible/plugins/lookup/csvfile.py
rename to lib/ansible/plugins/lookup/csvfile.py
index 2a98d19fe4c..1a277887222 100644
--- a/v2/ansible/plugins/lookup/csvfile.py
+++ b/lib/ansible/plugins/lookup/csvfile.py
@@ -45,7 +45,10 @@ class LookupModule(LookupBase):
if isinstance(terms, basestring):
terms = [ terms ]
+ basedir = self.get_basedir(variables)
+
ret = []
+
for term in terms:
params = term.split()
key = params[0]
@@ -69,9 +72,8 @@ class LookupModule(LookupBase):
if paramvals['delimiter'] == 'TAB':
paramvals['delimiter'] = "\t"
- path = self._loader.path_dwim(paramvals['file'])
-
- var = self.read_csv(path, key, paramvals['delimiter'], paramvals['default'], paramvals['col'])
+ lookupfile = self._loader.path_dwim_relative(basedir, 'files', paramvals['file'])
+ var = self.read_csv(lookupfile, key, paramvals['delimiter'], paramvals['default'], paramvals['col'])
if var is not None:
if type(var) is list:
for v in var:
diff --git a/v2/ansible/plugins/lookup/dict.py b/lib/ansible/plugins/lookup/dict.py
similarity index 100%
rename from v2/ansible/plugins/lookup/dict.py
rename to lib/ansible/plugins/lookup/dict.py
diff --git a/v2/ansible/plugins/lookup/dnstxt.py b/lib/ansible/plugins/lookup/dnstxt.py
similarity index 100%
rename from v2/ansible/plugins/lookup/dnstxt.py
rename to lib/ansible/plugins/lookup/dnstxt.py
diff --git a/v2/ansible/plugins/lookup/env.py b/lib/ansible/plugins/lookup/env.py
similarity index 100%
rename from v2/ansible/plugins/lookup/env.py
rename to lib/ansible/plugins/lookup/env.py
diff --git a/v2/ansible/plugins/lookup/etcd.py b/lib/ansible/plugins/lookup/etcd.py
similarity index 85%
rename from v2/ansible/plugins/lookup/etcd.py
rename to lib/ansible/plugins/lookup/etcd.py
index 002068389f8..46a81e4d6bb 100644
--- a/v2/ansible/plugins/lookup/etcd.py
+++ b/lib/ansible/plugins/lookup/etcd.py
@@ -18,23 +18,25 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
-import urllib2
+
try:
import json
except ImportError:
import simplejson as json
from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.urls import open_url
# this can be made configurable, but should not use ansible.cfg
ANSIBLE_ETCD_URL = 'http://127.0.0.1:4001'
if os.getenv('ANSIBLE_ETCD_URL') is not None:
ANSIBLE_ETCD_URL = os.environ['ANSIBLE_ETCD_URL']
-class etcd():
- def __init__(self, url=ANSIBLE_ETCD_URL):
+class Etcd:
+ def __init__(self, url=ANSIBLE_ETCD_URL, validate_certs=True):
self.url = url
self.baseurl = '%s/v1/keys' % (self.url)
+ self.validate_certs = validate_certs
def get(self, key):
url = "%s/%s" % (self.baseurl, key)
@@ -42,7 +44,7 @@ class etcd():
data = None
value = ""
try:
- r = urllib2.urlopen(url)
+ r = open_url(url, validate_certs=self.validate_certs)
data = r.read()
except:
return value
@@ -67,7 +69,9 @@ class LookupModule(LookupBase):
if isinstance(terms, basestring):
terms = [ terms ]
- etcd = etcd()
+ validate_certs = kwargs.get('validate_certs', True)
+
+ etcd = Etcd(validate_certs=validate_certs)
ret = []
for term in terms:
diff --git a/v2/ansible/plugins/lookup/file.py b/lib/ansible/plugins/lookup/file.py
similarity index 63%
rename from v2/ansible/plugins/lookup/file.py
rename to lib/ansible/plugins/lookup/file.py
index efb039497dd..2498f90c9cc 100644
--- a/v2/ansible/plugins/lookup/file.py
+++ b/lib/ansible/plugins/lookup/file.py
@@ -31,10 +31,11 @@ class LookupModule(LookupBase):
terms = [ terms ]
ret = []
+
+ basedir = self.get_basedir(variables)
+
for term in terms:
- basedir_path = self._loader.path_dwim(term)
- relative_path = None
- playbook_path = None
+ self._display.debug("File lookup term: %s" % term)
# Special handling of the file lookup, used primarily when the
# lookup is done from a role. If the file isn't found in the
@@ -42,19 +43,15 @@ class LookupModule(LookupBase):
# role/files/ directory, and finally the playbook directory
# itself (which will be relative to the current working dir)
- # FIXME: the original file stuff still needs to be worked out, but the
- # playbook_dir stuff should be able to be removed as it should
- # be covered by the fact that the loader contains that info
- #if '_original_file' in variables:
- # relative_path = self._loader.path_dwim_relative(variables['_original_file'], 'files', term, self.basedir, check=False)
- #if 'playbook_dir' in variables:
- # playbook_path = os.path.join(variables['playbook_dir'], term)
-
- for path in (basedir_path, relative_path, playbook_path):
- if path and os.path.exists(path):
- ret.append(codecs.open(path, encoding="utf8").read().rstrip())
- break
- else:
+ lookupfile = self._loader.path_dwim_relative(basedir, 'files', term)
+ self._display.vvvv("File lookup using %s as file" % lookupfile)
+ try:
+ if lookupfile:
+ contents, show_data = self._loader._get_file_contents(lookupfile)
+ ret.append(contents.rstrip())
+ else:
+ raise AnsibleParserError()
+ except AnsibleParserError:
raise AnsibleError("could not locate file in lookup: %s" % term)
return ret
diff --git a/v2/ansible/plugins/lookup/fileglob.py b/lib/ansible/plugins/lookup/fileglob.py
similarity index 80%
rename from v2/ansible/plugins/lookup/fileglob.py
rename to lib/ansible/plugins/lookup/fileglob.py
index 89859067150..7889e6e5bcf 100644
--- a/v2/ansible/plugins/lookup/fileglob.py
+++ b/lib/ansible/plugins/lookup/fileglob.py
@@ -26,9 +26,12 @@ class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
+ basedir = self.get_basedir(variables)
+
ret = []
for term in terms:
- dwimmed = self._loader.path_dwim(term)
- globbed = glob.glob(dwimmed)
+ term_file = os.path.basename(term)
+ dwimmed_path = self._loader.path_dwim_relative(basedir, 'files', os.path.dirname(term))
+ globbed = glob.glob(os.path.join(dwimmed_path, term_file))
ret.extend(g for g in globbed if os.path.isfile(g))
return ret
diff --git a/v2/ansible/plugins/lookup/first_found.py b/lib/ansible/plugins/lookup/first_found.py
similarity index 95%
rename from v2/ansible/plugins/lookup/first_found.py
rename to lib/ansible/plugins/lookup/first_found.py
index 091f104c628..6e0aaee117a 100644
--- a/v2/ansible/plugins/lookup/first_found.py
+++ b/lib/ansible/plugins/lookup/first_found.py
@@ -123,9 +123,8 @@ import os
from jinja2.exceptions import UndefinedError
-from ansible.errors import AnsibleUndefinedVariable
+from ansible.errors import AnsibleLookupError, AnsibleUndefinedVariable
from ansible.plugins.lookup import LookupBase
-from ansible.template import Templar
from ansible.utils.boolean import boolean
class LookupModule(LookupBase):
@@ -174,11 +173,10 @@ class LookupModule(LookupBase):
else:
total_search = terms
- templar = Templar(loader=self._loader, variables=variables)
roledir = variables.get('roledir')
for fn in total_search:
try:
- fn = templar.template(fn)
+ fn = self._templar.template(fn)
except (AnsibleUndefinedVariable, UndefinedError) as e:
continue
@@ -202,5 +200,5 @@ class LookupModule(LookupBase):
if skip:
return []
else:
- return [None]
+ raise AnsibleLookupError("No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no files are found")
diff --git a/v2/ansible/plugins/lookup/flattened.py b/lib/ansible/plugins/lookup/flattened.py
similarity index 95%
rename from v2/ansible/plugins/lookup/flattened.py
rename to lib/ansible/plugins/lookup/flattened.py
index f0a8adaf5e6..7477db4b834 100644
--- a/v2/ansible/plugins/lookup/flattened.py
+++ b/lib/ansible/plugins/lookup/flattened.py
@@ -46,7 +46,7 @@ class LookupModule(LookupBase):
if isinstance(term, basestring):
# convert a variable to a list
- term2 = listify_lookup_plugin_terms(term, variables, loader=self._loader)
+ term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
# but avoid converting a plain string to a list of one string
if term2 != [ term ]:
term = term2
diff --git a/v2/ansible/plugins/lookup/indexed_items.py b/lib/ansible/plugins/lookup/indexed_items.py
similarity index 100%
rename from v2/ansible/plugins/lookup/indexed_items.py
rename to lib/ansible/plugins/lookup/indexed_items.py
diff --git a/v2/ansible/plugins/lookup/inventory_hostnames.py b/lib/ansible/plugins/lookup/inventory_hostnames.py
similarity index 100%
rename from v2/ansible/plugins/lookup/inventory_hostnames.py
rename to lib/ansible/plugins/lookup/inventory_hostnames.py
diff --git a/v2/ansible/plugins/lookup/items.py b/lib/ansible/plugins/lookup/items.py
similarity index 100%
rename from v2/ansible/plugins/lookup/items.py
rename to lib/ansible/plugins/lookup/items.py
diff --git a/v2/ansible/plugins/lookup/lines.py b/lib/ansible/plugins/lookup/lines.py
similarity index 100%
rename from v2/ansible/plugins/lookup/lines.py
rename to lib/ansible/plugins/lookup/lines.py
diff --git a/v2/ansible/plugins/lookup/nested.py b/lib/ansible/plugins/lookup/nested.py
similarity index 73%
rename from v2/ansible/plugins/lookup/nested.py
rename to lib/ansible/plugins/lookup/nested.py
index 52f4bed1d52..23938f6a19c 100644
--- a/v2/ansible/plugins/lookup/nested.py
+++ b/lib/ansible/plugins/lookup/nested.py
@@ -17,22 +17,29 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-from ansible.errors import AnsibleError
+from jinja2.exceptions import UndefinedError
+
+from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
- def __lookup_variabless(self, terms, variables):
+ def __lookup_variables(self, terms, variables):
+ foo = variables.copy()
+ foo.pop('vars')
results = []
for x in terms:
- intermediate = listify_lookup_plugin_terms(x, variables, loader=self._loader)
+ try:
+ intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader, fail_on_undefined=True)
+ except UndefinedError, e:
+ raise AnsibleUndefinedVariable("One of the nested variables was undefined. The error was: %s" % e)
results.append(intermediate)
return results
def run(self, terms, variables=None, **kwargs):
- terms = self.__lookup_variabless(terms, variables)
+ terms = self.__lookup_variables(terms, variables)
my_list = terms[:]
my_list.reverse()
diff --git a/v2/ansible/plugins/lookup/password.py b/lib/ansible/plugins/lookup/password.py
similarity index 94%
rename from v2/ansible/plugins/lookup/password.py
rename to lib/ansible/plugins/lookup/password.py
index 2e7633a067a..47ec786429e 100644
--- a/v2/ansible/plugins/lookup/password.py
+++ b/lib/ansible/plugins/lookup/password.py
@@ -30,6 +30,7 @@ from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.encrypt import do_encrypt
+from ansible.utils.path import makedirs_safe
DEFAULT_LENGTH = 20
@@ -98,11 +99,10 @@ class LookupModule(LookupBase):
path = self._loader.path_dwim(relpath)
if not os.path.exists(path):
pathdir = os.path.dirname(path)
- if not os.path.isdir(pathdir):
- try:
- os.makedirs(pathdir, mode=0o700)
- except OSError as e:
- raise AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e)))
+ try:
+ makedirs_safe(pathdir, mode=0o700)
+ except OSError as e:
+ raise AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e)))
chars = "".join([getattr(string,c,c) for c in use_chars]).replace('"','').replace("'",'')
password = ''.join(random.choice(chars) for _ in range(length))
diff --git a/v2/ansible/plugins/lookup/pipe.py b/lib/ansible/plugins/lookup/pipe.py
similarity index 100%
rename from v2/ansible/plugins/lookup/pipe.py
rename to lib/ansible/plugins/lookup/pipe.py
diff --git a/v2/ansible/plugins/lookup/random_choice.py b/lib/ansible/plugins/lookup/random_choice.py
similarity index 100%
rename from v2/ansible/plugins/lookup/random_choice.py
rename to lib/ansible/plugins/lookup/random_choice.py
diff --git a/v2/ansible/plugins/lookup/redis_kv.py b/lib/ansible/plugins/lookup/redis_kv.py
similarity index 100%
rename from v2/ansible/plugins/lookup/redis_kv.py
rename to lib/ansible/plugins/lookup/redis_kv.py
diff --git a/v2/ansible/plugins/lookup/sequence.py b/lib/ansible/plugins/lookup/sequence.py
similarity index 87%
rename from v2/ansible/plugins/lookup/sequence.py
rename to lib/ansible/plugins/lookup/sequence.py
index 1ddeba932f8..5cd87f4f524 100644
--- a/v2/ansible/plugins/lookup/sequence.py
+++ b/lib/ansible/plugins/lookup/sequence.py
@@ -22,7 +22,6 @@ from re import compile as re_compile, IGNORECASE
from ansible.errors import *
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
-from ansible.template import Templar
# shortcut format
NUM = "(0?x?[0-9a-f]+)"
@@ -152,15 +151,26 @@ class LookupModule(LookupBase):
)
elif self.count is not None:
# convert count to end
- self.end = self.start + self.count * self.stride - 1
+ if self.count != 0:
+ self.end = self.start + self.count * self.stride - 1
+ else:
+ self.start = 0
+ self.end = 0
+ self.stride = 0
del self.count
- if self.end < self.start:
- raise AnsibleError("can't count backwards")
+ if self.stride > 0 and self.end < self.start:
+ raise AnsibleError("to count backwards make stride negative")
+ if self.stride < 0 and self.end > self.start:
+ raise AnsibleError("to count forward don't make stride negative")
if self.format.count('%') != 1:
raise AnsibleError("bad formatting string: %s" % self.format)
def generate_sequence(self):
- numbers = xrange(self.start, self.end + 1, self.stride)
+ if self.stride > 0:
+ adjust = 1
+ else:
+ adjust = -1
+ numbers = xrange(self.start, self.end + adjust, self.stride)
for i in numbers:
try:
@@ -177,13 +187,11 @@ class LookupModule(LookupBase):
if isinstance(terms, basestring):
terms = [ terms ]
- templar = Templar(loader=self._loader, variables=variables)
-
for term in terms:
try:
self.reset() # clear out things for this iteration
- term = templar.template(term)
+ term = self._templar.template(term)
try:
if not self.parse_simple_args(term):
self.parse_kv_args(parse_kv(term))
@@ -191,13 +199,13 @@ class LookupModule(LookupBase):
raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))
self.sanity_check()
-
- results.extend(self.generate_sequence())
+ if self.stride != 0:
+ results.extend(self.generate_sequence())
except AnsibleError:
raise
- except Exception:
+ except Exception as e:
raise AnsibleError(
- "unknown error generating sequence"
+ "unknown error generating sequence: %s" % e
)
return results
diff --git a/lib/ansible/plugins/lookup/subelements.py b/lib/ansible/plugins/lookup/subelements.py
new file mode 100644
index 00000000000..e014e382bab
--- /dev/null
+++ b/lib/ansible/plugins/lookup/subelements.py
@@ -0,0 +1,104 @@
+# (c) 2013, Serge van Ginderachter
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import *
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+from ansible.utils.boolean import boolean
+
+FLAGS = ('skip_missing',)
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ def _raise_terms_error(msg=""):
+ raise AnsibleError(
+ "subelements lookup expects a list of two or three items, "
+ + msg)
+
+ terms = listify_lookup_plugin_terms(terms, templar=self._templar, loader=self._loader)
+ terms[0] = listify_lookup_plugin_terms(terms[0], templar=self._templar, loader=self._loader)
+
+ # check lookup terms - check number of terms
+ if not isinstance(terms, list) or not 2 <= len(terms) <= 3:
+ _raise_terms_error()
+
+ # first term should be a list (or dict), second a string holding the subkey
+ if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], basestring):
+ _raise_terms_error("first a dict or a list, second a string pointing to the subkey")
+ subelements = terms[1].split(".")
+
+ if isinstance(terms[0], dict): # convert to list:
+ if terms[0].get('skipped', False) is not False:
+ # the registered result was completely skipped
+ return []
+ elementlist = []
+ for key in terms[0].iterkeys():
+ elementlist.append(terms[0][key])
+ else:
+ elementlist = terms[0]
+
+ # check for optional flags in third term
+ flags = {}
+ if len(terms) == 3:
+ flags = terms[2]
+ if not isinstance(flags, dict) and not all([isinstance(key, basestring) and key in FLAGS for key in flags]):
+ _raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS)
+
+ # build_items
+ ret = []
+ for item0 in elementlist:
+ if not isinstance(item0, dict):
+ raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0)
+ if item0.get('skipped', False) is not False:
+ # this particular item is to be skipped
+ continue
+
+ skip_missing = boolean(flags.get('skip_missing', False))
+ subvalue = item0
+ lastsubkey = False
+ sublist = []
+ for subkey in subelements:
+ if subkey == subelements[-1]:
+ lastsubkey = True
+ if not subkey in subvalue:
+ if skip_missing:
+ continue
+ else:
+ raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue))
+ if not lastsubkey:
+ if not isinstance(subvalue[subkey], dict):
+ if skip_missing:
+ continue
+ else:
+ raise AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey]))
+ else:
+ subvalue = subvalue[subkey]
+ else: # lastsubkey
+ if not isinstance(subvalue[subkey], list):
+ raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey]))
+ else:
+ sublist = subvalue.pop(subkey, [])
+ for item1 in sublist:
+ ret.append((item0, item1))
+
+ return ret
+
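To make the expected input shape concrete, here is plain data of the kind the subelements lookup walks over, with a rough pure-Python equivalent of the pairs it yields (illustration only, no Ansible machinery involved):

# first term: a list of dicts; second term: the dotted subkey ('authorized' here)
users = [
    {'name': 'alice', 'authorized': ['key_a1', 'key_a2']},
    {'name': 'bob',   'authorized': ['key_b1']},
]

# the lookup returns one (outer item, inner item) pair per sub-element,
# roughly equivalent to:
pairs = [(u, k) for u in users for k in u['authorized']]
# -> (alice, key_a1), (alice, key_a2), (bob, key_b1)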
diff --git a/v2/ansible/plugins/lookup/template.py b/lib/ansible/plugins/lookup/template.py
similarity index 60%
rename from v2/ansible/plugins/lookup/template.py
rename to lib/ansible/plugins/lookup/template.py
index e53e1990a0d..8f793a2066f 100644
--- a/v2/ansible/plugins/lookup/template.py
+++ b/lib/ansible/plugins/lookup/template.py
@@ -19,9 +19,9 @@ __metaclass__ = type
import os
+from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
-from ansible.template import Templar
class LookupModule(LookupBase):
@@ -30,16 +30,27 @@ class LookupModule(LookupBase):
if not isinstance(terms, list):
terms = [ terms ]
- templar = Templar(loader=self._loader, variables=variables)
+ basedir = self.get_basedir(variables)
ret = []
+
for term in terms:
- path = self._loader.path_dwim(term)
- if os.path.exists(path):
- with open(path, 'r') as f:
+ self._display.debug("File lookup term: %s" % term)
+
+ lookupfile = self._loader.path_dwim_relative(basedir, 'templates', term)
+ self._display.vvvv("File lookup using %s as file" % lookupfile)
+ if lookupfile and os.path.exists(lookupfile):
+ with open(lookupfile, 'r') as f:
template_data = f.read()
- res = templar.template(template_data, preserve_trailing_newlines=True)
+
+ self._templar.environment.searchpath = [self._loader._basedir, os.path.dirname(lookupfile)]
+ if 'role_path' in variables:
+ self._templar.environment.searchpath.insert(1, C.DEFAULT_ROLES_PATH)
+ self._templar.environment.searchpath.insert(1, variables['role_path'])
+
+ res = self._templar.template(template_data, preserve_trailing_newlines=True)
ret.append(res)
else:
raise AnsibleError("the template file %s could not be found for the lookup" % term)
+
return ret
diff --git a/v2/ansible/plugins/lookup/together.py b/lib/ansible/plugins/lookup/together.py
similarity index 88%
rename from v2/ansible/plugins/lookup/together.py
rename to lib/ansible/plugins/lookup/together.py
index 2f53121cc8b..42c98455070 100644
--- a/v2/ansible/plugins/lookup/together.py
+++ b/lib/ansible/plugins/lookup/together.py
@@ -31,16 +31,16 @@ class LookupModule(LookupBase):
[1, 2], [3] -> [1, 3], [2, None]
"""
- def __lookup_variabless(self, terms, variables):
+ def __lookup_variables(self, terms):
results = []
for x in terms:
- intermediate = listify_lookup_plugin_terms(x, variables, loader=self._loader)
+ intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
results.append(intermediate)
return results
def run(self, terms, variables=None, **kwargs):
- terms = self.__lookup_variabless(terms, variables)
+ terms = self.__lookup_variables(terms)
my_list = terms[:]
if len(my_list) == 0:
diff --git a/v2/ansible/plugins/lookup/url.py b/lib/ansible/plugins/lookup/url.py
similarity index 52%
rename from v2/ansible/plugins/lookup/url.py
rename to lib/ansible/plugins/lookup/url.py
index 9f1a89f772c..216b07d1f86 100644
--- a/v2/ansible/plugins/lookup/url.py
+++ b/lib/ansible/plugins/lookup/url.py
@@ -17,30 +17,37 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-from ansible.plugins.lookup import LookupBase
import urllib2
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.utils.unicode import to_unicode
+
class LookupModule(LookupBase):
- def run(self, terms, inject=None, **kwargs):
+ def run(self, terms, variables=None, **kwargs):
if isinstance(terms, basestring):
terms = [ terms ]
+ validate_certs = kwargs.get('validate_certs', True)
+
ret = []
for term in terms:
+ self._display.vvvv("url lookup connecting to %s" % term)
try:
- r = urllib2.Request(term)
- response = urllib2.urlopen(r)
- except URLError as e:
- utils.warnings("Failed lookup url for %s : %s" % (term, str(e)))
- continue
- except HTTPError as e:
- utils.warnings("Received HTTP error for %s : %s" % (term, str(e)))
- continue
+ response = open_url(term, validate_certs=validate_certs)
+ except urllib2.URLError as e:
+ raise AnsibleError("Failed lookup url for %s : %s" % (term, str(e)))
+ except urllib2.HTTPError as e:
+ raise AnsibleError("Received HTTP error for %s : %s" % (term, str(e)))
+ except SSLValidationError as e:
+ raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, str(e)))
+ except ConnectionError as e:
+ raise AnsibleError("Error connecting to %s: %s" % (term, str(e)))
for line in response.read().splitlines():
- ret.append(line)
-
+ ret.append(to_unicode(line))
return ret
diff --git a/v2/ansible/plugins/shell/__init__.py b/lib/ansible/plugins/shell/__init__.py
similarity index 100%
rename from v2/ansible/plugins/shell/__init__.py
rename to lib/ansible/plugins/shell/__init__.py
diff --git a/v2/ansible/plugins/shell/csh.py b/lib/ansible/plugins/shell/csh.py
similarity index 100%
rename from v2/ansible/plugins/shell/csh.py
rename to lib/ansible/plugins/shell/csh.py
diff --git a/v2/ansible/plugins/shell/fish.py b/lib/ansible/plugins/shell/fish.py
similarity index 100%
rename from v2/ansible/plugins/shell/fish.py
rename to lib/ansible/plugins/shell/fish.py
diff --git a/v2/ansible/plugins/shell/powershell.py b/lib/ansible/plugins/shell/powershell.py
similarity index 60%
rename from v2/ansible/plugins/shell/powershell.py
rename to lib/ansible/plugins/shell/powershell.py
index e4331e46c65..0e16d34e160 100644
--- a/v2/ansible/plugins/shell/powershell.py
+++ b/lib/ansible/plugins/shell/powershell.py
@@ -24,7 +24,9 @@ import random
import shlex
import time
-_common_args = ['PowerShell', '-NoProfile', '-NonInteractive']
+from ansible.utils.unicode import to_bytes, to_unicode
+
+_common_args = ['PowerShell', '-NoProfile', '-NonInteractive', '-ExecutionPolicy', 'Unrestricted']
# Primarily for testing, allow explicitly specifying PowerShell version via
# an environment variable.
@@ -38,33 +40,54 @@ class ShellModule(object):
return ''
def join_path(self, *args):
- return os.path.join(*args).replace('/', '\\')
+ parts = []
+ for arg in args:
+ arg = self._unquote(arg).replace('/', '\\')
+ parts.extend([a for a in arg.split('\\') if a])
+ path = '\\'.join(parts)
+ if path.startswith('~'):
+ return path
+ return '"%s"' % path
def path_has_trailing_slash(self, path):
# Allow Windows paths to be specified using either slash.
+ path = self._unquote(path)
return path.endswith('/') or path.endswith('\\')
def chmod(self, mode, path):
return ''
def remove(self, path, recurse=False):
- path = self._escape(path)
+ path = self._escape(self._unquote(path))
if recurse:
return self._encode_script('''Remove-Item "%s" -Force -Recurse;''' % path)
else:
return self._encode_script('''Remove-Item "%s" -Force;''' % path)
def mkdtemp(self, basefile, system=False, mode=None):
- basefile = self._escape(basefile)
+ basefile = self._escape(self._unquote(basefile))
# FIXME: Support system temp path!
return self._encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile)
- def md5(self, path):
- path = self._escape(path)
+ def expand_user(self, user_home_path):
+ # PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
+ # not seem to work remotely, though by default we are always starting
+ # in the user's home directory.
+ user_home_path = self._unquote(user_home_path)
+ if user_home_path == '~':
+ script = 'Write-Host (Get-Location).Path'
+ elif user_home_path.startswith('~\\'):
+ script = 'Write-Host ((Get-Location).Path + "%s")' % self._escape(user_home_path[1:])
+ else:
+ script = 'Write-Host "%s"' % self._escape(user_home_path)
+ return self._encode_script(script)
+
+ def checksum(self, path, *args, **kwargs):
+ path = self._escape(self._unquote(path))
script = '''
If (Test-Path -PathType Leaf "%(path)s")
{
- $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider;
+ $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
$fp = [System.IO.File]::Open("%(path)s", [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
@@ -81,16 +104,37 @@ class ShellModule(object):
return self._encode_script(script)
def build_module_command(self, env_string, shebang, cmd, rm_tmp=None):
- cmd = cmd.encode('utf-8')
- cmd_parts = shlex.split(cmd, posix=False)
- if not cmd_parts[0].lower().endswith('.ps1'):
- cmd_parts[0] = '%s.ps1' % cmd_parts[0]
- script = self._build_file_cmd(cmd_parts)
+ cmd_parts = shlex.split(to_bytes(cmd), posix=False)
+ cmd_parts = map(to_unicode, cmd_parts)
+ if shebang and shebang.lower() == '#!powershell':
+ if not self._unquote(cmd_parts[0]).lower().endswith('.ps1'):
+ cmd_parts[0] = '"%s.ps1"' % self._unquote(cmd_parts[0])
+ cmd_parts.insert(0, '&')
+ elif shebang and shebang.startswith('#!'):
+ cmd_parts.insert(0, shebang[2:])
+ catch = '''
+ $_obj = @{ failed = $true; $msg = $_ }
+ echo $_obj | ConvertTo-Json -Compress -Depth 99
+ Exit 1
+ '''
+ script = 'Try { %s }\nCatch { %s }' % (' '.join(cmd_parts), 'throw')
if rm_tmp:
- rm_tmp = self._escape(rm_tmp)
- script = '%s; Remove-Item "%s" -Force -Recurse;' % (script, rm_tmp)
+ rm_tmp = self._escape(self._unquote(rm_tmp))
+ rm_cmd = 'Remove-Item "%s" -Force -Recurse -ErrorAction SilentlyContinue' % rm_tmp
+ script = '%s\nFinally { %s }' % (script, rm_cmd)
return self._encode_script(script)
+ def _unquote(self, value):
+ '''Remove any matching quotes that wrap the given value.'''
+ value = to_unicode(value or '')
+ m = re.match(r'^\s*?\'(.*?)\'\s*?$', value)
+ if m:
+ return m.group(1)
+ m = re.match(r'^\s*?"(.*?)"\s*?$', value)
+ if m:
+ return m.group(1)
+ return value
+
def _escape(self, value, include_vars=False):
'''Return value escaped for use in PowerShell command.'''
# http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences
@@ -107,14 +151,10 @@ class ShellModule(object):
def _encode_script(self, script, as_list=False):
'''Convert a PowerShell script to a single base64-encoded command.'''
+ script = to_unicode(script)
script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
encoded_script = base64.b64encode(script.encode('utf-16-le'))
cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
if as_list:
return cmd_parts
return ' '.join(cmd_parts)
-
- def _build_file_cmd(self, cmd_parts):
- '''Build command line to run a file, given list of file name plus args.'''
- return ' '.join(_common_args + ['-ExecutionPolicy', 'Unrestricted', '-File'] + ['"%s"' % x for x in cmd_parts])
-
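Editor's note: the ShellModule above funnels every generated script through _encode_script(), which strips blank lines and hands PowerShell a single -EncodedCommand argument. Below is a minimal standalone sketch of that encoding step. The value of _common_args is an assumption (the real constant is defined earlier in the module), and the .decode('ascii') is added here only so the snippet also runs on Python 3.

    import base64

    # assumed stand-in for the module-level _common_args constant
    _common_args = ['PowerShell', '-NoProfile', '-NonInteractive', '-ExecutionPolicy', 'Unrestricted']

    def encode_script(script):
        # drop blank lines, then encode as UTF-16-LE and base64, which is
        # the form PowerShell accepts for -EncodedCommand
        script = '\n'.join(line.strip() for line in script.splitlines() if line.strip())
        encoded = base64.b64encode(script.encode('utf-16-le')).decode('ascii')
        return ' '.join(_common_args + ['-EncodedCommand', encoded])

    print(encode_script('Write-Host "hello"'))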
diff --git a/v2/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py
similarity index 90%
rename from v2/ansible/plugins/shell/sh.py
rename to lib/ansible/plugins/shell/sh.py
index 628df9bbfbf..70ec91d6e55 100644
--- a/v2/ansible/plugins/shell/sh.py
+++ b/lib/ansible/plugins/shell/sh.py
@@ -34,8 +34,9 @@ class ShellModule(object):
def env_prefix(self, **kwargs):
'''Build command prefix with environment variables.'''
env = dict(
- LANG = C.DEFAULT_MODULE_LANG,
- LC_CTYPE = C.DEFAULT_MODULE_LANG,
+ LANG = C.DEFAULT_MODULE_LANG,
+ LC_CTYPE = C.DEFAULT_MODULE_LANG,
+ LC_MESSAGES = C.DEFAULT_MODULE_LANG,
)
env.update(kwargs)
return ' '.join(['%s=%s' % (k, pipes.quote(unicode(v))) for k,v in env.items()])
@@ -61,12 +62,12 @@ class ShellModule(object):
if not basefile:
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile)
- if system and basetmp.startswith('$HOME'):
+ if system and (basetmp.startswith('$HOME') or basetmp.startswith('~/')):
basetmp = self.join_path('/tmp', basefile)
- cmd = 'mkdir -p %s' % basetmp
+ cmd = 'mkdir -p "%s"' % basetmp
if mode:
- cmd += ' && chmod %s %s' % (mode, basetmp)
- cmd += ' && echo %s' % basetmp
+ cmd += ' && chmod %s "%s"' % (mode, basetmp)
+ cmd += ' && echo "%s"' % basetmp
return cmd
def expand_user(self, user_home_path):
@@ -126,8 +127,12 @@ class ShellModule(object):
return cmd
def build_module_command(self, env_string, shebang, cmd, rm_tmp=None):
+ # don't quote the cmd if it's an empty string, because this will
+ # break pipelining mode
+ if cmd.strip() != '':
+ cmd = pipes.quote(cmd)
cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd]
new_cmd = " ".join(cmd_parts)
if rm_tmp:
- new_cmd = '%s; rm -rf %s >/dev/null 2>&1' % (new_cmd, rm_tmp)
+ new_cmd = '%s; rm -rf "%s" >/dev/null 2>&1' % (new_cmd, rm_tmp)
return new_cmd
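Editor's note: for the sh plugin, build_module_command() now quotes the module path (unless it is empty, which is the pipelining case where the module is fed over stdin) and appends a quoted rm -rf for the temporary directory. A rough standalone sketch mirroring the hunk above; the example paths are made up.

    import pipes  # shlex.quote is the Python 3 replacement

    def build_module_command(env_string, shebang, cmd, rm_tmp=None):
        # an empty cmd means pipelining: the module source is fed to the
        # interpreter over stdin, so there is nothing to quote
        if cmd.strip() != '':
            cmd = pipes.quote(cmd)
        new_cmd = " ".join([env_string.strip(), shebang.replace("#!", "").strip(), cmd])
        if rm_tmp:
            new_cmd = '%s; rm -rf "%s" >/dev/null 2>&1' % (new_cmd, rm_tmp)
        return new_cmd

    print(build_module_command('LANG=C LC_CTYPE=C', '#!/usr/bin/python',
                               '/home/user/.ansible/tmp/setup',
                               rm_tmp='/home/user/.ansible/tmp'))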
diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py
new file mode 100644
index 00000000000..83ddd1d2c36
--- /dev/null
+++ b/lib/ansible/plugins/strategies/__init__.py
@@ -0,0 +1,512 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from six.moves import queue as Queue
+import time
+
+from ansible.errors import *
+from ansible.executor.task_result import TaskResult
+from ansible.inventory.host import Host
+from ansible.inventory.group import Group
+from ansible.playbook.handler import Handler
+from ansible.playbook.helpers import load_list_of_blocks
+from ansible.playbook.included_file import IncludedFile
+from ansible.playbook.role import hash_params
+from ansible.plugins import _basedirs, filter_loader, lookup_loader, module_loader
+from ansible.template import Templar
+
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
+__all__ = ['StrategyBase']
+
+# FIXME: this should probably be in the plugins/__init__.py, with
+# a smarter mechanism to set all of the attributes based on
+# the loaders created there
+class SharedPluginLoaderObj:
+ '''
+ A simple object to make passing the various plugin loaders to
+ the forked processes over the queue easier
+ '''
+ def __init__(self):
+ self.basedirs = _basedirs[:]
+ self.filter_loader = filter_loader
+ self.lookup_loader = lookup_loader
+ self.module_loader = module_loader
+
+class StrategyBase:
+
+ '''
+ This is the base class for strategy plugins, which contains some common
+ code useful to all strategies like running handlers, cleanup actions, etc.
+ '''
+
+ def __init__(self, tqm):
+ self._tqm = tqm
+ self._inventory = tqm.get_inventory()
+ self._workers = tqm.get_workers()
+ self._notified_handlers = tqm.get_notified_handlers()
+ self._variable_manager = tqm.get_variable_manager()
+ self._loader = tqm.get_loader()
+ self._final_q = tqm._final_q
+ self._step = getattr(tqm._options, 'step', False)
+ self._diff = getattr(tqm._options, 'diff', False)
+ self._display = display
+
+ # internal counters
+ self._pending_results = 0
+ self._cur_worker = 0
+
+ # this dictionary is used to keep track of hosts that have
+ # outstanding tasks still in queue
+ self._blocked_hosts = dict()
+
+ def run(self, iterator, play_context, result=True):
+ # save the failed/unreachable hosts, as the run_handlers()
+ # method will clear that information during its execution
+ failed_hosts = self._tqm._failed_hosts.keys()
+ unreachable_hosts = self._tqm._unreachable_hosts.keys()
+
+ self._display.debug("running handlers")
+ result &= self.run_handlers(iterator, play_context)
+
+ # now update with the hosts (if any) that failed or were
+ # unreachable during the handler execution phase
+ failed_hosts = set(failed_hosts).union(self._tqm._failed_hosts.keys())
+ unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())
+
+ # send the stats callback
+ self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
+
+ if len(unreachable_hosts) > 0:
+ return 3
+ elif len(failed_hosts) > 0:
+ return 2
+ elif not result:
+ return 1
+ else:
+ return 0
+
+ def get_hosts_remaining(self, play):
+ return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]
+
+ def get_failed_hosts(self, play):
+ return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
+
+ def add_tqm_variables(self, vars, play):
+ '''
+ Base class method to add extra variables/information to the list of task
+ vars sent through the executor engine regarding the task queue manager state.
+ '''
+
+ new_vars = vars.copy()
+ new_vars['ansible_current_hosts'] = self.get_hosts_remaining(play)
+ new_vars['ansible_failed_hosts'] = self.get_failed_hosts(play)
+ return new_vars
+
+ def _queue_task(self, host, task, task_vars, play_context):
+ ''' handles queueing the task up to be sent to a worker '''
+
+ self._display.debug("entering _queue_task() for %s/%s" % (host, task))
+
+ # and then queue the new task
+ self._display.debug("%s - putting task (%s) in queue" % (host, task))
+ try:
+ self._display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
+
+ (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
+ self._cur_worker += 1
+ if self._cur_worker >= len(self._workers):
+ self._cur_worker = 0
+
+ # create a dummy object with plugin loaders set as an easier
+ # way to share them with the forked processes
+ shared_loader_obj = SharedPluginLoaderObj()
+
+ main_q.put((host, task, self._loader.get_basedir(), task_vars, play_context, shared_loader_obj), block=False)
+ self._pending_results += 1
+ except (EOFError, IOError, AssertionError) as e:
+ # most likely an abort
+ self._display.debug("got an error while queuing: %s" % e)
+ return
+ self._display.debug("exiting _queue_task() for %s/%s" % (host, task))
+
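Editor's note: _queue_task() above picks a worker by advancing an index and wrapping it back to zero once it passes the end of the worker list. The same rotation can be written with a modulo, as in this small sketch; the worker tuples are placeholders for the (process, main_q, rslt_q) triples the task queue manager creates.

    workers = [('proc-%d' % i, 'main_q-%d' % i, 'rslt_q-%d' % i) for i in range(4)]
    cur_worker = 0

    def next_worker():
        # equivalent to: advance the cursor, then wrap to 0 past the end
        global cur_worker
        worker = workers[cur_worker]
        cur_worker = (cur_worker + 1) % len(workers)
        return worker

    for _ in range(6):
        print(next_worker()[0])   # proc-0, proc-1, proc-2, proc-3, proc-0, proc-1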
+ def _process_pending_results(self, iterator):
+ '''
+ Reads results off the final queue and takes appropriate action
+ based on the result (executing callbacks, updating state, etc.).
+ '''
+
+ ret_results = []
+
+ while not self._final_q.empty() and not self._tqm._terminated:
+ try:
+ result = self._final_q.get(block=False)
+ self._display.debug("got result from result worker: %s" % ([unicode(x) for x in result],))
+
+ # all host status messages contain 2 entries: (msg, task_result)
+ if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
+ task_result = result[1]
+ host = task_result._host
+ task = task_result._task
+ if result[0] == 'host_task_failed' or task_result.is_failed():
+ if not task.ignore_errors:
+ self._display.debug("marking %s as failed" % host.name)
+ iterator.mark_host_failed(host)
+ self._tqm._failed_hosts[host.name] = True
+ self._tqm._stats.increment('failures', host.name)
+ else:
+ self._tqm._stats.increment('ok', host.name)
+ self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
+ elif result[0] == 'host_unreachable':
+ self._tqm._unreachable_hosts[host.name] = True
+ self._tqm._stats.increment('dark', host.name)
+ self._tqm.send_callback('v2_runner_on_unreachable', task_result)
+ elif result[0] == 'host_task_skipped':
+ self._tqm._stats.increment('skipped', host.name)
+ self._tqm.send_callback('v2_runner_on_skipped', task_result)
+ elif result[0] == 'host_task_ok':
+ self._tqm._stats.increment('ok', host.name)
+ if 'changed' in task_result._result and task_result._result['changed']:
+ self._tqm._stats.increment('changed', host.name)
+ self._tqm.send_callback('v2_runner_on_ok', task_result)
+
+ if self._diff and 'diff' in task_result._result:
+ self._tqm.send_callback('v2_on_file_diff', task_result)
+
+ self._pending_results -= 1
+ if host.name in self._blocked_hosts:
+ del self._blocked_hosts[host.name]
+
+ # If this is a role task, mark the parent role as being run (if
+ # the task was ok or failed, but not skipped or unreachable)
+ if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
+ # lookup the role in the ROLE_CACHE to make sure we're dealing
+ # with the correct object and mark it as executed
+ for (entry, role_obj) in iterator._play.ROLE_CACHE[task_result._task._role._role_name].iteritems():
+ params = task_result._task._role._role_params
+ if task_result._task._role.tags is not None:
+ params['tags'] = task_result._task._role.tags
+ if task_result._task._role.when is not None:
+ params['when'] = task_result._task._role.when
+ hashed_entry = hash_params(params)
+ if entry == hashed_entry:
+ role_obj._had_task_run = True
+
+ ret_results.append(task_result)
+
+ elif result[0] == 'add_host':
+ task_result = result[1]
+ new_host_info = task_result.get('add_host', dict())
+
+ self._add_host(new_host_info)
+
+ elif result[0] == 'add_group':
+ task = result[1]
+ self._add_group(task, iterator)
+
+ elif result[0] == 'notify_handler':
+ task_result = result[1]
+ handler_name = result[2]
+
+ original_task = iterator.get_original_task(task_result._host, task_result._task)
+ if handler_name not in self._notified_handlers:
+ self._notified_handlers[handler_name] = []
+
+ if task_result._host not in self._notified_handlers[handler_name]:
+ self._notified_handlers[handler_name].append(task_result._host)
+
+ elif result[0] == 'register_host_var':
+ # essentially the same as 'set_host_var' below, however we
+ # never follow the delegate_to value for registered vars
+ host = result[1]
+ var_name = result[2]
+ var_value = result[3]
+ self._variable_manager.set_host_variable(host, var_name, var_value)
+
+ elif result[0] in ('set_host_var', 'set_host_facts'):
+ host = result[1]
+ task = result[2]
+ item = result[3]
+
+ if task.delegate_to is not None:
+ task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
+ task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
+ if item is not None:
+ task_vars['item'] = item
+ templar = Templar(loader=self._loader, variables=task_vars)
+ host_name = templar.template(task.delegate_to)
+ target_host = self._inventory.get_host(host_name)
+ if target_host is None:
+ target_host = Host(name=host_name)
+ else:
+ target_host = host
+
+ if result[0] == 'set_host_var':
+ var_name = result[4]
+ var_value = result[5]
+ self._variable_manager.set_host_variable(target_host, var_name, var_value)
+ elif result[0] == 'set_host_facts':
+ facts = result[4]
+ self._variable_manager.set_host_facts(target_host, facts)
+
+ else:
+ raise AnsibleError("unknown result message received: %s" % result[0])
+ except Queue.Empty:
+ pass
+
+ return ret_results
+
+ def _wait_on_pending_results(self, iterator):
+ '''
+ Wait for the shared counter to drop to zero, using a short sleep
+ between checks to ensure we don't spin lock
+ '''
+
+ ret_results = []
+
+ self._display.debug("waiting for pending results...")
+ while self._pending_results > 0 and not self._tqm._terminated:
+ results = self._process_pending_results(iterator)
+ ret_results.extend(results)
+ time.sleep(0.01)
+ self._display.debug("no more pending results, returning what we have")
+
+ return ret_results
+
+ def _add_host(self, host_info):
+ '''
+ Helper function to add a new host to inventory based on a task result.
+ '''
+
+ host_name = host_info.get('host_name')
+
+ # Check if host in cache, add if not
+ if host_name in self._inventory._hosts_cache:
+ new_host = self._inventory._hosts_cache[host_name]
+ else:
+ new_host = Host(name=host_name)
+ self._inventory._hosts_cache[host_name] = new_host
+
+ allgroup = self._inventory.get_group('all')
+ allgroup.add_host(new_host)
+
+ # Set/update the vars for this host
+ # FIXME: probably should have a set vars method for the host?
+ new_vars = host_info.get('host_vars', dict())
+ new_host.vars.update(new_vars)
+
+ new_groups = host_info.get('groups', [])
+ for group_name in new_groups:
+ if not self._inventory.get_group(group_name):
+ new_group = Group(group_name)
+ self._inventory.add_group(new_group)
+ new_group.vars = self._inventory.get_group_variables(group_name)
+ else:
+ new_group = self._inventory.get_group(group_name)
+
+ new_group.add_host(new_host)
+
+ # add this host to the group cache
+ if self._inventory._groups_list is not None:
+ if group_name in self._inventory._groups_list:
+ if new_host.name not in self._inventory._groups_list[group_name]:
+ self._inventory._groups_list[group_name].append(new_host.name)
+
+ # clear pattern caching completely since it's unpredictable what
+ # patterns may have referenced the group
+ # FIXME: is this still required?
+ self._inventory.clear_pattern_cache()
+
+ def _add_group(self, task, iterator):
+ '''
+ Helper function to add a group (if it does not exist), and to assign the
+ specified host to that group.
+ '''
+
+ # the host here is from the executor side, which means it was a
+ # serialized/cloned copy and we'll need to look up the proper
+ # host object from the master inventory
+ groups = {}
+ changed = False
+
+ for host in self._inventory.get_hosts():
+ original_task = iterator.get_original_task(host, task)
+ all_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=original_task)
+ templar = Templar(loader=self._loader, variables=all_vars)
+ group_name = templar.template(original_task.args.get('key'))
+ if task.evaluate_conditional(templar=templar, all_vars=all_vars):
+ if group_name not in groups:
+ groups[group_name] = []
+ groups[group_name].append(host)
+
+ for group_name, hosts in groups.iteritems():
+ new_group = self._inventory.get_group(group_name)
+ if not new_group:
+ # create the new group and add it to inventory
+ new_group = Group(name=group_name)
+ self._inventory.add_group(new_group)
+
+ # and add the group to the proper hierarchy
+ allgroup = self._inventory.get_group('all')
+ allgroup.add_child_group(new_group)
+ changed = True
+ for host in hosts:
+ if group_name not in host.get_groups():
+ new_group.add_host(host)
+ changed = True
+
+ return changed
+
+ def _load_included_file(self, included_file, iterator, is_handler=False):
+ '''
+ Loads an included YAML file of tasks, applying the optional set of variables.
+ '''
+
+ try:
+ data = self._loader.load_from_file(included_file._filename)
+ if data is None:
+ return []
+ except AnsibleError, e:
+ for host in included_file._hosts:
+ tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=str(e)))
+ iterator.mark_host_failed(host)
+ self._tqm._failed_hosts[host.name] = True
+ self._tqm._stats.increment('failures', host.name)
+ self._tqm.send_callback('v2_runner_on_failed', tr)
+ return []
+
+ if not isinstance(data, list):
+ raise AnsibleParserError("included task files must contain a list of tasks", obj=included_file._task._ds)
+
+ block_list = load_list_of_blocks(
+ data,
+ play=included_file._task._block._play,
+ parent_block=included_file._task._block,
+ task_include=included_file._task,
+ role=included_file._task._role,
+ use_handlers=is_handler,
+ loader=self._loader
+ )
+
+ # set the vars for this task from those specified as params to the include
+ for b in block_list:
+ b.vars = included_file._args.copy()
+
+ return block_list
+
+ def run_handlers(self, iterator, play_context):
+ '''
+ Runs handlers on those hosts which have been notified.
+ '''
+
+ result = True
+
+ for handler_block in iterator._play.handlers:
+ # FIXME: handlers need to support the rescue/always portions of blocks too,
+ # but this may take some work in the iterator and gets tricky when
+ # we consider the ability of meta tasks to flush handlers
+ for handler in handler_block.block:
+ handler_name = handler.get_name()
+ if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
+ # FIXME: need to use iterator.get_failed_hosts() instead?
+ #if not len(self.get_hosts_remaining(iterator._play)):
+ # self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+ # result = False
+ # break
+ self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
+ host_results = []
+ for host in self._notified_handlers[handler_name]:
+ if not handler.has_triggered(host) and (host.name not in self._tqm._failed_hosts or play_context.force_handlers):
+ task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
+ task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
+ self._queue_task(host, handler, task_vars, play_context)
+ #handler.flag_for_host(host)
+ results = self._process_pending_results(iterator)
+ host_results.extend(results)
+ results = self._wait_on_pending_results(iterator)
+ host_results.extend(results)
+
+ # wipe the notification list
+ self._notified_handlers[handler_name] = []
+
+ try:
+ included_files = IncludedFile.process_include_results(
+ host_results,
+ self._tqm,
+ iterator=iterator,
+ loader=self._loader,
+ variable_manager=self._variable_manager
+ )
+ except AnsibleError, e:
+ return False
+
+ if len(included_files) > 0:
+ for included_file in included_files:
+ try:
+ new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
+ # for every task in each block brought in by the include, add the list
+ # of hosts which included the file to the notified_handlers dict
+ for block in new_blocks:
+ for task in block.block:
+ if task.name in self._notified_handlers:
+ for host in included_file._hosts:
+ if host.name not in self._notified_handlers[task.name]:
+ self._notified_handlers[task.name].append(host)
+ else:
+ self._notified_handlers[task.name] = included_file._hosts[:]
+ # and add the new blocks to the list of handler blocks
+ handler_block.block.extend(block.block)
+ #iterator._play.handlers.extend(new_blocks)
+ except AnsibleError, e:
+ for host in included_file._hosts:
+ iterator.mark_host_failed(host)
+ self._tqm._failed_hosts[host.name] = True
+ self._display.warning(str(e))
+ continue
+ self._display.debug("done running handlers, result is: %s" % result)
+ return result
+
+ def _take_step(self, task, host=None):
+
+ ret=False
+ if host:
+ msg = u'Perform task: %s on %s (y/n/c): ' % (task, host)
+ else:
+ msg = u'Perform task: %s (y/n/c): ' % task
+ resp = self._display.prompt(msg)
+
+ if resp.lower() in ['y','yes']:
+ self._display.debug("User ran task")
+ ret = True
+ elif resp.lower() in ['c', 'continue']:
+ self._display.debug("User ran task and canceled step mode")
+ self._step = False
+ ret = True
+ else:
+ self._display.debug("User skipped task")
+
+ self._display.banner(msg)
+
+ return ret
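Editor's note: StrategyBase.run() above folds the play outcome into a small set of return codes the caller can map to an exit status: 3 when any host was unreachable, 2 when any host failed, 1 when the strategy itself reported failure, 0 otherwise. A compact sketch of that precedence, with made-up inputs:

    def run_result_code(failed_hosts, unreachable_hosts, result):
        # unreachable hosts take precedence over failed hosts, which take
        # precedence over a generic strategy failure
        if len(unreachable_hosts) > 0:
            return 3
        if len(failed_hosts) > 0:
            return 2
        if not result:
            return 1
        return 0

    print(run_result_code([], [], True))             # 0: everything ran cleanly
    print(run_result_code(['web1'], [], True))       # 2: at least one host failed
    print(run_result_code(['web1'], ['db1'], True))  # 3: unreachable wins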
diff --git a/v2/ansible/plugins/strategies/free.py b/lib/ansible/plugins/strategies/free.py
similarity index 78%
rename from v2/ansible/plugins/strategies/free.py
rename to lib/ansible/plugins/strategies/free.py
index d0506d37dda..ea894c5bf65 100644
--- a/v2/ansible/plugins/strategies/free.py
+++ b/lib/ansible/plugins/strategies/free.py
@@ -22,11 +22,16 @@ __metaclass__ = type
import time
from ansible.plugins.strategies import StrategyBase
-from ansible.utils.debug import debug
+
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
class StrategyModule(StrategyBase):
- def run(self, iterator, connection_info):
+ def run(self, iterator, play_context):
'''
The "free" strategy is a bit more complex, in that it allows tasks to
be sent to hosts as quickly as they can be processed. This means that
@@ -41,14 +46,14 @@ class StrategyModule(StrategyBase):
'''
# the last host to be given a task
- last_host = 0
+ last_host = 0
result = True
work_to_do = True
while work_to_do and not self._tqm._terminated:
- hosts_left = self.get_hosts_remaining(iterator._play)
+ hosts_left = self._inventory.get_hosts(iterator._play.hosts)
if len(hosts_left) == 0:
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
@@ -62,31 +67,31 @@ class StrategyModule(StrategyBase):
host_results = []
while True:
host = hosts_left[last_host]
- debug("next free host: %s" % host)
+ self._display.debug("next free host: %s" % host)
host_name = host.get_name()
# peek at the next task for the host, to see if there's
# anything to do for this host
(state, task) = iterator.get_next_task_for_host(host, peek=True)
- debug("free host state: %s" % state)
- debug("free host task: %s" % task)
+ self._display.debug("free host state: %s" % state)
+ self._display.debug("free host task: %s" % task)
if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task:
# set the flag so the outer loop knows we've still found
# some work which needs to be done
work_to_do = True
- debug("this host has work to do")
+ self._display.debug("this host has work to do")
# check to see if this host is blocked (still executing a previous task)
- if not host_name in self._blocked_hosts:
+ if not host_name in self._blocked_hosts or not self._blocked_hosts[host_name]:
# pop the task, mark the host blocked, and queue it
self._blocked_hosts[host_name] = True
(state, task) = iterator.get_next_task_for_host(host)
- debug("getting variables")
+ self._display.debug("getting variables")
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
- debug("done getting variables")
+ self._display.debug("done getting variables")
# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)
@@ -94,11 +99,11 @@ class StrategyModule(StrategyBase):
# If there is no metadata, the default behavior is to not allow duplicates,
# if there is metadata, check to see if the allow_duplicates flag was set to true
if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
- debug("'%s' skipped because role has already run" % task)
+ self._display.debug("'%s' skipped because role has already run" % task)
continue
- if not task.evaluate_tags(connection_info.only_tags, connection_info.skip_tags, task_vars) and task.action != 'setup':
- debug("'%s' failed tag evaluation" % task)
+ if not task.evaluate_tags(play_context.only_tags, play_context.skip_tags, task_vars) and task.action != 'setup':
+ self._display.debug("'%s' failed tag evaluation" % task)
continue
if task.action == 'meta':
@@ -111,14 +116,16 @@ class StrategyModule(StrategyBase):
elif meta_action == 'flush_handlers':
# FIXME: in the 'free' mode, flushing handlers should result in
# only those handlers notified for the host doing the flush
- self.run_handlers(iterator, connection_info)
+ self.run_handlers(iterator, play_context)
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
self._blocked_hosts[host_name] = False
else:
- self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
- self._queue_task(host, task, task_vars, connection_info)
+ # handle step if needed, skip meta actions as they are used internally
+ if not self._step or self._take_step(task, host_name):
+ self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
+ self._queue_task(host, task, task_vars, play_context)
# move on to the next host and make sure we
# haven't gone past the end of our hosts list
@@ -142,10 +149,9 @@ class StrategyModule(StrategyBase):
except Exception as e:
# FIXME: ctrl+c can cause some failures here, so catch them
# with the appropriate error type
- print("wtf: %s" % e)
pass
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
- super(StrategyModule, self).run(iterator, connection_info)
+ return super(StrategyModule, self).run(iterator, play_context, result)
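Editor's note: the free strategy walks hosts_left with a last_host cursor, queueing the next task for each unblocked host and wrapping the cursor when it runs off the end, so fast hosts never wait on slow ones. The toy sketch below only models that scheduling loop; plain dicts stand in for the play iterator and result processing.

    pending = {'web1': ['task1', 'task2'], 'web2': ['task1'], 'db1': ['task1', 'task2', 'task3']}
    hosts = sorted(pending)
    blocked = set()          # hosts with a task still in flight
    last_host = 0

    work_to_do = True
    while work_to_do:
        work_to_do = False
        starting_host = last_host
        while True:
            host = hosts[last_host]
            if pending[host]:
                work_to_do = True
                if host not in blocked:
                    task = pending[host].pop(0)
                    blocked.add(host)
                    print('queueing %s on %s' % (task, host))
            last_host = (last_host + 1) % len(hosts)
            if last_host == starting_host:
                break
        blocked.clear()      # pretend every queued result has been processed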
diff --git a/v2/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py
similarity index 71%
rename from v2/ansible/plugins/strategies/linear.py
rename to lib/ansible/plugins/strategies/linear.py
index 95ecac1451f..6de217f3f1b 100644
--- a/v2/ansible/plugins/strategies/linear.py
+++ b/lib/ansible/plugins/strategies/linear.py
@@ -22,10 +22,11 @@ __metaclass__ = type
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook.block import Block
+from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategies import StrategyBase
-from ansible.utils.debug import debug
+from ansible.template import Templar
class StrategyModule(StrategyBase):
@@ -80,7 +81,10 @@ class StrategyModule(StrategyBase):
# specified in the given hosts array
rvals = []
for host in hosts:
- (s, t) = host_tasks[host.name]
+ host_state_task = host_tasks[host.name]
+ if host_state_task is None:
+ continue
+ (s, t) = host_state_task
if s.run_state == cur_state and s.cur_block == cur_block:
new_t = iterator.get_next_task_for_host(host)
#if new_t != t:
@@ -90,6 +94,7 @@ class StrategyModule(StrategyBase):
rvals.append((host, noop_task))
return rvals
+
# if any hosts are in ITERATING_SETUP, return the setup task
# while all other hosts get a noop
if num_setups:
@@ -114,30 +119,22 @@ class StrategyModule(StrategyBase):
# return None for all hosts in the list
return [(host, None) for host in hosts]
-
- def run(self, iterator, connection_info):
+ def run(self, iterator, play_context):
'''
The linear strategy is simple - get the next task and queue
it for all hosts, then wait for the queue to drain before
moving on to the next task
'''
- result = True
-
# iterate over each task, while there is one left to run
+ result = True
work_to_do = True
while work_to_do and not self._tqm._terminated:
try:
- debug("getting the remaining hosts for this loop")
- self._tqm._failed_hosts = iterator.get_failed_hosts()
- hosts_left = self.get_hosts_remaining(iterator._play)
- debug("done getting the remaining hosts for this loop")
- if len(hosts_left) == 0:
- debug("out of hosts to run on")
- self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
- result = False
- break
+ self._display.debug("getting the remaining hosts for this loop")
+ hosts_left = self._inventory.get_hosts(iterator._play.hosts)
+ self._display.debug("done getting the remaining hosts for this loop")
# queue up this task for each host in the inventory
callback_sent = False
@@ -145,6 +142,11 @@ class StrategyModule(StrategyBase):
host_results = []
host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
+
+ # skip control
+ skip_rest = False
+ choose_step = True
+
for (host, task) in host_tasks:
if not task:
continue
@@ -152,6 +154,7 @@ class StrategyModule(StrategyBase):
run_once = False
work_to_do = True
+
# test to see if the task across all hosts points to an action plugin which
# sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
# will only send this task to the first host in the list.
@@ -165,17 +168,13 @@ class StrategyModule(StrategyBase):
# corresponding action plugin
pass
- debug("getting variables")
- task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
- debug("done getting variables")
-
# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)
if task._role and task._role.has_run():
# If there is no metadata, the default behavior is to not allow duplicates,
# if there is metadata, check to see if the allow_duplicates flag was set to true
if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
- debug("'%s' skipped because role has already run" % task)
+ self._display.debug("'%s' skipped because role has already run" % task)
continue
if task.action == 'meta':
@@ -186,16 +185,37 @@ class StrategyModule(StrategyBase):
# FIXME: issue a callback for the noop here?
continue
elif meta_action == 'flush_handlers':
- self.run_handlers(iterator, connection_info)
+ self.run_handlers(iterator, play_context)
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
else:
+ # handle step if needed, skip meta actions as they are used internally
+ if self._step and choose_step:
+ if self._take_step(task):
+ choose_step = False
+ else:
+ break
+ skip_rest = True
+
+ self._display.debug("getting variables")
+ task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
+ task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
+ templar = Templar(loader=self._loader, variables=task_vars)
+ self._display.debug("done getting variables")
+
if not callback_sent:
- self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
+ temp_task = task.copy()
+ try:
+ temp_task.name = unicode(templar.template(temp_task.name, fail_on_undefined=False))
+ except:
+ # just ignore any errors during task name templating,
+ # we don't care if it just shows the raw name
+ pass
+ self._tqm.send_callback('v2_playbook_on_task_start', temp_task, is_conditional=False)
callback_sent = True
self._blocked_hosts[host.get_name()] = True
- self._queue_task(host, task, task_vars, connection_info)
+ self._queue_task(host, task, task_vars, play_context)
results = self._process_pending_results(iterator)
host_results.extend(results)
@@ -204,62 +224,25 @@ class StrategyModule(StrategyBase):
if run_once:
break
- debug("done queuing things up, now waiting for results queue to drain")
+ # go to next host/task group
+ if skip_rest:
+ continue
+
+ self._display.debug("done queuing things up, now waiting for results queue to drain")
results = self._wait_on_pending_results(iterator)
host_results.extend(results)
- # FIXME: this needs to be somewhere else
- class IncludedFile:
- def __init__(self, filename, args, task):
- self._filename = filename
- self._args = args
- self._task = task
- self._hosts = []
- def add_host(self, host):
- if host not in self._hosts:
- self._hosts.append(host)
- def __eq__(self, other):
- return other._filename == self._filename and other._args == self._args
- def __repr__(self):
- return "%s (%s): %s" % (self._filename, self._args, self._hosts)
-
- # FIXME: this should also be moved to the base class in a method
- included_files = []
- for res in host_results:
- if res._task.action == 'include':
- if res._task.loop:
- include_results = res._result['results']
- else:
- include_results = [ res._result ]
-
- for include_result in include_results:
- # if the task result was skipped or failed, continue
- if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result:
- continue
-
- original_task = iterator.get_original_task(res._host, res._task)
- if original_task and original_task._role:
- include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include'])
- else:
- include_file = self._loader.path_dwim(res._task.args.get('_raw_params'))
-
- include_variables = include_result.get('include_variables', dict())
- if 'item' in include_result:
- include_variables['item'] = include_result['item']
-
- inc_file = IncludedFile(include_file, include_variables, original_task)
-
- try:
- pos = included_files.index(inc_file)
- inc_file = included_files[pos]
- except ValueError:
- included_files.append(inc_file)
+ if not work_to_do and len(iterator.get_failed_hosts()) > 0:
+ self._display.debug("out of hosts to run on")
+ self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+ result = False
+ break
- inc_file.add_host(res._host)
+ try:
+ included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
+ except AnsibleError, e:
+ return False
- # FIXME: should this be moved into the iterator class? Main downside would be
- # that accessing the TQM's callback member would be more difficult, if
- # we do want to send callbacks from here
if len(included_files) > 0:
noop_task = Task()
noop_task.action = 'meta'
@@ -271,12 +254,12 @@ class StrategyModule(StrategyBase):
# included hosts get the task list while those excluded get an equal-length
# list of noop tasks, to make sure that they continue running in lock-step
try:
- new_blocks = self._load_included_file(included_file)
+ new_blocks = self._load_included_file(included_file, iterator=iterator)
except AnsibleError, e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
- # FIXME: callback here?
- print(e)
+ self._display.warning(str(e))
+ continue
for new_block in new_blocks:
noop_block = Block(parent_block=task._block)
@@ -286,7 +269,7 @@ class StrategyModule(StrategyBase):
for host in hosts_left:
if host in included_file._hosts:
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
- final_block = new_block.filter_tagged_tasks(connection_info, task_vars)
+ final_block = new_block.filter_tagged_tasks(play_context, task_vars)
all_blocks[host].append(final_block)
else:
all_blocks[host].append(noop_block)
@@ -294,14 +277,14 @@ class StrategyModule(StrategyBase):
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
- debug("results queue empty")
+ self._display.debug("results queue empty")
except (IOError, EOFError), e:
- debug("got IOError/EOFError in task loop: %s" % e)
+ self._display.debug("got IOError/EOFError in task loop: %s" % e)
# most likely an abort, return failed
- return 1
+ return False
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
- return super(StrategyModule, self).run(iterator, connection_info, result)
+ return super(StrategyModule, self).run(iterator, play_context, result)
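Editor's note: when the linear strategy expands a dynamic include, hosts that did not trigger the include still need something to iterate over, so they receive a noop block of equal shape; that is what keeps every host at the same position in the task list. A toy sketch of the padding idea, with plain lists standing in for Block objects:

    def pad_with_noops(all_hosts, included_hosts, new_tasks):
        # hosts that ran the include get the real tasks; everyone else gets
        # an equal-length run of 'noop' placeholders so iteration stays aligned
        noop_tasks = ['noop'] * len(new_tasks)
        plan = {}
        for host in all_hosts:
            plan[host] = list(new_tasks) if host in included_hosts else list(noop_tasks)
        return plan

    plan = pad_with_noops(['web1', 'web2', 'db1'], {'web1'}, ['install pkg', 'start svc'])
    for host, tasks in sorted(plan.items()):
        print(host, tasks)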
diff --git a/lib/ansible/runner/connection_plugins/__init__.py b/lib/ansible/plugins/test/__init__.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/__init__.py
rename to lib/ansible/plugins/test/__init__.py
diff --git a/lib/ansible/plugins/test/core.py b/lib/ansible/plugins/test/core.py
new file mode 100644
index 00000000000..cc8c702d754
--- /dev/null
+++ b/lib/ansible/plugins/test/core.py
@@ -0,0 +1,113 @@
+# (c) 2012, Jeroen Hoekx
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+from ansible import errors
+
+def failed(*a, **kw):
+ ''' Test if task result yields failed '''
+ item = a[0]
+ if type(item) != dict:
+ raise errors.AnsibleFilterError("|failed expects a dictionary")
+ rc = item.get('rc',0)
+ failed = item.get('failed',False)
+ if rc != 0 or failed:
+ return True
+ else:
+ return False
+
+def success(*a, **kw):
+ ''' Test if task result yields success '''
+ return not failed(*a, **kw)
+
+def changed(*a, **kw):
+ ''' Test if task result yields changed '''
+ item = a[0]
+ if type(item) != dict:
+ raise errors.AnsibleFilterError("|changed expects a dictionary")
+ if not 'changed' in item:
+ changed = False
+ if ('results' in item # some modules return a 'results' key
+ and type(item['results']) == list
+ and type(item['results'][0]) == dict):
+ for result in item['results']:
+ changed = changed or result.get('changed', False)
+ else:
+ changed = item.get('changed', False)
+ return changed
+
+def skipped(*a, **kw):
+ ''' Test if task result yields skipped '''
+ item = a[0]
+ if type(item) != dict:
+ raise errors.AnsibleFilterError("|skipped expects a dictionary")
+ skipped = item.get('skipped', False)
+ return skipped
+
+def mandatory(a):
+ ''' Make a variable mandatory '''
+ try:
+ a
+ except NameError:
+ raise errors.AnsibleFilterError('Mandatory variable not defined.')
+ else:
+ return a
+
+def regex(value='', pattern='', ignorecase=False, match_type='search'):
+ ''' Expose `re` as a boolean filter using the `search` method by default.
+ This is likely only useful for `search` and `match` which already
+ have their own filters.
+ '''
+ if ignorecase:
+ flags = re.I
+ else:
+ flags = 0
+ _re = re.compile(pattern, flags=flags)
+ _bool = __builtins__.get('bool')
+ return _bool(getattr(_re, match_type, 'search')(value))
+
+def match(value, pattern='', ignorecase=False):
+ ''' Perform a `re.match` returning a boolean '''
+ return regex(value, pattern, ignorecase, 'match')
+
+def search(value, pattern='', ignorecase=False):
+ ''' Perform a `re.search` returning a boolean '''
+ return regex(value, pattern, ignorecase, 'search')
+
+class TestModule(object):
+ ''' Ansible core jinja2 tests '''
+
+ def tests(self):
+ return {
+ # failure testing
+ 'failed' : failed,
+ 'success' : success,
+
+ # changed testing
+ 'changed' : changed,
+
+ # skip testing
+ 'skipped' : skipped,
+
+ # variable existence
+ 'mandatory': mandatory,
+
+ # regex
+ 'match': match,
+ 'search': search,
+ 'regex': regex,
+ }
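Editor's note: the functions above are registered as Jinja2 tests, but they are plain callables over task-result dictionaries, so they are easy to exercise directly. The snippet assumes the new module is importable as ansible.plugins.test.core once this change is applied; the result dicts are made up.

    from ansible.plugins.test.core import changed, failed, skipped, success

    result_ok      = {'rc': 0, 'changed': True}
    result_failed  = {'rc': 2, 'failed': True}
    result_batched = {'results': [{'changed': False}, {'changed': True}]}

    print(failed(result_failed))    # True: non-zero rc or a truthy 'failed' key
    print(success(result_ok))       # True
    print(changed(result_ok))       # True: top-level 'changed' key
    print(changed(result_batched))  # True: any item in 'results' changed
    print(skipped(result_ok))       # False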
diff --git a/lib/ansible/plugins/test/math.py b/lib/ansible/plugins/test/math.py
new file mode 100644
index 00000000000..3ac871c4357
--- /dev/null
+++ b/lib/ansible/plugins/test/math.py
@@ -0,0 +1,36 @@
+# (c) 2014, Brian Coca
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import
+
+import math
+from ansible import errors
+
+def isnotanumber(x):
+ try:
+ return math.isnan(x)
+ except TypeError:
+ return False
+
+class TestModule(object):
+ ''' Ansible math jinja2 tests '''
+
+ def tests(self):
+ return {
+ # general math
+ 'isnan': isnotanumber,
+ }
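Editor's note: isnotanumber() wraps math.isnan() but swallows the TypeError raised for non-numeric input, so strings simply test as not-NaN instead of breaking the template. A quick check of that behaviour, assuming the module above is importable:

    from ansible.plugins.test.math import isnotanumber

    print(isnotanumber(float('nan')))    # True
    print(isnotanumber(1.5))             # False
    print(isnotanumber('not a number')) # False: the TypeError is swallowed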
diff --git a/v2/ansible/plugins/vars/__init__.py b/lib/ansible/plugins/vars/__init__.py
similarity index 100%
rename from v2/ansible/plugins/vars/__init__.py
rename to lib/ansible/plugins/vars/__init__.py
diff --git a/v2/ansible/template/__init__.py b/lib/ansible/template/__init__.py
similarity index 69%
rename from v2/ansible/template/__init__.py
rename to lib/ansible/template/__init__.py
index 19e091b9b27..ec9866c3143 100644
--- a/v2/ansible/template/__init__.py
+++ b/lib/ansible/template/__init__.py
@@ -19,16 +19,18 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import ast
import re
from jinja2 import Environment
+from jinja2.loaders import FileSystemLoader
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.utils import concat as j2_concat
from jinja2.runtime import StrictUndefined
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleUndefinedVariable
-from ansible.plugins import filter_loader, lookup_loader
+from ansible.plugins import _basedirs, filter_loader, lookup_loader, test_loader
from ansible.template.safe_eval import safe_eval
from ansible.template.template import AnsibleJ2Template
from ansible.template.vars import AnsibleJ2Vars
@@ -40,25 +42,28 @@ __all__ = ['Templar']
# A regex for checking to see if a variable we're trying to
# expand is just a single variable name.
-SINGLE_VAR = re.compile(r"^{{\s*(\w*)\s*}}$")
# Primitive Types which we don't want Jinja to convert to strings.
NON_TEMPLATED_TYPES = ( bool, Number )
JINJA2_OVERRIDE = '#jinja2:'
-JINJA2_ALLOWED_OVERRIDES = ['trim_blocks', 'lstrip_blocks', 'newline_sequence', 'keep_trailing_newline']
class Templar:
'''
The main class for templating, with the main entry-point of template().
'''
- def __init__(self, loader, shared_loader_obj=None, variables=dict(), fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR):
+ def __init__(self, loader, shared_loader_obj=None, variables=dict()):
self._loader = loader
- self._basedir = loader.get_basedir()
self._filters = None
+ self._tests = None
self._available_variables = variables
+ if loader:
+ self._basedir = loader.get_basedir()
+ else:
+ self._basedir = './'
+
if shared_loader_obj:
self._filter_loader = getattr(shared_loader_obj, 'filter_loader')
self._lookup_loader = getattr(shared_loader_obj, 'lookup_loader')
@@ -70,7 +75,18 @@ class Templar:
# should result in fatal errors being raised
self._fail_on_lookup_errors = True
self._fail_on_filter_errors = True
- self._fail_on_undefined_errors = fail_on_undefined
+ self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
+
+ self.environment = Environment(
+ trim_blocks=True,
+ undefined=StrictUndefined,
+ extensions=self._get_extensions(),
+ finalize=self._finalize,
+ loader=FileSystemLoader(self._basedir),
+ )
+ self.environment.template_class = AnsibleJ2Template
+
+ self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string))
def _count_newlines_from_end(self, in_str):
'''
@@ -100,11 +116,28 @@ class Templar:
self._filters = dict()
for fp in plugins:
self._filters.update(fp.filters())
+ self._filters.update(self._get_tests())
return self._filters.copy()
+ def _get_tests(self):
+ '''
+ Returns tests plugins, after loading and caching them if need be
+ '''
+
+ if self._tests is not None:
+ return self._tests.copy()
+
+ plugins = [x for x in test_loader.all()]
+
+ self._tests = dict()
+ for fp in plugins:
+ self._tests.update(fp.tests())
+
+ return self._tests.copy()
+
def _get_extensions(self):
- '''
+ '''
Return jinja2 extensions to load.
If some extensions are set via jinja_extensions in ansible.cfg, we try
@@ -129,7 +162,7 @@ class Templar:
assert isinstance(variables, dict)
self._available_variables = variables.copy()
- def template(self, variable, convert_bare=False, preserve_trailing_newlines=False):
+ def template(self, variable, convert_bare=False, preserve_trailing_newlines=False, fail_on_undefined=None, overrides=None, convert_data=True):
'''
Templates (possibly recursively) any given data as input. If convert_bare is
set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
@@ -147,7 +180,7 @@ class Templar:
# Check to see if the string we are trying to render is just referencing a single
# var. In this case we don't want to accidentally change the type of the variable
# to a string by using the jinja template renderer. We just want to pass it.
- only_one = SINGLE_VAR.match(variable)
+ only_one = self.SINGLE_VAR.match(variable)
if only_one:
var_name = only_one.group(1)
if var_name in self._available_variables:
@@ -155,25 +188,27 @@ class Templar:
if isinstance(resolved_val, NON_TEMPLATED_TYPES):
return resolved_val
- result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines)
+ result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides)
- # if this looks like a dictionary or list, convert it to such using the safe_eval method
- if (result.startswith("{") and not result.startswith("{{")) or result.startswith("["):
- eval_results = safe_eval(result, locals=self._available_variables, include_exceptions=True)
- if eval_results[1] is None:
- result = eval_results[0]
- else:
- # FIXME: if the safe_eval raised an error, should we do something with it?
- pass
+ if convert_data:
+ # if this looks like a dictionary or list, convert it to such using the safe_eval method
+ if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \
+ result.startswith("[") or result in ("True", "False"):
+ eval_results = safe_eval(result, locals=self._available_variables, include_exceptions=True)
+ if eval_results[1] is None:
+ result = eval_results[0]
+ else:
+ # FIXME: if the safe_eval raised an error, should we do something with it?
+ pass
return result
elif isinstance(variable, (list, tuple)):
- return [self.template(v, convert_bare=convert_bare) for v in variable]
+ return [self.template(v, convert_bare=convert_bare, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides) for v in variable]
elif isinstance(variable, dict):
d = {}
for (k, v) in variable.iteritems():
- d[k] = self.template(v, convert_bare=convert_bare)
+ d[k] = self.template(v, convert_bare=convert_bare, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides)
return d
else:
return variable
@@ -188,7 +223,7 @@ class Templar:
'''
returns True if the data contains a variable pattern
'''
- return "$" in data or "{{" in data or '{%' in data
+ return self.environment.block_start_string in data or self.environment.variable_start_string in data
def _convert_bare_variable(self, variable):
'''
@@ -197,9 +232,10 @@ class Templar:
'''
if isinstance(variable, basestring):
- first_part = variable.split(".")[0].split("[")[0]
- if first_part in self._available_variables and '{{' not in variable and '$' not in variable:
- return "{{%s}}" % variable
+ contains_filters = "|" in variable
+ first_part = variable.split("|")[0].split(".")[0].split("[")[0]
+ if (contains_filters or first_part in self._available_variables) and self.environment.variable_start_string not in variable:
+ return "%s%s%s" % (self.environment.variable_start_string, variable, self.environment.variable_end_string)
# the variable didn't meet the conditions to be converted,
# so just return it as-is
@@ -212,13 +248,13 @@ class Templar:
return thing if thing is not None else ''
def _lookup(self, name, *args, **kwargs):
- instance = self._lookup_loader.get(name.lower(), loader=self._loader)
+ instance = self._lookup_loader.get(name.lower(), loader=self._loader, templar=self)
if instance is not None:
# safely catch run failures per #5059
try:
ran = instance.run(*args, variables=self._available_variables, **kwargs)
- except AnsibleUndefinedVariable:
+ except (AnsibleUndefinedVariable, UndefinedError):
raise
except Exception, e:
if self._fail_on_lookup_errors:
@@ -230,24 +266,34 @@ class Templar:
else:
raise AnsibleError("lookup plugin (%s) not found" % name)
- def _do_template(self, data, preserve_trailing_newlines=False):
+ def _do_template(self, data, preserve_trailing_newlines=False, fail_on_undefined=None, overrides=None):
- try:
-
- environment = Environment(trim_blocks=True, undefined=StrictUndefined, extensions=self._get_extensions(), finalize=self._finalize)
- environment.filters.update(self._get_filters())
- environment.template_class = AnsibleJ2Template
+ if fail_on_undefined is None:
+ fail_on_undefined = self._fail_on_undefined_errors
- # FIXME: may not be required anymore, as the basedir stuff will
- # be handled by the loader?
- #if '_original_file' in vars:
- # basedir = os.path.dirname(vars['_original_file'])
- # filesdir = os.path.abspath(os.path.join(basedir, '..', 'files'))
- # if os.path.exists(filesdir):
- # basedir = filesdir
+ try:
+ # allows template header overrides to change jinja2 options.
+ if overrides is None:
+ myenv = self.environment.overlay()
+ else:
+ myenv = self.environment.overlay(overrides)
+
+ # Get jinja env overrides from template
+ if data.startswith(JINJA2_OVERRIDE):
+ eol = data.find('\n')
+ line = data[len(JINJA2_OVERRIDE):eol]
+ data = data[eol+1:]
+ for pair in line.split(','):
+ (key,val) = pair.split(':')
+ key = key.strip()
+ setattr(myenv, key, ast.literal_eval(val.strip()))
+
+ #FIXME: add tests
+ myenv.filters.update(self._get_filters())
+ myenv.tests.update(self._get_tests())
try:
- t = environment.from_string(data)
+ t = myenv.from_string(data)
except TemplateSyntaxError, e:
raise AnsibleError("template error while templating string: %s" % str(e))
except Exception, e:
@@ -288,8 +334,9 @@ class Templar:
return res
except (UndefinedError, AnsibleUndefinedVariable), e:
- if self._fail_on_undefined_errors:
+ if fail_on_undefined:
raise
else:
+ #TODO: return warning about undefined var
return data
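Editor's note: _do_template() lets a template's first line start with '#jinja2:' and carry comma-separated key:value pairs that are applied to the overlaid environment via ast.literal_eval. A small standalone sketch of just that header parsing; the environment is faked with a plain object rather than a real jinja2 Environment.

    import ast

    JINJA2_OVERRIDE = '#jinja2:'

    class FakeEnv(object):
        trim_blocks = True
        variable_start_string = '{{'

    def apply_overrides(data, env):
        # peel off the '#jinja2:' header line, if present, and set each
        # key on the environment to the literal-evaluated value
        if data.startswith(JINJA2_OVERRIDE):
            eol = data.find('\n')
            line = data[len(JINJA2_OVERRIDE):eol]
            data = data[eol + 1:]
            for pair in line.split(','):
                key, val = pair.split(':')
                setattr(env, key.strip(), ast.literal_eval(val.strip()))
        return data

    env = FakeEnv()
    body = apply_overrides("#jinja2: trim_blocks: False\nHello {{ name }}", env)
    print(env.trim_blocks)   # False
    print(body)              # Hello {{ name }}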
diff --git a/v2/ansible/template/safe_eval.py b/lib/ansible/template/safe_eval.py
similarity index 94%
rename from v2/ansible/template/safe_eval.py
rename to lib/ansible/template/safe_eval.py
index 26899495044..5e2d1e1fe38 100644
--- a/v2/ansible/template/safe_eval.py
+++ b/lib/ansible/template/safe_eval.py
@@ -23,7 +23,7 @@ import sys
from six.moves import builtins
from ansible import constants as C
-from ansible.plugins import filter_loader
+from ansible.plugins import filter_loader, test_loader
def safe_eval(expr, locals={}, include_exceptions=False):
'''
@@ -77,7 +77,11 @@ def safe_eval(expr, locals={}, include_exceptions=False):
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
- CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
+ test_list = []
+ for test in test_loader.all():
+ test_list.extend(test.tests().keys())
+
+ CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
diff --git a/v2/ansible/template/template.py b/lib/ansible/template/template.py
similarity index 100%
rename from v2/ansible/template/template.py
rename to lib/ansible/template/template.py
diff --git a/v2/ansible/template/vars.py b/lib/ansible/template/vars.py
similarity index 85%
rename from v2/ansible/template/vars.py
rename to lib/ansible/template/vars.py
index 3c0bb61ecb0..8f9af9506b0 100644
--- a/v2/ansible/template/vars.py
+++ b/lib/ansible/template/vars.py
@@ -19,6 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+from jinja2.utils import missing
__all__ = ['AnsibleJ2Vars']
@@ -33,7 +34,7 @@ class AnsibleJ2Vars:
To facilitate using builtin jinja2 things like range, globals are also handled here.
'''
- def __init__(self, templar, globals, *extras):
+ def __init__(self, templar, globals, locals=None, *extras):
'''
Initializes this object with a valid Templar() object, as
well as several dictionaries of variables representing
@@ -43,10 +44,17 @@ class AnsibleJ2Vars:
self._templar = templar
self._globals = globals
self._extras = extras
+ self._locals = dict()
+ if isinstance(locals, dict):
+ for key, val in locals.iteritems():
+ if key[:2] == 'l_' and val is not missing:
+ self._locals[key[2:]] = val
def __contains__(self, k):
if k in self._templar._available_variables:
return True
+ if k in self._locals:
+ return True
for i in self._extras:
if k in i:
return True
@@ -55,10 +63,9 @@ class AnsibleJ2Vars:
return False
def __getitem__(self, varname):
- # FIXME: are we still going to need HostVars?
- #from ansible.runner import HostVars
-
if varname not in self._templar._available_variables:
+ if varname in self._locals:
+ return self._locals[varname]
for i in self._extras:
if varname in i:
return i[varname]
@@ -84,5 +91,5 @@ class AnsibleJ2Vars:
'''
if locals is None:
return self
- return AnsibleJ2Vars(self._templar, self._globals, locals, *self._extras)
+ return AnsibleJ2Vars(self._templar, self._globals, locals=locals, *self._extras)
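Editor's note: the constructor above only keeps Jinja2 locals whose names carry the 'l_' prefix and whose value is not the jinja2 'missing' sentinel, stripping the prefix before storing them. A tiny sketch of that filtering step, with a plain sentinel object standing in for jinja2.utils.missing:

    missing = object()   # stand-in for jinja2.utils.missing

    def filter_locals(locals):
        kept = {}
        for key, val in locals.items():
            # only locals prefixed with 'l_' are exposed, and only when bound
            if key[:2] == 'l_' and val is not missing:
                kept[key[2:]] = val
        return kept

    print(filter_locals({'l_item': 'eth0', 'l_loop': missing, 'counter': 3}))
    # {'item': 'eth0'}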
diff --git a/v2/test-requirements.txt b/lib/ansible/test-requirements.txt
similarity index 100%
rename from v2/test-requirements.txt
rename to lib/ansible/test-requirements.txt
diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py
index eb6fa2a712b..ae8ccff5952 100644
--- a/lib/ansible/utils/__init__.py
+++ b/lib/ansible/utils/__init__.py
@@ -15,1648 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-import errno
-import sys
-import re
-import os
-import shlex
-import yaml
-import copy
-import optparse
-import operator
-from ansible import errors
-from ansible import __version__
-from ansible.utils.display_functions import *
-from ansible.utils.plugins import *
-from ansible.utils.su_prompts import *
-from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s
-from ansible.callbacks import display
-from ansible.module_utils.splitter import split_args, unquote
-from ansible.module_utils.basic import heuristic_log_sanitize
-from ansible.utils.unicode import to_bytes, to_unicode
-import ansible.constants as C
-import ast
-import time
-import StringIO
-import stat
-import termios
-import tty
-import pipes
-import random
-import difflib
-import warnings
-import traceback
-import getpass
-import sys
-import subprocess
-import contextlib
-
-from vault import VaultLib
-
-VERBOSITY=0
-
-MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
-
-# caching the compilation of the regex used
-# to check for lookup calls within data
-LOOKUP_REGEX = re.compile(r'lookup\s*\(')
-PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})')
-CODE_REGEX = re.compile(r'(?:{%|%})')
-
-
-try:
- # simplejson can be much faster if it's available
- import simplejson as json
-except ImportError:
- import json
-
-try:
- from yaml import CSafeLoader as Loader
-except ImportError:
- from yaml import SafeLoader as Loader
-
-PASSLIB_AVAILABLE = False
-try:
- import passlib.hash
- PASSLIB_AVAILABLE = True
-except:
- pass
-
-try:
- import builtin
-except ImportError:
- import __builtin__ as builtin
-
-KEYCZAR_AVAILABLE=False
-try:
- try:
- # some versions of pycrypto may not have this?
- from Crypto.pct_warnings import PowmInsecureWarning
- except ImportError:
- PowmInsecureWarning = RuntimeWarning
-
- with warnings.catch_warnings(record=True) as warning_handler:
- warnings.simplefilter("error", PowmInsecureWarning)
- try:
- import keyczar.errors as key_errors
- from keyczar.keys import AesKey
- except PowmInsecureWarning:
- system_warning(
- "The version of gmp you have installed has a known issue regarding " + \
- "timing vulnerabilities when used with pycrypto. " + \
- "If possible, you should update it (i.e. yum update gmp)."
- )
- warnings.resetwarnings()
- warnings.simplefilter("ignore")
- import keyczar.errors as key_errors
- from keyczar.keys import AesKey
- KEYCZAR_AVAILABLE=True
-except ImportError:
- pass
-
-
-###############################################################
-# Abstractions around keyczar
-###############################################################
-
-def key_for_hostname(hostname):
- # fireball mode is an implementation of ansible firing up zeromq via SSH
- # to use no persistent daemons or key management
-
- if not KEYCZAR_AVAILABLE:
- raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
-
- key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
- if not os.path.exists(key_path):
- os.makedirs(key_path, mode=0700)
- os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
- elif not os.path.isdir(key_path):
- raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
-
- if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
- raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
-
- key_path = os.path.join(key_path, hostname)
-
- # use new AES keys every 2 hours, which means fireball must not allow running for longer either
- if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
- key = AesKey.Generate()
- fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
- fh = os.fdopen(fd, 'w')
- fh.write(str(key))
- fh.close()
- return key
- else:
- if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
- raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
- fh = open(key_path)
- key = AesKey.Read(fh.read())
- fh.close()
- return key
-
-def encrypt(key, msg):
- return key.Encrypt(msg)
-
-def decrypt(key, msg):
- try:
- return key.Decrypt(msg)
- except key_errors.InvalidSignatureError:
- raise errors.AnsibleError("decryption failed")
-
-###############################################################
-# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
-###############################################################
-
-def read_vault_file(vault_password_file):
- """Read a vault password from a file or if executable, execute the script and
- retrieve password from STDOUT
- """
- if vault_password_file:
- this_path = os.path.realpath(os.path.expanduser(vault_password_file))
- if is_executable(this_path):
- try:
- # STDERR not captured to make it easier for users to prompt for input in their scripts
- p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
- except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e))
- stdout, stderr = p.communicate()
- vault_pass = stdout.strip('\r\n')
- else:
- try:
- f = open(this_path, "rb")
- vault_pass=f.read().strip()
- f.close()
- except (OSError, IOError), e:
- raise errors.AnsibleError("Could not read %s: %s" % (this_path, e))
-
- return vault_pass
- else:
- return None
-
-def err(msg):
- ''' print an error message to stderr '''
-
- print >> sys.stderr, msg
-
-def exit(msg, rc=1):
- ''' quit with an error to stdout and a failure code '''
-
- err(msg)
- sys.exit(rc)
-
-def jsonify(result, format=False):
- ''' format JSON output (uncompressed or uncompressed) '''
-
- if result is None:
- return "{}"
- result2 = result.copy()
- for key, value in result2.items():
- if type(value) is str:
- result2[key] = value.decode('utf-8', 'ignore')
-
- indent = None
- if format:
- indent = 4
-
- try:
- return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
- except UnicodeDecodeError:
- return json.dumps(result2, sort_keys=True, indent=indent)
-
-def write_tree_file(tree, hostname, buf):
- ''' write something into treedir/hostname '''
-
- # TODO: might be nice to append playbook runs per host in a similar way
- # in which case, we'd want append mode.
- path = os.path.join(tree, hostname)
- fd = open(path, "w+")
- fd.write(buf)
- fd.close()
-
-def is_failed(result):
- ''' is a given JSON result a failed result? '''
-
- return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
-
-def is_changed(result):
- ''' is a given JSON result a changed result? '''
-
- return (result.get('changed', False) in [ True, 'True', 'true'])
-
-def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
- from ansible.utils import template
-
- if conditional is None or conditional == '':
- return True
-
- if isinstance(conditional, list):
- for x in conditional:
- if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
- return False
- return True
-
- if not isinstance(conditional, basestring):
- return conditional
-
- conditional = conditional.replace("jinja2_compare ","")
- # allow variable names
- if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'):
- conditional = to_unicode(inject[conditional], nonstring='simplerepr')
- conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
- original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","")
- # a Jinja2 evaluation that results in something Python can eval!
- presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
- conditional = template.template(basedir, presented, inject)
- val = conditional.strip()
- if val == presented:
- # the templating failed, meaning most likely a
- # variable was undefined. If we happened to be
- # looking for an undefined variable, return True,
- # otherwise fail
- if "is undefined" in conditional:
- return True
- elif "is defined" in conditional:
- return False
- else:
- raise errors.AnsibleError("error while evaluating conditional: %s" % original)
- elif val == "True":
- return True
- elif val == "False":
- return False
- else:
- raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
-
-def is_executable(path):
- '''is the given path executable?'''
- return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
- or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
- or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
-
-def unfrackpath(path):
- '''
- returns a path that is free of symlinks, environment
- variables, relative path traversals and symbols (~)
- example:
- '$HOME/../../var/mail' becomes '/var/spool/mail'
- '''
- return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
-
-def prepare_writeable_dir(tree,mode=0777):
- ''' make sure a directory exists and is writeable '''
-
- # modify the mode to ensure the owner at least
- # has read/write access to this directory
- mode |= 0700
-
- # make sure the tree path is always expanded
- # and normalized and free of symlinks
- tree = unfrackpath(tree)
-
- if not os.path.exists(tree):
- try:
- os.makedirs(tree, mode)
- except (IOError, OSError), e:
- raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
- if not os.access(tree, os.W_OK):
- raise errors.AnsibleError("Cannot write to path %s" % tree)
- return tree
-
-def path_dwim(basedir, given):
- '''
- make relative paths work like folks expect.
- '''
-
- if given.startswith("'"):
- given = given[1:-1]
-
- if given.startswith("/"):
- return os.path.abspath(given)
- elif given.startswith("~"):
- return os.path.abspath(os.path.expanduser(given))
- else:
- if basedir is None:
- basedir = "."
- return os.path.abspath(os.path.join(basedir, given))
-
-def path_dwim_relative(original, dirname, source, playbook_base, check=True):
- ''' find one file in a directory one level up in a dir named dirname relative to current '''
- # (used by roles code)
-
- from ansible.utils import template
-
-
- basedir = os.path.dirname(original)
- if os.path.islink(basedir):
- basedir = unfrackpath(basedir)
- template2 = os.path.join(basedir, dirname, source)
- else:
- template2 = os.path.join(basedir, '..', dirname, source)
- source2 = path_dwim(basedir, template2)
- if os.path.exists(source2):
- return source2
- obvious_local_path = path_dwim(playbook_base, source)
- if os.path.exists(obvious_local_path):
- return obvious_local_path
- if check:
- raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
- return source2 # which does not exist
-
-def repo_url_to_role_name(repo_url):
- # gets the role name out of a repo like
- # http://git.example.com/repos/repo.git" => "repo"
-
- if '://' not in repo_url and '@' not in repo_url:
- return repo_url
- trailing_path = repo_url.split('/')[-1]
- if trailing_path.endswith('.git'):
- trailing_path = trailing_path[:-4]
- if trailing_path.endswith('.tar.gz'):
- trailing_path = trailing_path[:-7]
- if ',' in trailing_path:
- trailing_path = trailing_path.split(',')[0]
- return trailing_path
-
-
-def role_spec_parse(role_spec):
- # takes a repo and a version like
- # git+http://git.example.com/repos/repo.git,v1.0
- # and returns a list of properties such as:
- # {
- # 'scm': 'git',
- # 'src': 'http://git.example.com/repos/repo.git',
- # 'version': 'v1.0',
- # 'name': 'repo'
- # }
-
- role_spec = role_spec.strip()
- role_version = ''
- default_role_versions = dict(git='master', hg='tip')
- if role_spec == "" or role_spec.startswith("#"):
- return (None, None, None, None)
-
- tokens = [s.strip() for s in role_spec.split(',')]
-
- # assume https://github.com URLs are git+https:// URLs and not
- # tarballs unless they end in '.zip'
- if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
- tokens[0] = 'git+' + tokens[0]
-
- if '+' in tokens[0]:
- (scm, role_url) = tokens[0].split('+')
- else:
- scm = None
- role_url = tokens[0]
- if len(tokens) >= 2:
- role_version = tokens[1]
- if len(tokens) == 3:
- role_name = tokens[2]
- else:
- role_name = repo_url_to_role_name(tokens[0])
- if scm and not role_version:
- role_version = default_role_versions.get(scm, '')
- return dict(scm=scm, src=role_url, version=role_version, name=role_name)
-
-
-def role_yaml_parse(role):
- if 'role' in role:
- # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
- role_info = role_spec_parse(role['role'])
- if isinstance(role_info, dict):
- # Warning: Slight change in behaviour here. name may be being
- # overloaded. Previously, name was only a parameter to the role.
- # Now it is both a parameter to the role and the name that
- # ansible-galaxy will install under on the local system.
- if 'name' in role and 'name' in role_info:
- del role_info['name']
- role.update(role_info)
- else:
- # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
- if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
- role["src"] = "git+" + role["src"]
-
- if '+' in role["src"]:
- (scm, src) = role["src"].split('+')
- role["scm"] = scm
- role["src"] = src
-
- if 'name' not in role:
- role["name"] = repo_url_to_role_name(role["src"])
-
- if 'version' not in role:
- role['version'] = ''
-
- if 'scm' not in role:
- role['scm'] = None
-
- return role
-
-
-def json_loads(data):
- ''' parse a JSON string and return a data structure '''
- try:
- loaded = json.loads(data)
- except ValueError,e:
- raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e))
-
- return loaded
-
-def _clean_data(orig_data, from_remote=False, from_inventory=False):
- ''' remove jinja2 template tags from a string '''
-
- if not isinstance(orig_data, basestring):
- return orig_data
-
- # when the data is marked as having come from a remote, we always
- # replace any print blocks (ie. {{var}}), however when marked as coming
- # from inventory we only replace print blocks that contain a call to
- # a lookup plugin (ie. {{lookup('foo','bar'))}})
- replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None)
-
- regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX
-
- with contextlib.closing(StringIO.StringIO(orig_data)) as data:
- # these variables keep track of opening block locations, as we only
- # want to replace matched pairs of print/block tags
- print_openings = []
- block_openings = []
- for mo in regex.finditer(orig_data):
- token = mo.group(0)
- token_start = mo.start(0)
-
- if token[0] == '{':
- if token == '{%':
- block_openings.append(token_start)
- elif token == '{{':
- print_openings.append(token_start)
-
- elif token[1] == '}':
- prev_idx = None
- if token == '%}' and block_openings:
- prev_idx = block_openings.pop()
- elif token == '}}' and print_openings:
- prev_idx = print_openings.pop()
-
- if prev_idx is not None:
- # replace the opening
- data.seek(prev_idx, os.SEEK_SET)
- data.write('{#')
- # replace the closing
- data.seek(token_start, os.SEEK_SET)
- data.write('#}')
-
- else:
- assert False, 'Unhandled regex match'
-
- return data.getvalue()
-
-def _clean_data_struct(orig_data, from_remote=False, from_inventory=False):
- '''
- walk a complex data structure, and use _clean_data() to
- remove any template tags that may exist
- '''
- if not from_remote and not from_inventory:
- raise errors.AnsibleErrors("when cleaning data, you must specify either from_remote or from_inventory")
- if isinstance(orig_data, dict):
- data = orig_data.copy()
- for key in data:
- new_key = _clean_data_struct(key, from_remote, from_inventory)
- new_val = _clean_data_struct(data[key], from_remote, from_inventory)
- if key != new_key:
- del data[key]
- data[new_key] = new_val
- elif isinstance(orig_data, list):
- data = orig_data[:]
- for i in range(0, len(data)):
- data[i] = _clean_data_struct(data[i], from_remote, from_inventory)
- elif isinstance(orig_data, basestring):
- data = _clean_data(orig_data, from_remote, from_inventory)
- else:
- data = orig_data
- return data
-
-def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False):
- ''' this version for module return data only '''
-
- orig_data = raw_data
-
- # ignore stuff like tcgetattr spewage or other warnings
- data = filter_leading_non_json_lines(raw_data)
-
- try:
- results = json.loads(data)
- except:
- if no_exceptions:
- return dict(failed=True, parsed=False, msg=raw_data)
- else:
- raise
-
- if from_remote:
- results = _clean_data_struct(results, from_remote, from_inventory)
-
- return results
-
-def serialize_args(args):
- '''
- Flattens a dictionary args to a k=v string
- '''
- module_args = ""
- for (k,v) in args.iteritems():
- if isinstance(v, basestring):
- module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
- elif isinstance(v, bool):
- module_args = "%s=%s %s" % (k, str(v), module_args)
- return module_args.strip()
-
-def merge_module_args(current_args, new_args):
- '''
- merges either a dictionary or string of k=v pairs with another string of k=v pairs,
- and returns a new k=v string without duplicates.
- '''
- if not isinstance(current_args, basestring):
- raise errors.AnsibleError("expected current_args to be a basestring")
- # we use parse_kv to split up the current args into a dictionary
- final_args = parse_kv(current_args)
- if isinstance(new_args, dict):
- final_args.update(new_args)
- elif isinstance(new_args, basestring):
- new_args_kv = parse_kv(new_args)
- final_args.update(new_args_kv)
- return serialize_args(final_args)
-
-def parse_yaml(data, path_hint=None):
- ''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!'''
-
- stripped_data = data.lstrip()
- loaded = None
- if stripped_data.startswith("{") or stripped_data.startswith("["):
- # since the line starts with { or [ we can infer this is a JSON document.
- try:
- loaded = json.loads(data)
- except ValueError, ve:
- if path_hint:
- raise errors.AnsibleError(path_hint + ": " + str(ve))
- else:
- raise errors.AnsibleError(str(ve))
- else:
- # else this is pretty sure to be a YAML document
- loaded = yaml.load(data, Loader=Loader)
-
- return loaded
-
-def process_common_errors(msg, probline, column):
- replaced = probline.replace(" ","")
-
- if ":{{" in replaced and "}}" in replaced:
- msg = msg + """
-This one looks easy to fix. YAML thought it was looking for the start of a
-hash/dictionary and was confused to see a second "{". Most likely this was
-meant to be an ansible template evaluation instead, so we have to give the
-parser a small hint that we wanted a string instead. The solution here is to
-just quote the entire value.
-
-For instance, if the original line was:
-
- app_path: {{ base_path }}/foo
-
-It should be written as:
-
- app_path: "{{ base_path }}/foo"
-"""
- return msg
-
- elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
- msg = msg + """
-This one looks easy to fix. There seems to be an extra unquoted colon in the line
-and this is confusing the parser. It was only expecting to find one free
-colon. The solution is just add some quotes around the colon, or quote the
-entire line after the first colon.
-
-For instance, if the original line was:
-
- copy: src=file.txt dest=/path/filename:with_colon.txt
-
-It can be written as:
-
- copy: src=file.txt dest='/path/filename:with_colon.txt'
-
-Or:
-
- copy: 'src=file.txt dest=/path/filename:with_colon.txt'
-
-
-"""
- return msg
- else:
- parts = probline.split(":")
- if len(parts) > 1:
- middle = parts[1].strip()
- match = False
- unbalanced = False
- if middle.startswith("'") and not middle.endswith("'"):
- match = True
- elif middle.startswith('"') and not middle.endswith('"'):
- match = True
- if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2:
- unbalanced = True
- if match:
- msg = msg + """
-This one looks easy to fix. It seems that there is a value started
-with a quote, and the YAML parser is expecting to see the line ended
-with the same kind of quote. For instance:
-
- when: "ok" in result.stdout
-
-Could be written as:
-
- when: '"ok" in result.stdout'
-
-or equivalently:
-
- when: "'ok' in result.stdout"
-
-"""
- return msg
-
- if unbalanced:
- msg = msg + """
-We could be wrong, but this one looks like it might be an issue with
-unbalanced quotes. If starting a value with a quote, make sure the
-line ends with the same set of quotes. For instance this arbitrary
-example:
-
- foo: "bad" "wolf"
-
-Could be written as:
-
- foo: '"bad" "wolf"'
-
-"""
- return msg
-
- return msg
-
-def process_yaml_error(exc, data, path=None, show_content=True):
- if hasattr(exc, 'problem_mark'):
- mark = exc.problem_mark
- if show_content:
- if mark.line -1 >= 0:
- before_probline = data.split("\n")[mark.line-1]
- else:
- before_probline = ''
- probline = data.split("\n")[mark.line]
- arrow = " " * mark.column + "^"
- msg = """Syntax Error while loading YAML script, %s
-Note: The error may actually appear before this position: line %s, column %s
-
-%s
-%s
-%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
-
- unquoted_var = None
- if '{{' in probline and '}}' in probline:
- if '"{{' not in probline or "'{{" not in probline:
- unquoted_var = True
-
- if not unquoted_var:
- msg = process_common_errors(msg, probline, mark.column)
- else:
- msg = msg + """
-We could be wrong, but this one looks like it might be an issue with
-missing quotes. Always quote template expression brackets when they
-start a value. For instance:
-
- with_items:
- - {{ foo }}
-
-Should be written as:
-
- with_items:
- - "{{ foo }}"
-
-"""
- else:
- # most likely displaying a file with sensitive content,
- # so don't show any of the actual lines of yaml just the
- # line number itself
- msg = """Syntax error while loading YAML script, %s
-The error appears to have been on line %s, column %s, but may actually
-be before there depending on the exact syntax problem.
-""" % (path, mark.line + 1, mark.column + 1)
-
- else:
- # No problem markers means we have to throw a generic
- # "stuff messed up" type message. Sry bud.
- if path:
- msg = "Could not parse YAML. Check over %s again." % path
- else:
- msg = "Could not parse YAML."
- raise errors.AnsibleYAMLValidationFailed(msg)
-
-
-def parse_yaml_from_file(path, vault_password=None):
- ''' convert a yaml file to a data structure '''
-
- data = None
- show_content = True
-
- try:
- data = open(path).read()
- except IOError:
- raise errors.AnsibleError("file could not read: %s" % path)
-
- vault = VaultLib(password=vault_password)
- if vault.is_encrypted(data):
- # if the file is encrypted and no password was specified,
- # the decrypt call would throw an error, but we check first
- # since the decrypt function doesn't know the file name
- if vault_password is None:
- raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path)
- data = vault.decrypt(data)
- show_content = False
-
- try:
- return parse_yaml(data, path_hint=path)
- except yaml.YAMLError, exc:
- process_yaml_error(exc, data, path, show_content)
-
-def parse_kv(args):
- ''' convert a string of key/value items to a dict '''
- options = {}
- if args is not None:
- try:
- vargs = split_args(args)
- except ValueError, ve:
- if 'no closing quotation' in str(ve).lower():
- raise errors.AnsibleError("error parsing argument string, try quoting the entire line.")
- else:
- raise
- for x in vargs:
- if "=" in x:
- k, v = x.split("=",1)
- options[k.strip()] = unquote(v.strip())
- return options
-
-def _validate_both_dicts(a, b):
-
- if not (isinstance(a, dict) and isinstance(b, dict)):
- raise errors.AnsibleError(
- "failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)
- )
-
-def merge_hash(a, b):
- ''' recursively merges hash b into a
- keys from b take precedence over keys from a '''
-
- result = {}
-
- # we check here as well as in combine_vars() since this
- # function can work recursively with nested dicts
- _validate_both_dicts(a, b)
-
- for dicts in a, b:
- # next, iterate over b keys and values
- for k, v in dicts.iteritems():
- # if there's already such key in a
- # and that key contains dict
- if k in result and isinstance(result[k], dict):
- # merge those dicts recursively
- result[k] = merge_hash(a[k], v)
- else:
- # otherwise, just copy a value from b to a
- result[k] = v
-
- return result
-
-def default(value, function):
- ''' syntactic sugar around lazy evaluation of defaults '''
- if value is None:
- return function()
- return value
-
-
-def _git_repo_info(repo_path):
- ''' returns a string containing git branch, commit id and commit date '''
- result = None
- if os.path.exists(repo_path):
- # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
- if os.path.isfile(repo_path):
- try:
- gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
- # There is a possibility the .git file to have an absolute path.
- if os.path.isabs(gitdir):
- repo_path = gitdir
- else:
- repo_path = os.path.join(repo_path[:-4], gitdir)
- except (IOError, AttributeError):
- return ''
- f = open(os.path.join(repo_path, "HEAD"))
- branch = f.readline().split('/')[-1].rstrip("\n")
- f.close()
- branch_path = os.path.join(repo_path, "refs", "heads", branch)
- if os.path.exists(branch_path):
- f = open(branch_path)
- commit = f.readline()[:10]
- f.close()
- else:
- # detached HEAD
- commit = branch[:10]
- branch = 'detached HEAD'
- branch_path = os.path.join(repo_path, "HEAD")
-
- date = time.localtime(os.stat(branch_path).st_mtime)
- if time.daylight == 0:
- offset = time.timezone
- else:
- offset = time.altzone
- result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
- time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
- else:
- result = ''
- return result
-
-
-def _gitinfo():
- basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
- repo_path = os.path.join(basedir, '.git')
- result = _git_repo_info(repo_path)
- submodules = os.path.join(basedir, '.gitmodules')
- if not os.path.exists(submodules):
- return result
- f = open(submodules)
- for line in f:
- tokens = line.strip().split(' ')
- if tokens[0] == 'path':
- submodule_path = tokens[2]
- submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git'))
- if not submodule_info:
- submodule_info = ' not found - use git submodule update --init ' + submodule_path
- result += "\n {0}: {1}".format(submodule_path, submodule_info)
- f.close()
- return result
-
-
-def version(prog):
- result = "{0} {1}".format(prog, __version__)
- gitinfo = _gitinfo()
- if gitinfo:
- result = result + " {0}".format(gitinfo)
- result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
- return result
-
-def version_info(gitinfo=False):
- if gitinfo:
- # expensive call, user with care
- ansible_version_string = version('')
- else:
- ansible_version_string = __version__
- ansible_version = ansible_version_string.split()[0]
- ansible_versions = ansible_version.split('.')
- for counter in range(len(ansible_versions)):
- if ansible_versions[counter] == "":
- ansible_versions[counter] = 0
- try:
- ansible_versions[counter] = int(ansible_versions[counter])
- except:
- pass
- if len(ansible_versions) < 3:
- for counter in range(len(ansible_versions), 3):
- ansible_versions.append(0)
- return {'string': ansible_version_string.strip(),
- 'full': ansible_version,
- 'major': ansible_versions[0],
- 'minor': ansible_versions[1],
- 'revision': ansible_versions[2]}
-
-def getch():
- ''' read in a single character '''
- fd = sys.stdin.fileno()
- old_settings = termios.tcgetattr(fd)
- try:
- tty.setraw(sys.stdin.fileno())
- ch = sys.stdin.read(1)
- finally:
- termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
- return ch
-
-def sanitize_output(arg_string):
- ''' strips private info out of a string '''
-
- private_keys = ('password', 'login_password')
-
- output = []
- for part in arg_string.split():
- try:
- (k, v) = part.split('=', 1)
- except ValueError:
- v = heuristic_log_sanitize(part)
- output.append(v)
- continue
-
- if k in private_keys:
- v = 'VALUE_HIDDEN'
- else:
- v = heuristic_log_sanitize(v)
- output.append('%s=%s' % (k, v))
-
- output = ' '.join(output)
- return output
-
-
-####################################################################
-# option handling code for /usr/bin/ansible and ansible-playbook
-# below this line
-
-class SortedOptParser(optparse.OptionParser):
- '''Optparser which sorts the options by opt before outputting --help'''
-
- def format_help(self, formatter=None):
- self.option_list.sort(key=operator.methodcaller('get_opt_string'))
- return optparse.OptionParser.format_help(self, formatter=None)
-
-def increment_debug(option, opt, value, parser):
- global VERBOSITY
- VERBOSITY += 1
-
-def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
- async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
- ''' create an options parser for any ansible script '''
-
- parser = SortedOptParser(usage, version=version("%prog"))
- parser.add_option('-v','--verbose', default=False, action="callback",
- callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
-
- parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
- help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
- parser.add_option('-i', '--inventory-file', dest='inventory',
- help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
- default=constants.DEFAULT_HOST_LIST)
- parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
- help="set additional variables as key=value or YAML/JSON", default=[])
- parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user',
- help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
- parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
- help='ask for SSH password')
- parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
- help='use this file to authenticate the connection')
- parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
- help='ask for vault password')
- parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE,
- dest='vault_password_file', help="vault password file")
- parser.add_option('--list-hosts', dest='listhosts', action='store_true',
- help='outputs a list of matching hosts; does not execute anything else')
- parser.add_option('-M', '--module-path', dest='module_path',
- help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
- default=None)
-
- if subset_opts:
- parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
- help='further limit selected hosts to an additional pattern')
-
- parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
- dest='timeout',
- help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
-
- if output_opts:
- parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
- help='condense output')
- parser.add_option('-t', '--tree', dest='tree', default=None,
- help='log output to this directory')
-
- if runas_opts:
- # priv user defaults to root later on to enable detecting when this option was given here
- parser.add_option('-K', '--ask-sudo-pass', default=constants.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
- help='ask for sudo password (deprecated, use become)')
- parser.add_option('--ask-su-pass', default=constants.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
- help='ask for su password (deprecated, use become)')
- parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
- help="run operations with sudo (nopasswd) (deprecated, use become)")
- parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
- help='desired sudo user (default=root) (deprecated, use become)')
- parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
- help='run operations with su (deprecated, use become)')
- parser.add_option('-R', '--su-user', default=None,
- help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)
-
- # consolidated privilege escalation (become)
- parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
- help="run operations with become (nopasswd implied)")
- parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
- help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
- parser.add_option('--become-user', default=None, dest='become_user', type='string',
- help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
- parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
- help='ask for privilege escalation password')
-
-
- if connect_opts:
- parser.add_option('-c', '--connection', dest='connection',
- default=constants.DEFAULT_TRANSPORT,
- help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT)
-
- if async_opts:
- parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
- dest='poll_interval',
- help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
- parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
- help='run asynchronously, failing after X seconds (default=N/A)')
-
- if check_opts:
- parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
- help="don't make any changes; instead, try to predict some of the changes that may occur"
- )
-
- if diff_opts:
- parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
- help="when changing (small) files and templates, show the differences in those files; works great with --check"
- )
-
- return parser
-
-def parse_extra_vars(extra_vars_opts, vault_pass):
- extra_vars = {}
- for extra_vars_opt in extra_vars_opts:
- extra_vars_opt = to_unicode(extra_vars_opt)
- if extra_vars_opt.startswith(u"@"):
- # Argument is a YAML file (JSON is a subset of YAML)
- extra_vars = combine_vars(extra_vars, parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass))
- elif extra_vars_opt and extra_vars_opt[0] in u'[{':
- # Arguments as YAML
- extra_vars = combine_vars(extra_vars, parse_yaml(extra_vars_opt))
- else:
- # Arguments as Key-value
- extra_vars = combine_vars(extra_vars, parse_kv(extra_vars_opt))
- return extra_vars
-
-def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
-
- vault_pass = None
- new_vault_pass = None
-
- if ask_vault_pass:
- vault_pass = getpass.getpass(prompt="Vault password: ")
-
- if ask_vault_pass and confirm_vault:
- vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
- if vault_pass != vault_pass2:
- raise errors.AnsibleError("Passwords do not match")
-
- if ask_new_vault_pass:
- new_vault_pass = getpass.getpass(prompt="New Vault password: ")
-
- if ask_new_vault_pass and confirm_new:
- new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
- if new_vault_pass != new_vault_pass2:
- raise errors.AnsibleError("Passwords do not match")
-
- # enforce no newline chars at the end of passwords
- if vault_pass:
- vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
- if new_vault_pass:
- new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
-
- return vault_pass, new_vault_pass
-
-def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD):
- sshpass = None
- becomepass = None
- vaultpass = None
- become_prompt = ''
-
- if ask_pass:
- sshpass = getpass.getpass(prompt="SSH password: ")
- become_prompt = "%s password[defaults to SSH password]: " % become_method.upper()
- if sshpass:
- sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
- else:
- become_prompt = "%s password: " % become_method.upper()
-
- if become_ask_pass:
- becomepass = getpass.getpass(prompt=become_prompt)
- if ask_pass and becomepass == '':
- becomepass = sshpass
- if becomepass:
- becomepass = to_bytes(becomepass)
-
- if ask_vault_pass:
- vaultpass = getpass.getpass(prompt="Vault password: ")
- if vaultpass:
- vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()
-
- return (sshpass, becomepass, vaultpass)
-
-
-def choose_pass_prompt(options):
-
- if options.ask_su_pass:
- return 'su'
- elif options.ask_sudo_pass:
- return 'sudo'
-
- return options.become_method
-
-def normalize_become_options(options):
-
- options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
- options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER
-
- if options.become:
- pass
- elif options.sudo:
- options.become = True
- options.become_method = 'sudo'
- elif options.su:
- options.become = True
- options.become_method = 'su'
-
-
-def do_encrypt(result, encrypt, salt_size=None, salt=None):
- if PASSLIB_AVAILABLE:
- try:
- crypt = getattr(passlib.hash, encrypt)
- except:
- raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)
-
- if salt_size:
- result = crypt.encrypt(result, salt_size=salt_size)
- elif salt:
- result = crypt.encrypt(result, salt=salt)
- else:
- result = crypt.encrypt(result)
- else:
- raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")
-
- return result
-
-def last_non_blank_line(buf):
-
- all_lines = buf.splitlines()
- all_lines.reverse()
- for line in all_lines:
- if (len(line) > 0):
- return line
- # shouldn't occur unless there's no output
- return ""
-
-def filter_leading_non_json_lines(buf):
- '''
- used to avoid random output from SSH at the top of JSON output, like messages from
- tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
-
- need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
- filter only leading lines since multiline JSON is valid.
- '''
-
- filtered_lines = StringIO.StringIO()
- stop_filtering = False
- for line in buf.splitlines():
- if stop_filtering or line.startswith('{') or line.startswith('['):
- stop_filtering = True
- filtered_lines.write(line + '\n')
- return filtered_lines.getvalue()
-
-def boolean(value):
- val = str(value)
- if val.lower() in [ "true", "t", "y", "1", "yes" ]:
- return True
- else:
- return False
-
-def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
- """
- helper function for connection plugins to create privilege escalation commands
- """
-
- randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
- success_key = 'BECOME-SUCCESS-%s' % randbits
- prompt = None
- becomecmd = None
-
- shell = shell or '$SHELL'
-
- if method == 'sudo':
- # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
- # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
- # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
- # string to the user's shell. We loop reading output until we see the randomly-generated
- # sudo prompt set with the -p option.
- prompt = '[sudo via ansible, key=%s] password: ' % randbits
- exe = exe or C.DEFAULT_SUDO_EXE
- becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
- (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
-
- elif method == 'su':
- exe = exe or C.DEFAULT_SU_EXE
- flags = flags or C.DEFAULT_SU_FLAGS
- becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
-
- elif method == 'pbrun':
- prompt = 'assword:'
- exe = exe or 'pbrun'
- flags = flags or ''
- becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd)))
-
- elif method == 'pfexec':
- exe = exe or 'pfexec'
- flags = flags or ''
- # No user as it uses it's own exec_attr to figure it out
- becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd)))
-
- if becomecmd is None:
- raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
-
- return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
-
-
-def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd):
- """
- helper function for connection plugins to create sudo commands
- """
- return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe)
-
-
-def make_su_cmd(su_user, executable, cmd):
- """
- Helper function for connection plugins to create direct su commands
- """
- return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE)
-
-def get_diff(diff):
- # called by --diff usage in playbook and runner via callbacks
- # include names in diffs 'before' and 'after' and do diff -U 10
-
- try:
- with warnings.catch_warnings():
- warnings.simplefilter('ignore')
- ret = []
- if 'dst_binary' in diff:
- ret.append("diff skipped: destination file appears to be binary\n")
- if 'src_binary' in diff:
- ret.append("diff skipped: source file appears to be binary\n")
- if 'dst_larger' in diff:
- ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
- if 'src_larger' in diff:
- ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
- if 'before' in diff and 'after' in diff:
- if 'before_header' in diff:
- before_header = "before: %s" % diff['before_header']
- else:
- before_header = 'before'
- if 'after_header' in diff:
- after_header = "after: %s" % diff['after_header']
- else:
- after_header = 'after'
- differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
- for line in list(differ):
- ret.append(line)
- return u"".join(ret)
- except UnicodeDecodeError:
- return ">> the files are different, but the diff library cannot compare unicode strings"
-
-def is_list_of_strings(items):
- for x in items:
- if not isinstance(x, basestring):
- return False
- return True
-
-def list_union(a, b):
- result = []
- for x in a:
- if x not in result:
- result.append(x)
- for x in b:
- if x not in result:
- result.append(x)
- return result
-
-def list_intersection(a, b):
- result = []
- for x in a:
- if x in b and x not in result:
- result.append(x)
- return result
-
-def list_difference(a, b):
- result = []
- for x in a:
- if x not in b and x not in result:
- result.append(x)
- for x in b:
- if x not in a and x not in result:
- result.append(x)
- return result
-
-def contains_vars(data):
- '''
- returns True if the data contains a variable pattern
- '''
- return "$" in data or "{{" in data
-
-def safe_eval(expr, locals={}, include_exceptions=False):
- '''
- This is intended for allowing things like:
- with_items: a_list_variable
-
- Where Jinja2 would return a string but we do not want to allow it to
- call functions (outside of Jinja2, where the env is constrained). If
- the input data to this function came from an untrusted (remote) source,
- it should first be run through _clean_data_struct() to ensure the data
- is further sanitized prior to evaluation.
-
- Based on:
- http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
- '''
-
- # this is the whitelist of AST nodes we are going to
- # allow in the evaluation. Any node type other than
- # those listed here will raise an exception in our custom
- # visitor class defined below.
- SAFE_NODES = set(
- (
- ast.Add,
- ast.BinOp,
- ast.Call,
- ast.Compare,
- ast.Dict,
- ast.Div,
- ast.Expression,
- ast.List,
- ast.Load,
- ast.Mult,
- ast.Num,
- ast.Name,
- ast.Str,
- ast.Sub,
- ast.Tuple,
- ast.UnaryOp,
- )
- )
-
- # AST node types were expanded after 2.6
- if not sys.version.startswith('2.6'):
- SAFE_NODES.union(
- set(
- (ast.Set,)
- )
- )
-
- filter_list = []
- for filter in filter_loader.all():
- filter_list.extend(filter.filters().keys())
-
- CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
-
- class CleansingNodeVisitor(ast.NodeVisitor):
- def generic_visit(self, node, inside_call=False):
- if type(node) not in SAFE_NODES:
- raise Exception("invalid expression (%s)" % expr)
- elif isinstance(node, ast.Call):
- inside_call = True
- elif isinstance(node, ast.Name) and inside_call:
- if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST:
- raise Exception("invalid function: %s" % node.id)
- # iterate over all child nodes
- for child_node in ast.iter_child_nodes(node):
- self.generic_visit(child_node, inside_call)
-
- if not isinstance(expr, basestring):
- # already templated to a datastructure, perhaps?
- if include_exceptions:
- return (expr, None)
- return expr
-
- cnv = CleansingNodeVisitor()
- try:
- parsed_tree = ast.parse(expr, mode='eval')
- cnv.visit(parsed_tree)
- compiled = compile(parsed_tree, expr, 'eval')
- result = eval(compiled, {}, locals)
-
- if include_exceptions:
- return (result, None)
- else:
- return result
- except SyntaxError, e:
- # special handling for syntax errors, we just return
- # the expression string back as-is
- if include_exceptions:
- return (expr, None)
- return expr
- except Exception, e:
- if include_exceptions:
- return (expr, e)
- return expr
-
-
-def listify_lookup_plugin_terms(terms, basedir, inject):
-
- from ansible.utils import template
-
- if isinstance(terms, basestring):
- # someone did:
- # with_items: alist
- # OR
- # with_items: {{ alist }}
-
- stripped = terms.strip()
- if not (stripped.startswith('{') or stripped.startswith('[')) and \
- not stripped.startswith("/") and \
- not stripped.startswith('set([') and \
- not LOOKUP_REGEX.search(terms):
- # if not already a list, get ready to evaluate with Jinja2
- # not sure why the "/" is in above code :)
- try:
- new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
- if isinstance(new_terms, basestring) and "{{" in new_terms:
- pass
- else:
- terms = new_terms
- except:
- pass
-
- if '{' in terms or '[' in terms:
- # Jinja2 already evaluated a variable to a list.
- # Jinja2-ified list needs to be converted back to a real type
- # TODO: something a bit less heavy than eval
- return safe_eval(terms)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- return terms
-
-def combine_vars(a, b):
-
- _validate_both_dicts(a, b)
-
- if C.DEFAULT_HASH_BEHAVIOUR == "merge":
- return merge_hash(a, b)
- else:
- return dict(a.items() + b.items())
-
-def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
- '''Return a random password string of length containing only chars.'''
-
- password = []
- while len(password) < length:
- new_char = os.urandom(1)
- if new_char in chars:
- password.append(new_char)
-
- return ''.join(password)
-
-def before_comment(msg):
- ''' what's the part of a string before a comment? '''
- msg = msg.replace("\#","**NOT_A_COMMENT**")
- msg = msg.split("#")[0]
- msg = msg.replace("**NOT_A_COMMENT**","#")
- return msg
-
-def load_vars(basepath, results, vault_password=None):
- """
- Load variables from any potential yaml filename combinations of basepath,
- returning result.
- """
-
- paths_to_check = [ "".join([basepath, ext])
- for ext in C.YAML_FILENAME_EXTENSIONS ]
-
- found_paths = []
-
- for path in paths_to_check:
- found, results = _load_vars_from_path(path, results, vault_password=vault_password)
- if found:
- found_paths.append(path)
-
-
- # disallow the potentially confusing situation that there are multiple
- # variable files for the same name. For example if both group_vars/all.yml
- # and group_vars/all.yaml
- if len(found_paths) > 1:
- raise errors.AnsibleError("Multiple variable files found. "
- "There should only be one. %s" % ( found_paths, ))
-
- return results
-
-## load variables from yaml files/dirs
-# e.g. host/group_vars
-#
-def _load_vars_from_path(path, results, vault_password=None):
- """
- Robustly access the file at path and load variables, carefully reporting
- errors in a friendly/informative way.
-
- Return the tuple (found, new_results, )
- """
-
- try:
- # in the case of a symbolic link, we want the stat of the link itself,
- # not its target
- pathstat = os.lstat(path)
- except os.error, err:
- # most common case is that nothing exists at that path.
- if err.errno == errno.ENOENT:
- return False, results
- # otherwise this is a condition we should report to the user
- raise errors.AnsibleError(
- "%s is not accessible: %s."
- " Please check its permissions." % ( path, err.strerror))
-
- # symbolic link
- if stat.S_ISLNK(pathstat.st_mode):
- try:
- target = os.path.realpath(path)
- except os.error, err2:
- raise errors.AnsibleError("The symbolic link at %s "
- "is not readable: %s. Please check its permissions."
- % (path, err2.strerror, ))
- # follow symbolic link chains by recursing, so we repeat the same
- # permissions checks above and provide useful errors.
- return _load_vars_from_path(target, results, vault_password)
-
- # directory
- if stat.S_ISDIR(pathstat.st_mode):
-
- # support organizing variables across multiple files in a directory
- return True, _load_vars_from_folder(path, results, vault_password=vault_password)
-
- # regular file
- elif stat.S_ISREG(pathstat.st_mode):
- data = parse_yaml_from_file(path, vault_password=vault_password)
- if data and type(data) != dict:
- raise errors.AnsibleError(
- "%s must be stored as a dictionary/hash" % path)
- elif data is None:
- data = {}
-
- # combine vars overrides by default but can be configured to do a
- # hash merge in settings
- results = combine_vars(results, data)
- return True, results
-
- # something else? could be a fifo, socket, device, etc.
- else:
- raise errors.AnsibleError("Expected a variable file or directory "
- "but found a non-file object at path %s" % (path, ))
-
-def _load_vars_from_folder(folder_path, results, vault_password=None):
- """
- Load all variables within a folder recursively.
- """
-
- # this function and _load_vars_from_path are mutually recursive
-
- try:
- names = os.listdir(folder_path)
- except os.error, err:
- raise errors.AnsibleError(
- "This folder cannot be listed: %s: %s."
- % ( folder_path, err.strerror))
-
- # evaluate files in a stable order rather than whatever order the
- # filesystem lists them.
- names.sort()
-
- # do not parse hidden files or dirs, e.g. .svn/
- paths = [os.path.join(folder_path, name) for name in names
- if not name.startswith('.')
- and os.path.splitext(name)[1] in C.YAML_FILENAME_EXTENSIONS]
- for path in paths:
- _found, results = _load_vars_from_path(path, results, vault_password=vault_password)
- return results
-
-def update_hash(hash, key, new_value):
- ''' used to avoid nested .update calls on the parent '''
-
- value = hash.get(key, {})
- value.update(new_value)
- hash[key] = value
-
-def censor_unlogged_data(data):
- '''
- used when the no_log: True attribute is passed to a task to keep data from a callback.
- NOT intended to prevent variable registration, but only things from showing up on
- screen
- '''
- new_data = {}
- for (x,y) in data.iteritems():
- if x in [ 'skipped', 'changed', 'failed', 'rc' ]:
- new_data[x] = y
- new_data['censored'] = 'results hidden due to no_log parameter'
- return new_data
-
-def check_mutually_exclusive_privilege(options, parser):
-
- # privilege escalation command line arguments need to be mutually exclusive
- if (options.su or options.su_user or options.ask_su_pass) and \
- (options.sudo or options.sudo_user or options.ask_sudo_pass) or \
- (options.su or options.su_user or options.ask_su_pass) and \
- (options.become or options.become_user or options.become_ask_pass) or \
- (options.sudo or options.sudo_user or options.ask_sudo_pass) and \
- (options.become or options.become_user or options.become_ask_pass):
-
- parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
- "and su arguments ('-su', '--su-user', and '--ask-su-pass') "
- "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
- " are exclusive of each other")
-
-
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
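
The removed module above bundled, among many other helpers, the hash-merging logic (_validate_both_dicts, merge_hash, combine_vars) that decides whether later variable sources replace or recursively merge into earlier ones. As a minimal illustration of that merge semantics only, not the relocated v2 implementation, here is a standalone sketch in Python 3 syntax:

    def merge_hash(a, b):
        """Recursively merge dict b into dict a; on conflicts, values from b win."""
        result = dict(a)
        for key, value in b.items():
            if key in result and isinstance(result[key], dict) and isinstance(value, dict):
                result[key] = merge_hash(result[key], value)
            else:
                result[key] = value
        return result

    # nested keys are merged, scalar conflicts are overridden by the right-hand side
    merged = merge_hash({'db': {'host': 'a', 'port': 5432}}, {'db': {'host': 'b'}})
    assert merged == {'db': {'host': 'b', 'port': 5432}}

The original helper also validates that both arguments are dicts before merging; the sketch leaves that to the caller.
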
diff --git a/v2/ansible/utils/boolean.py b/lib/ansible/utils/boolean.py
similarity index 100%
rename from v2/ansible/utils/boolean.py
rename to lib/ansible/utils/boolean.py
diff --git a/lib/ansible/utils/cmd_functions.py b/lib/ansible/utils/cmd_functions.py
index 6525260f107..7cb1912d07c 100644
--- a/lib/ansible/utils/cmd_functions.py
+++ b/lib/ansible/utils/cmd_functions.py
@@ -27,7 +27,7 @@ def run_cmd(cmd, live=False, readsize=10):
cmdargs = shlex.split(cmd)
p = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
+
stdout = ''
stderr = ''
rpipes = [p.stdout, p.stderr]
diff --git a/v2/ansible/utils/color.py b/lib/ansible/utils/color.py
similarity index 100%
rename from v2/ansible/utils/color.py
rename to lib/ansible/utils/color.py
diff --git a/v2/ansible/utils/debug.py b/lib/ansible/utils/debug.py
similarity index 100%
rename from v2/ansible/utils/debug.py
rename to lib/ansible/utils/debug.py
diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py
new file mode 100644
index 00000000000..fa16b7af05c
--- /dev/null
+++ b/lib/ansible/utils/display.py
@@ -0,0 +1,241 @@
+# (c) 2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# FIXME: copied mostly from old code, needs py3 improvements
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import textwrap
+import os
+import random
+import subprocess
+import sys
+import time
+import logging
+import getpass
+from multiprocessing import Lock
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.utils.color import stringc
+from ansible.utils.unicode import to_bytes
+
+
+
+# These are module level as we currently fork and serialize the whole process and locks in the objects don't play well with that
+debug_lock = Lock()
+
+#TODO: make this a logging callback instead
+if C.DEFAULT_LOG_PATH:
+ path = C.DEFAULT_LOG_PATH
+ if (os.path.exists(path) and not os.access(path, os.W_OK)) and not os.access(os.path.dirname(path), os.W_OK):
+ self._display.warning("log file at %s is not writeable, aborting\n" % path)
+
+ logging.basicConfig(filename=path, level=logging.DEBUG, format='%(asctime)s %(name)s %(message)s')
+ mypid = str(os.getpid())
+ user = getpass.getuser()
+ logger = logging.getLogger("p=%s u=%s | " % (mypid, user))
+else:
+ logger = None
+
+
+class Display:
+
+ def __init__(self, verbosity=0):
+
+ self.verbosity = verbosity
+
+ # list of all deprecation messages to prevent duplicate display
+ self._deprecations = {}
+ self._warns = {}
+ self._errors = {}
+
+ self.cowsay = None
+ self.noncow = os.getenv("ANSIBLE_COW_SELECTION",None)
+ self.set_cowsay_info()
+
+
+ def set_cowsay_info(self):
+
+ if not C.ANSIBLE_NOCOWS:
+ if os.path.exists("/usr/bin/cowsay"):
+ self.cowsay = "/usr/bin/cowsay"
+ elif os.path.exists("/usr/games/cowsay"):
+ self.cowsay = "/usr/games/cowsay"
+ elif os.path.exists("/usr/local/bin/cowsay"):
+ # BSD path for cowsay
+ self.cowsay = "/usr/local/bin/cowsay"
+ elif os.path.exists("/opt/local/bin/cowsay"):
+ # MacPorts path for cowsay
+ self.cowsay = "/opt/local/bin/cowsay"
+
+ if self.cowsay and self.noncow == 'random':
+ cmd = subprocess.Popen([self.cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (out, err) = cmd.communicate()
+ cows = out.split()
+ cows.append(False)
+ self.noncow = random.choice(cows)
+
+ def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False):
+
+ # FIXME: this needs to be implemented
+ #msg = utils.sanitize_output(msg)
+ msg2 = self._safe_output(msg, stderr=stderr)
+ if color:
+ msg2 = stringc(msg, color)
+
+ if not log_only:
+ b_msg2 = to_bytes(msg2)
+ if not stderr:
+ print(b_msg2)
+ sys.stdout.flush()
+ else:
+ print(b_msg2, file=sys.stderr)
+ sys.stderr.flush()
+
+ if logger and not screen_only:
+ while msg.startswith("\n"):
+ msg = msg.replace("\n","")
+ b_msg = to_bytes(msg)
+ if color == 'red':
+ logger.error(b_msg)
+ else:
+ logger.info(b_msg)
+
+ def vv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=1)
+
+ def vvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=2)
+
+ def vvvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=3)
+
+ def vvvvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=4)
+
+ def vvvvvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=5)
+
+ def debug(self, msg):
+ if C.DEFAULT_DEBUG:
+ debug_lock.acquire()
+ self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color='dark gray')
+ debug_lock.release()
+
+ def verbose(self, msg, host=None, caplevel=2):
+ # FIXME: this needs to be implemented
+ #msg = utils.sanitize_output(msg)
+ if self.verbosity > caplevel:
+ if host is None:
+ self.display(msg, color='blue')
+ else:
+ self.display("<%s> %s" % (host, msg), color='blue', screen_only=True)
+
+ def deprecated(self, msg, version=None, removed=False):
+ ''' used to print out a deprecation message.'''
+
+ if not removed and not C.DEPRECATION_WARNINGS:
+ return
+
+ if not removed:
+ if version:
+ new_msg = "[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version)
+ else:
+ new_msg = "[DEPRECATION WARNING]: %s. This feature will be removed in a future release." % (msg)
+ new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
+ else:
+ raise AnsibleError("[DEPRECATED]: %s. Please update your playbooks." % msg)
+
+ wrapped = textwrap.wrap(new_msg, 79)
+ new_msg = "\n".join(wrapped) + "\n"
+
+ if new_msg not in self._deprecations:
+ self.display(new_msg, color='purple', stderr=True)
+ self._deprecations[new_msg] = 1
+
+ def warning(self, msg):
+ new_msg = "\n[WARNING]: %s" % msg
+ wrapped = textwrap.wrap(new_msg, 79)
+ new_msg = "\n".join(wrapped) + "\n"
+ if new_msg not in self._warns:
+ self.display(new_msg, color='bright purple', stderr=True)
+ self._warns[new_msg] = 1
+
+ def system_warning(self, msg):
+ if C.SYSTEM_WARNINGS:
+ self.warning(msg)
+
+ def banner(self, msg, color=None):
+ '''
+ Prints a header-looking line with stars taking up to 80 columns
+ of width (3 columns, minimum)
+ '''
+ if self.cowsay:
+ try:
+ self.banner_cowsay(msg)
+ return
+ except OSError:
+ self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
+
+ #FIXME: make this dynamic on tty size (look at ansible-doc)
+ msg = msg.strip()
+ star_len = (80 - len(msg))
+ if star_len < 0:
+ star_len = 3
+ stars = "*" * star_len
+ self.display("\n%s %s" % (msg, stars), color=color)
+
+ def banner_cowsay(self, msg, color=None):
+ if ": [" in msg:
+ msg = msg.replace("[","")
+ if msg.endswith("]"):
+ msg = msg[:-1]
+ runcmd = [self.cowsay,"-W", "60"]
+ if self.noncow:
+ runcmd.append('-f')
+ runcmd.append(self.noncow)
+ runcmd.append(msg)
+ cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (out, err) = cmd.communicate()
+ self.display("%s\n" % out, color=color)
+
+ def error(self, msg, wrap_text=True):
+ if wrap_text:
+ new_msg = "\n[ERROR]: %s" % msg
+ wrapped = textwrap.wrap(new_msg, 79)
+ new_msg = "\n".join(wrapped) + "\n"
+ else:
+ new_msg = msg
+ if new_msg not in self._errors:
+ self.display(new_msg, color='red', stderr=True)
+ self._errors[new_msg] = 1
+
+ def prompt(self, msg):
+
+ return raw_input(self._safe_output(msg))
+
+ def _safe_output(self, msg, stderr=False):
+
+ if not stderr and sys.stdout.encoding:
+ msg = to_bytes(msg, sys.stdout.encoding)
+ elif stderr and sys.stderr.encoding:
+ msg = to_bytes(msg, sys.stderr.encoding)
+ else:
+ msg = to_bytes(msg)
+
+ return msg
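The Display helpers above gate output on verbosity (verbose() only prints when self.verbosity > caplevel) and de-duplicate repeated warnings, errors and deprecations. A minimal usage sketch, assuming the class is importable as ansible.utils.display.Display:

    # Minimal sketch; verbosity gating follows the verbose() implementation above.
    from ansible.utils.display import Display

    display = Display(verbosity=3)
    display.display("always printed")
    display.vv("printed, since verbosity (3) > caplevel (1)")
    display.vvvv("suppressed, since verbosity (3) is not > caplevel (3)")
    display.warning("printed once; identical repeat warnings are dropped")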
diff --git a/v2/ansible/utils/encrypt.py b/lib/ansible/utils/encrypt.py
similarity index 100%
rename from v2/ansible/utils/encrypt.py
rename to lib/ansible/utils/encrypt.py
diff --git a/lib/ansible/utils/hashing.py b/lib/ansible/utils/hashing.py
index a7d142e5bd4..5e378db79f4 100644
--- a/lib/ansible/utils/hashing.py
+++ b/lib/ansible/utils/hashing.py
@@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
+from ansible.errors import AnsibleError
# Note, sha1 is the only hash algorithm compatible with python2.4 and with
# FIPS-140 mode (as of 11-2014)
@@ -43,6 +44,8 @@ def secure_hash_s(data, hash_func=sha1):
digest = hash_func()
try:
+ if not isinstance(data, basestring):
+ data = "%s" % data
digest.update(data)
except UnicodeEncodeError:
digest.update(data.encode('utf-8'))
@@ -62,8 +65,8 @@ def secure_hash(filename, hash_func=sha1):
digest.update(block)
block = infile.read(blocksize)
infile.close()
- except IOError, e:
- raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
+ except IOError as e:
+ raise AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
return digest.hexdigest()
# The checksum algorithm must match with the algorithm in ShellModule.checksum() method
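The hashing change above stringifies non-string data before digesting and raises AnsibleError (instead of the old errors.AnsibleError reference) when the file cannot be read. A small sketch, assuming the module path lib/ansible/utils/hashing.py as patched:

    # Sketch only; secure_hash_s now coerces non-string input to a string first.
    from ansible.utils.hashing import secure_hash_s

    print(secure_hash_s("foo"))   # sha1 hex digest of the string "foo"
    print(secure_hash_s(42))      # would previously raise TypeError, now hashes "42"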
diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py
new file mode 100644
index 00000000000..237e131613f
--- /dev/null
+++ b/lib/ansible/utils/listify.py
@@ -0,0 +1,43 @@
+# (c) 2014 Michael DeHaan,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections import Iterable
+from ansible.template import Templar
+from ansible.template.safe_eval import safe_eval
+
+__all__ = ['listify_lookup_plugin_terms']
+
+#FIXME: probably just move this into lookup plugin base class
+def listify_lookup_plugin_terms(terms, templar, loader, fail_on_undefined=False):
+
+ if isinstance(terms, basestring):
+ stripped = terms.strip()
+ #FIXME: warn/deprecation on bare vars in with_ so we can eventually remove fail on undefined override
+ terms = templar.template(terms, convert_bare=True, fail_on_undefined=fail_on_undefined)
+ #TODO: check if this is needed as template should also return correct type already
+ #terms = safe_eval(terms)
+ else:
+ terms = templar.template(terms, fail_on_undefined=fail_on_undefined)
+
+ if isinstance(terms, basestring) or not isinstance(terms, Iterable):
+ terms = [ terms ]
+
+ return terms
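listify_lookup_plugin_terms normalizes with_* terms: bare-string terms are templated with convert_bare=True, and anything that is not already a non-string iterable gets wrapped in a list. An illustrative sketch, assuming a plain DataLoader and made-up variable names:

    # Illustrative sketch; the variable names here are invented for the example.
    from ansible.parsing import DataLoader
    from ansible.template import Templar
    from ansible.utils.listify import listify_lookup_plugin_terms

    loader = DataLoader()
    templar = Templar(loader=loader, variables={'packages': ['git', 'vim']})

    # a bare variable name resolves to the underlying list
    print(listify_lookup_plugin_terms('packages', templar, loader))   # ['git', 'vim']
    # scalars are wrapped so lookup plugins always receive a list
    print(listify_lookup_plugin_terms(42, templar, loader))           # [42]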
diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py
index c6920571726..f1a2bbbd1a1 100644
--- a/lib/ansible/utils/module_docs.py
+++ b/lib/ansible/utils/module_docs.py
@@ -24,8 +24,7 @@ import yaml
import traceback
from collections import MutableMapping, MutableSet, MutableSequence
-
-from ansible import utils
+from ansible.plugins import fragment_loader
# modules that are ok that they do not have documentation strings
BLACKLIST_MODULES = [
@@ -53,55 +52,63 @@ def get_docstring(filename, verbose=False):
M = ast.parse(''.join(open(filename)))
for child in M.body:
if isinstance(child, ast.Assign):
- if 'DOCUMENTATION' in (t.id for t in child.targets):
- doc = yaml.safe_load(child.value.s)
- fragment_slug = doc.get('extends_documentation_fragment',
- 'doesnotexist').lower()
-
- # Allow the module to specify a var other than DOCUMENTATION
- # to pull the fragment from, using dot notation as a separator
- if '.' in fragment_slug:
- fragment_name, fragment_var = fragment_slug.split('.', 1)
- fragment_var = fragment_var.upper()
- else:
- fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION'
-
-
- if fragment_slug != 'doesnotexist':
- fragment_class = utils.plugins.fragment_loader.get(fragment_name)
- assert fragment_class is not None
-
- fragment_yaml = getattr(fragment_class, fragment_var, '{}')
- fragment = yaml.safe_load(fragment_yaml)
-
- if fragment.has_key('notes'):
- notes = fragment.pop('notes')
- if notes:
- if not doc.has_key('notes'):
- doc['notes'] = []
- doc['notes'].extend(notes)
-
- if 'options' not in fragment.keys():
- raise Exception("missing options in fragment, possibly misformatted?")
-
- for key, value in fragment.items():
- if not doc.has_key(key):
- doc[key] = value
+ for t in child.targets:
+ try:
+ theid = t.id
+ except AttributeError as e:
+ continue #TODO: should log these to figure out why this happens
+
+ if 'DOCUMENTATION' in theid:
+ doc = yaml.safe_load(child.value.s)
+ fragments = doc.get('extends_documentation_fragment', [])
+
+ if isinstance(fragments, basestring):
+ fragments = [ fragments ]
+
+ # Allow the module to specify a var other than DOCUMENTATION
+ # to pull the fragment from, using dot notation as a separator
+ for fragment_slug in fragments:
+ fragment_slug = fragment_slug.lower()
+ if '.' in fragment_slug:
+ fragment_name, fragment_var = fragment_slug.split('.', 1)
+ fragment_var = fragment_var.upper()
else:
- if isinstance(doc[key], MutableMapping):
- doc[key].update(value)
- elif isinstance(doc[key], MutableSet):
- doc[key].add(value)
- elif isinstance(doc[key], MutableSequence):
- doc[key] = sorted(frozenset(doc[key] + value))
- else:
- raise Exception("Attempt to extend a documentation fragement of unknown type")
+ fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION'
+
+ fragment_class = fragment_loader.get(fragment_name)
+ assert fragment_class is not None
- if 'EXAMPLES' in (t.id for t in child.targets):
- plainexamples = child.value.s[1:] # Skip first empty line
+ fragment_yaml = getattr(fragment_class, fragment_var, '{}')
+ fragment = yaml.safe_load(fragment_yaml)
- if 'RETURN' in (t.id for t in child.targets):
- returndocs = child.value.s[1:]
+ if fragment.has_key('notes'):
+ notes = fragment.pop('notes')
+ if notes:
+ if not doc.has_key('notes'):
+ doc['notes'] = []
+ doc['notes'].extend(notes)
+
+ if 'options' not in fragment.keys():
+ raise Exception("missing options in fragment, possibly misformatted?")
+
+ for key, value in fragment.items():
+ if not doc.has_key(key):
+ doc[key] = value
+ else:
+ if isinstance(doc[key], MutableMapping):
+ doc[key].update(value)
+ elif isinstance(doc[key], MutableSet):
+ doc[key].add(value)
+ elif isinstance(doc[key], MutableSequence):
+ doc[key] = sorted(frozenset(doc[key] + value))
+ else:
+ raise Exception("Attempt to extend a documentation fragment of unknown type")
+
+ elif 'EXAMPLES' in theid:
+ plainexamples = child.value.s[1:] # Skip first empty line
+
+ elif 'RETURN' in theid:
+ returndocs = child.value.s[1:]
except:
traceback.print_exc() # temp
if verbose == True:
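With the rewrite above, extends_documentation_fragment may name several fragments (either a string or a list). A hypothetical module docstring showing the list form that get_docstring() will now merge, using the files and validate fragments that ship in this tree:

    # Hypothetical DOCUMENTATION block for illustration only; not a real module.
    DOCUMENTATION = '''
    module: example_file_tool
    short_description: illustrative module, not part of this change
    options:
      path:
        description:
          - Destination path the module operates on.
        required: true
    extends_documentation_fragment:
      - files
      - validate
    '''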
diff --git a/lib/ansible/utils/module_docs_fragments/backup.py b/lib/ansible/utils/module_docs_fragments/backup.py
new file mode 100644
index 00000000000..f6b2902512a
--- /dev/null
+++ b/lib/ansible/utils/module_docs_fragments/backup.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2015 Ansible, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = '''
+options:
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ required: false
+ choices: [ "yes", "no" ]
+ default: "no"
+'''
diff --git a/lib/ansible/utils/module_docs_fragments/cloudstack.py b/lib/ansible/utils/module_docs_fragments/cloudstack.py
index 5a7411b199d..bafb7b4c15a 100644
--- a/lib/ansible/utils/module_docs_fragments/cloudstack.py
+++ b/lib/ansible/utils/module_docs_fragments/cloudstack.py
@@ -27,34 +27,36 @@ options:
- API key of the CloudStack API.
required: false
default: null
- aliases: []
api_secret:
description:
- Secret key of the CloudStack API.
required: false
default: null
- aliases: []
api_url:
description:
- URL of the CloudStack API e.g. https://cloud.example.com/client/api.
required: false
default: null
- aliases: []
api_http_method:
description:
- HTTP method used.
required: false
default: 'get'
- aliases: []
+ choices: [ 'get', 'post' ]
+ api_timeout:
+ description:
+ - HTTP timeout.
+ required: false
+ default: 10
requirements:
- "python >= 2.6"
- cs
notes:
- Ansible uses the C(cs) library's configuration method if credentials are not
provided by the options C(api_url), C(api_key), C(api_secret).
- Configuration is read from several locations, in the following order":"
+ Configuration is read from several locations, in the following order.
- The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) and
- C(CLOUDSTACK_METHOD) environment variables.
+ C(CLOUDSTACK_METHOD) and C(CLOUDSTACK_TIMEOUT) environment variables.
- A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file,
- A C(cloudstack.ini) file in the current working directory.
- A C(.cloudstack.ini) file in the users home directory.
diff --git a/lib/ansible/utils/module_docs_fragments/files.py b/lib/ansible/utils/module_docs_fragments/files.py
index 5087c0cf508..9bc96c6e257 100644
--- a/lib/ansible/utils/module_docs_fragments/files.py
+++ b/lib/ansible/utils/module_docs_fragments/files.py
@@ -25,7 +25,7 @@ options:
required: false
default: null
description:
- - mode the file or directory should be, such as 0644 as would be fed to I(chmod). As of version 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or C(u=rw,g=r,o=r)).
+ - mode the file or directory should be. For those used to I(/usr/bin/chmod) remember that modes are actually octal numbers (like 0644). Leaving off the leading zero will likely have unexpected results. As of version 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or C(u=rw,g=r,o=r)).
owner:
required: false
default: null
diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py
index 7e42841d6da..84322078ec0 100644
--- a/lib/ansible/utils/module_docs_fragments/openstack.py
+++ b/lib/ansible/utils/module_docs_fragments/openstack.py
@@ -23,16 +23,19 @@ class ModuleDocFragment(object):
options:
cloud:
description:
- - Named cloud to operate against. Provides default values for I(auth) and I(auth_plugin)
+ - Named cloud to operate against. Provides default values for I(auth) and
+ I(auth_type). This parameter is not needed if I(auth) is provided or if
+ OpenStack OS_* environment variables are present.
required: false
auth:
description:
- Dictionary containing auth information as needed by the cloud's auth
- plugin strategy. For the default I{password) plugin, this would contain
+ plugin strategy. For the default I(password) plugin, this would contain
I(auth_url), I(username), I(password), I(project_name) and any
information about domains if the cloud supports them. For other plugins,
this param will need to contain whatever parameters that auth plugin
- requires. This parameter is not needed if a named cloud is provided.
+ requires. This parameter is not needed if a named cloud is provided or
+ OpenStack OS_* environment variables are present.
required: false
auth_type:
description:
@@ -77,14 +80,17 @@ options:
- A path to a CA Cert bundle that can be used as part of verifying
SSL API requests.
required: false
+ default: None
cert:
description:
- A path to a client certificate to use as part of the SSL transaction
required: false
+ default: None
key:
description:
- A path to a client key to use as part of the SSL transaction
required: false
+ default: None
endpoint_type:
description:
- Endpoint URL type to fetch from the service catalog.
@@ -92,6 +98,7 @@ options:
required: false
default: public
requirements:
+ - python >= 2.7
- shade
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
@@ -100,5 +107,6 @@ notes:
can come from a yaml config file in /etc/ansible/openstack.yaml,
/etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
standard environment variables, then finally by explicit parameters in
- plays.
+ plays. More information can be found at
+ U(http://docs.openstack.org/developer/os-client-config)
'''
diff --git a/lib/ansible/utils/module_docs_fragments/validate.py b/lib/ansible/utils/module_docs_fragments/validate.py
new file mode 100644
index 00000000000..decefe113e8
--- /dev/null
+++ b/lib/ansible/utils/module_docs_fragments/validate.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2015 Ansible, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = '''
+options:
+ validate:
+ required: false
+ description:
+ - The validation command to run before copying into place. The path to the file to
+ validate is passed in via '%s' which must be present as in the example below.
+ The command is passed securely so shell features like expansion and pipes won't work.
+ default: None
+'''
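Modules that accept validate run the given command against the temporary file before moving it into place, substituting the path for '%s'. A hedged sketch of the usual in-module pattern (the helper name is invented here and this is not the literal core implementation):

    # Hedged sketch of the typical pattern; run_validate is a made-up helper name.
    def run_validate(module, validate_cmd, tmp_path):
        if validate_cmd:
            if '%s' not in validate_cmd:
                module.fail_json(msg="validate must contain %%s: %s" % validate_cmd)
            rc, out, err = module.run_command(validate_cmd % tmp_path)
            if rc != 0:
                module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err))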
diff --git a/v2/ansible/utils/path.py b/lib/ansible/utils/path.py
similarity index 77%
rename from v2/ansible/utils/path.py
rename to lib/ansible/utils/path.py
index e49a2f7d553..b271e7ed4bc 100644
--- a/v2/ansible/utils/path.py
+++ b/lib/ansible/utils/path.py
@@ -19,6 +19,8 @@ __metaclass__ = type
import os
import stat
+from time import sleep
+from errno import EEXIST
__all__ = ['is_executable', 'unfrackpath']
@@ -35,3 +37,14 @@ def unfrackpath(path):
'''
return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
+def makedirs_safe(path, mode=None):
+ '''Safe way to create dirs in multiprocess/thread environments'''
+ if not os.path.exists(path):
+ try:
+ if mode:
+ os.makedirs(path, mode)
+ else:
+ os.makedirs(path)
+ except OSError as e:
+ if e.errno != EEXIST:
+ raise
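makedirs_safe tolerates the race where a concurrent worker creates the directory first, since EEXIST is swallowed. A usage sketch with an assumed destination path:

    # Sketch; ~/.ansible/tmp is just an example destination.
    from ansible.utils.path import makedirs_safe, unfrackpath

    dest = unfrackpath('~/.ansible/tmp')
    makedirs_safe(dest, mode=0o700)   # EEXIST raised by a concurrent worker is ignored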
diff --git a/lib/ansible/utils/unicode.py b/lib/ansible/utils/unicode.py
index 7bd035c0075..2cff2e5e45c 100644
--- a/lib/ansible/utils/unicode.py
+++ b/lib/ansible/utils/unicode.py
@@ -19,6 +19,8 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+from six import string_types, text_type, binary_type, PY3
+
# to_bytes and to_unicode were written by Toshio Kuratomi for the
# python-kitchen library https://pypi.python.org/pypi/kitchen
# They are licensed in kitchen under the terms of the GPLv2+
@@ -35,6 +37,9 @@ _LATIN1_ALIASES = frozenset(('latin-1', 'LATIN-1', 'latin1', 'LATIN1',
# EXCEPTION_CONVERTERS is defined below due to using to_unicode
+if PY3:
+ basestring = (str, bytes)
+
def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
'''Convert an object into a :class:`unicode` string
@@ -89,12 +94,12 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
# Could use isbasestring/isunicode here but we want this code to be as
# fast as possible
if isinstance(obj, basestring):
- if isinstance(obj, unicode):
+ if isinstance(obj, text_type):
return obj
if encoding in _UTF8_ALIASES:
- return unicode(obj, 'utf-8', errors)
+ return text_type(obj, 'utf-8', errors)
if encoding in _LATIN1_ALIASES:
- return unicode(obj, 'latin-1', errors)
+ return text_type(obj, 'latin-1', errors)
return obj.decode(encoding, errors)
if not nonstring:
@@ -110,19 +115,19 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
simple = None
if not simple:
try:
- simple = str(obj)
+ simple = text_type(obj)
except UnicodeError:
try:
simple = obj.__str__()
except (UnicodeError, AttributeError):
simple = u''
- if isinstance(simple, str):
- return unicode(simple, encoding, errors)
+ if isinstance(simple, binary_type):
+ return text_type(simple, encoding, errors)
return simple
elif nonstring in ('repr', 'strict'):
obj_repr = repr(obj)
- if isinstance(obj_repr, str):
- obj_repr = unicode(obj_repr, encoding, errors)
+ if isinstance(obj_repr, binary_type):
+ obj_repr = text_type(obj_repr, encoding, errors)
if nonstring == 'repr':
return obj_repr
raise TypeError('to_unicode was given "%(obj)s" which is neither'
@@ -198,19 +203,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
# Could use isbasestring, isbytestring here but we want this to be as fast
# as possible
if isinstance(obj, basestring):
- if isinstance(obj, str):
+ if isinstance(obj, binary_type):
return obj
return obj.encode(encoding, errors)
if not nonstring:
nonstring = 'simplerepr'
if nonstring == 'empty':
- return ''
+ return b''
elif nonstring == 'passthru':
return obj
elif nonstring == 'simplerepr':
try:
- simple = str(obj)
+ simple = binary_type(obj)
except UnicodeError:
try:
simple = obj.__str__()
@@ -220,19 +225,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
try:
simple = obj.__unicode__()
except (AttributeError, UnicodeError):
- simple = ''
- if isinstance(simple, unicode):
+ simple = b''
+ if isinstance(simple, text_type):
simple = simple.encode(encoding, 'replace')
return simple
elif nonstring in ('repr', 'strict'):
try:
obj_repr = obj.__repr__()
except (AttributeError, UnicodeError):
- obj_repr = ''
- if isinstance(obj_repr, unicode):
+ obj_repr = b''
+ if isinstance(obj_repr, text_type):
obj_repr = obj_repr.encode(encoding, errors)
else:
- obj_repr = str(obj_repr)
+ obj_repr = binary_type(obj_repr)
if nonstring == 'repr':
return obj_repr
raise TypeError('to_bytes was given "%(obj)s" which is neither'
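With the six-based types above, to_unicode and to_bytes round-trip UTF-8 on both Python 2 and 3. A quick sketch using the default encoding:

    # Round-trip sketch using the helpers as patched above (utf-8 defaults).
    from ansible.utils.unicode import to_unicode, to_bytes

    raw = b'caf\xc3\xa9'
    text = to_unicode(raw)          # u'café'
    assert to_bytes(text) == raw    # encodes back to the same utf-8 bytes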
diff --git a/v2/ansible/utils/vars.py b/lib/ansible/utils/vars.py
similarity index 68%
rename from v2/ansible/utils/vars.py
rename to lib/ansible/utils/vars.py
index c033c0c2588..bfbc9d1a821 100644
--- a/v2/ansible/utils/vars.py
+++ b/lib/ansible/utils/vars.py
@@ -21,6 +21,8 @@ __metaclass__ = type
from ansible import constants as C
+from ansible.parsing.splitter import parse_kv
+from ansible.utils.unicode import to_unicode
def combine_vars(a, b):
@@ -49,3 +51,18 @@ def merge_hash(a, b):
return result
+def load_extra_vars(loader, options):
+ extra_vars = {}
+ for extra_vars_opt in options.extra_vars:
+ extra_vars_opt = to_unicode(extra_vars_opt, errors='strict')
+ if extra_vars_opt.startswith(u"@"):
+ # Argument is a YAML file (JSON is a subset of YAML)
+ data = loader.load_from_file(extra_vars_opt[1:])
+ elif extra_vars_opt and extra_vars_opt[0] in u'[{':
+ # Arguments as YAML
+ data = loader.load(extra_vars_opt)
+ else:
+ # Arguments as Key-value
+ data = parse_kv(extra_vars_opt)
+ extra_vars = combine_vars(extra_vars, data)
+ return extra_vars
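load_extra_vars accepts the same three forms as the CLI's --extra-vars flag: @file, inline YAML/JSON, and key=value pairs. A sketch with a stand-in options object (optparse normally supplies the real one):

    # Sketch; FakeOptions stands in for the parsed CLI options object.
    from ansible.parsing import DataLoader
    from ansible.utils.vars import load_extra_vars

    class FakeOptions(object):
        extra_vars = [
            'version=1.2 release=jessie',   # key=value pairs -> parse_kv()
            '{"debug": true}',              # inline YAML/JSON -> loader.load()
            # '@vars.yml',                  # would be read via loader.load_from_file()
        ]

    print(load_extra_vars(loader=DataLoader(), options=FakeOptions()))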
diff --git a/v2/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py
similarity index 67%
rename from v2/ansible/vars/__init__.py
rename to lib/ansible/vars/__init__.py
index f30d52b7a3a..2a5be966900 100644
--- a/v2/ansible/vars/__init__.py
+++ b/lib/ansible/vars/__init__.py
@@ -22,6 +22,9 @@ __metaclass__ = type
import os
from collections import defaultdict
+from collections import MutableMapping
+
+from jinja2.exceptions import UndefinedError
try:
from hashlib import sha1
@@ -29,6 +32,7 @@ except ImportError:
from sha import sha as sha1
from ansible import constants as C
+from ansible.cli import CLI
from ansible.errors import *
from ansible.parsing import DataLoader
from ansible.plugins.cache import FactCache
@@ -71,9 +75,10 @@ class VariableManager:
''' ensures a clean copy of the extra_vars are made '''
return self._extra_vars.copy()
- def set_extra_vars(self, value):
+ @extra_vars.setter
+ def extra_vars(self, value):
''' ensures a clean copy of the extra_vars are used to set the value '''
- assert isinstance(value, dict)
+ assert isinstance(value, MutableMapping)
self._extra_vars = value.copy()
def set_inventory(self, inventory):
@@ -83,7 +88,7 @@ class VariableManager:
'''
Validates that both arguments are dictionaries, or an error is raised.
'''
- if not (isinstance(a, dict) and isinstance(b, dict)):
+ if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)):
raise AnsibleError("failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__))
def _combine_vars(self, a, b):
@@ -122,7 +127,7 @@ class VariableManager:
return result
- def get_vars(self, loader, play=None, host=None, task=None):
+ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True):
'''
Returns the variables, with optional "context" given via the parameters
for the play, host, and task (which could possibly result in different
@@ -144,7 +149,7 @@ class VariableManager:
debug("in VariableManager get_vars()")
cache_entry = self._get_cache_entry(play=play, host=host, task=task)
- if cache_entry in CACHED_VARS:
+ if cache_entry in CACHED_VARS and use_cache:
debug("vars are cached, returning them now")
return CACHED_VARS[cache_entry]
@@ -178,25 +183,49 @@ class VariableManager:
all_vars = self._combine_vars(all_vars, host.get_vars())
# next comes the facts cache and the vars cache, respectively
- all_vars = self._combine_vars(all_vars, self._fact_cache.get(host.get_name(), dict()))
+ try:
+ all_vars = self._combine_vars(all_vars, self._fact_cache.get(host.name, dict()))
+ except KeyError:
+ pass
if play:
all_vars = self._combine_vars(all_vars, play.get_vars())
- templar = Templar(loader=loader, variables=all_vars)
- for vars_file in play.get_vars_files():
+
+ for vars_file_item in play.get_vars_files():
try:
- vars_file = templar.template(vars_file)
- data = loader.load_from_file(vars_file)
- if data is None:
- data = dict()
- all_vars = self._combine_vars(all_vars, data)
- except:
+ # create a set of temporary vars here, which incorporate the
+ # extra vars so we can properly template the vars_files entries
+ temp_vars = self._combine_vars(all_vars, self._extra_vars)
+ templar = Templar(loader=loader, variables=temp_vars)
+
+ # we assume each item in the list is itself a list, as we
+ # support "conditional includes" for vars_files, which mimics
+ # the with_first_found mechanism.
+ vars_file_list = templar.template(vars_file_item)
+ if not isinstance(vars_file_list, list):
+ vars_file_list = [ vars_file_list ]
+
+ # now we iterate through the (potential) files, and break out
+ # as soon as we read one from the list. If none are found, we
+ # raise an error, which is silently ignored at this point.
+ for vars_file in vars_file_list:
+ data = loader.load_from_file(vars_file)
+ if data is not None:
+ all_vars = self._combine_vars(all_vars, data)
+ break
+ else:
+ raise AnsibleError("vars file %s was not found" % vars_file_item)
+ except AnsibleError as e:
# FIXME: get_vars should probably be taking a flag to determine
# whether or not vars files errors should be fatal at this
# stage, or just base it on whether a host was specified?
pass
- for role in play.get_roles():
- all_vars = self._combine_vars(all_vars, role.get_vars())
+ except UndefinedError as e:
+ continue
+
+ if not C.DEFAULT_PRIVATE_ROLE_VARS:
+ for role in play.get_roles():
+ all_vars = self._combine_vars(all_vars, role.get_vars())
if host:
all_vars = self._combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
@@ -211,17 +240,44 @@ class VariableManager:
# FIXME: make sure all special vars are here
# Finally, we create special vars
- if host and self._inventory is not None:
- hostvars = HostVars(vars_manager=self, inventory=self._inventory, loader=loader)
- all_vars['hostvars'] = hostvars
+ all_vars['playbook_dir'] = loader.get_basedir()
+
+ if host:
+ all_vars['groups'] = [group.name for group in host.get_groups()]
+
+ if self._inventory is not None:
+ hostvars = HostVars(vars_manager=self, play=play, inventory=self._inventory, loader=loader)
+ all_vars['hostvars'] = hostvars
+ all_vars['groups'] = self._inventory.groups_list()
+
+ if task:
+ if task._role:
+ all_vars['role_path'] = task._role._role_path
if self._inventory is not None:
all_vars['inventory_dir'] = self._inventory.basedir()
+ if play:
+ # add the list of hosts in the play, as adjusted for limit/filters
+ # FIXME: play_hosts should be deprecated in favor of ansible_play_hosts,
+ # however this would take work in the templating engine, so for now
+ # we'll add both so we can give users something transitional to use
+ host_list = [x.name for x in self._inventory.get_hosts()]
+ all_vars['play_hosts'] = host_list
+ all_vars['ansible_play_hosts'] = host_list
+
# the 'omit' value alows params to be left out if the variable they are based on is undefined
all_vars['omit'] = self._omit_token
- CACHED_VARS[cache_entry] = all_vars
+ all_vars['ansible_version'] = CLI.version_info(gitinfo=False)
+
+ # make vars self referential, so people can do things like 'vars[var_name]'
+ copied_vars = all_vars.copy()
+ if 'hostvars' in copied_vars:
+ del copied_vars['hostvars']
+ all_vars['vars'] = all_vars.copy()
+
+ #CACHED_VARS[cache_entry] = all_vars
debug("done with get_vars()")
return all_vars
@@ -260,12 +316,21 @@ class VariableManager:
paths = [os.path.join(path, name) for name in names if not name.startswith('.')]
for p in paths:
_found, results = self._load_inventory_file(path=p, loader=loader)
- data = self._combine_vars(data, results)
+ if results is not None:
+ data = self._combine_vars(data, results)
else:
- data = loader.load_from_file(path)
- if data is None:
- data = dict()
+ file_name, ext = os.path.splitext(path)
+ data = None
+ if not ext:
+ for ext in C.YAML_FILENAME_EXTENSIONS:
+ new_path = path + ext
+ if loader.path_exists(new_path):
+ data = loader.load_from_file(new_path)
+ break
+ else:
+ if loader.path_exists(path):
+ data = loader.load_from_file(path)
name = self._get_inventory_basename(path)
return (name, data)
@@ -277,9 +342,12 @@ class VariableManager:
the extension, for matching against a given inventory host name
'''
- if loader.path_exists(path):
- (name, data) = self._load_inventory_file(path, loader)
+ (name, data) = self._load_inventory_file(path, loader)
+ if data:
self._host_vars_files[name] = data
+ return data
+ else:
+ return dict()
def add_group_vars_file(self, path, loader):
'''
@@ -288,9 +356,12 @@ class VariableManager:
the extension, for matching against a given inventory host name
'''
- if loader.path_exists(path):
- (name, data) = self._load_inventory_file(path, loader)
+ (name, data) = self._load_inventory_file(path, loader)
+ if data:
self._group_vars_files[name] = data
+ return data
+ else:
+ return dict()
def set_host_facts(self, host, facts):
'''
@@ -299,11 +370,13 @@ class VariableManager:
assert isinstance(facts, dict)
- host_name = host.get_name()
- if host_name not in self._fact_cache:
- self._fact_cache[host_name] = facts
+ if host.name not in self._fact_cache:
+ self._fact_cache[host.name] = facts
else:
- self._fact_cache[host_name].update(facts)
+ try:
+ self._fact_cache[host.name].update(facts)
+ except KeyError:
+ self._fact_cache[host.name] = facts
def set_host_variable(self, host, varname, value):
'''
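One user-visible change above is that extra_vars is now exposed as a property whose getter returns a copy and whose setter replaces the old set_extra_vars(). A hedged sketch, assuming VariableManager still constructs without arguments at this point in the series:

    # Sketch only; relies on VariableManager() taking no required arguments here.
    from ansible.vars import VariableManager

    variable_manager = VariableManager()
    variable_manager.extra_vars = {'env': 'staging'}   # setter requires a MutableMapping
    print(variable_manager.extra_vars)                  # getter returns a copy, not the dict itself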
diff --git a/v2/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py
similarity index 59%
rename from v2/ansible/vars/hostvars.py
rename to lib/ansible/vars/hostvars.py
index 45b3340229d..766efb5ed3b 100644
--- a/v2/ansible/vars/hostvars.py
+++ b/lib/ansible/vars/hostvars.py
@@ -19,29 +19,43 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import collections
+
+from jinja2 import Undefined as j2undefined
+
from ansible.template import Templar
__all__ = ['HostVars']
-class HostVars(dict):
+# Note -- this is a Mapping, not a MutableMapping
+class HostVars(collections.Mapping):
''' A special view of vars_cache that adds values from the inventory when needed. '''
- def __init__(self, vars_manager, inventory, loader):
+ def __init__(self, vars_manager, play, inventory, loader):
self._vars_manager = vars_manager
+ self._play = play
self._inventory = inventory
self._loader = loader
self._lookup = {}
- #self.update(vars_cache)
-
def __getitem__(self, host_name):
-
+
if host_name not in self._lookup:
host = self._inventory.get_host(host_name)
- result = self._vars_manager.get_vars(loader=self._loader, host=host)
- #result.update(self._vars_cache.get(host, {}))
- #templar = Templar(variables=self._vars_cache, loader=self._loader)
- #self._lookup[host] = templar.template(result)
- self._lookup[host_name] = result
+ if not host:
+ return j2undefined
+ result = self._vars_manager.get_vars(loader=self._loader, play=self._play, host=host)
+ templar = Templar(variables=result, loader=self._loader)
+ self._lookup[host_name] = templar.template(result, fail_on_undefined=False)
return self._lookup[host_name]
+ def __contains__(self, host_name):
+ item = self.get(host_name)
+ if item and item is not j2undefined:
+ return True
+ return False
+ def __iter__(self):
+ raise NotImplementedError('HostVars does not support iteration as hosts are discovered on an as needed basis.')
+
+ def __len__(self):
+ raise NotImplementedError('HostVars does not support len. hosts entries are discovered dynamically as needed')
diff --git a/packaging/debian/README.md b/packaging/debian/README.md
index 62c6af084c0..a8150ff30f1 100644
--- a/packaging/debian/README.md
+++ b/packaging/debian/README.md
@@ -3,7 +3,7 @@ Ansible Debian Package
To create an Ansible DEB package:
- sudo apt-get install python-paramiko python-yaml python-jinja2 python-httplib2 python-setuptools sshpass
+ sudo apt-get install python-paramiko python-yaml python-jinja2 python-httplib2 python-setuptools python-six sshpass
sudo apt-get install cdbs debhelper dpkg-dev git-core reprepro python-support fakeroot asciidoc devscripts
git clone git://github.com/ansible/ansible.git
cd ansible
diff --git a/packaging/debian/control b/packaging/debian/control
index 14d737444e7..73e1cc92021 100644
--- a/packaging/debian/control
+++ b/packaging/debian/control
@@ -8,7 +8,7 @@ Homepage: http://ansible.github.com/
Package: ansible
Architecture: all
-Depends: python, python-support (>= 0.90), python-jinja2, python-yaml, python-paramiko, python-httplib2, python-crypto (>= 2.6), sshpass, ${misc:Depends}
+Depends: python, python-support (>= 0.90), python-jinja2, python-yaml, python-paramiko, python-httplib2, python-six, python-crypto (>= 2.6), sshpass, ${misc:Depends}
Description: A radically simple IT automation platform
A radically simple IT automation platform that makes your applications and
systems easier to deploy. Avoid writing scripts or custom code to deploy and
diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec
index 394017dc0fb..ddda6eeb798 100644
--- a/packaging/rpm/ansible.spec
+++ b/packaging/rpm/ansible.spec
@@ -28,6 +28,7 @@ Requires: python26-jinja2
Requires: python26-keyczar
Requires: python26-httplib2
Requires: python26-setuptools
+Requires: python26-six
%endif
# RHEL == 6
@@ -45,6 +46,7 @@ Requires: python-jinja2
Requires: python-keyczar
Requires: python-httplib2
Requires: python-setuptools
+Requires: python-six
%endif
# FEDORA > 17
@@ -57,6 +59,7 @@ Requires: python-jinja2
Requires: python-keyczar
Requires: python-httplib2
Requires: python-setuptools
+Requires: python-six
%endif
# SuSE/openSuSE
@@ -69,6 +72,7 @@ Requires: python-keyczar
Requires: python-yaml
Requires: python-httplib2
Requires: python-setuptools
+Requires: python-six
%endif
Requires: sshpass
diff --git a/plugins/README.md b/plugins/README.md
deleted file mode 100644
index 8d705372a51..00000000000
--- a/plugins/README.md
+++ /dev/null
@@ -1,35 +0,0 @@
-ansible-plugins
-===============
-
-You can extend ansible with optional callback and connection plugins.
-
-callbacks
-=========
-
-Callbacks can be used to add logging or monitoring capability, or just make
-interesting sound effects.
-
-Drop callback plugins in your ansible/lib/callback_plugins/ directory.
-
-connections
-===========
-
-Connection plugins allow ansible to talk over different protocols.
-
-Drop connection plugins in your ansible/lib/runner/connection_plugins/ directory.
-
-inventory
-=========
-
-Inventory plugins allow you to store your hosts, groups, and variables in any way
-you like. Examples include discovering inventory from EC2 or pulling it from
-Cobbler. These could also be used to interface with LDAP or database.
-
-chmod +x an inventory plugin and either name it /etc/ansible/hosts or use ansible
-with -i to designate the path to the plugin.
-
-contributions welcome
-=====================
-
-Send in pull requests to add plugins of your own. The sky is the limit!
-
diff --git a/plugins/callbacks/log_plays.py b/plugins/callbacks/log_plays.py
deleted file mode 100644
index dbe16b312c1..00000000000
--- a/plugins/callbacks/log_plays.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# (C) 2012, Michael DeHaan,
-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-import os
-import time
-import json
-
-# NOTE: in Ansible 1.2 or later general logging is available without
-# this plugin, just set ANSIBLE_LOG_PATH as an environment variable
-# or log_path in the DEFAULTS section of your ansible configuration
-# file. This callback is an example of per hosts logging for those
-# that want it.
-
-TIME_FORMAT="%b %d %Y %H:%M:%S"
-MSG_FORMAT="%(now)s - %(category)s - %(data)s\n\n"
-
-if not os.path.exists("/var/log/ansible/hosts"):
- os.makedirs("/var/log/ansible/hosts")
-
-def log(host, category, data):
- if type(data) == dict:
- if 'verbose_override' in data:
- # avoid logging extraneous data from facts
- data = 'omitted'
- else:
- data = data.copy()
- invocation = data.pop('invocation', None)
- data = json.dumps(data)
- if invocation is not None:
- data = json.dumps(invocation) + " => %s " % data
-
- path = os.path.join("/var/log/ansible/hosts", host)
- now = time.strftime(TIME_FORMAT, time.localtime())
- fd = open(path, "a")
- fd.write(MSG_FORMAT % dict(now=now, category=category, data=data))
- fd.close()
-
-class CallbackModule(object):
- """
- logs playbook results, per host, in /var/log/ansible/hosts
- """
-
- def on_any(self, *args, **kwargs):
- pass
-
- def runner_on_failed(self, host, res, ignore_errors=False):
- log(host, 'FAILED', res)
-
- def runner_on_ok(self, host, res):
- log(host, 'OK', res)
-
- def runner_on_skipped(self, host, item=None):
- log(host, 'SKIPPED', '...')
-
- def runner_on_unreachable(self, host, res):
- log(host, 'UNREACHABLE', res)
-
- def runner_on_no_hosts(self):
- pass
-
- def runner_on_async_poll(self, host, res, jid, clock):
- pass
-
- def runner_on_async_ok(self, host, res, jid):
- pass
-
- def runner_on_async_failed(self, host, res, jid):
- log(host, 'ASYNC_FAILED', res)
-
- def playbook_on_start(self):
- pass
-
- def playbook_on_notify(self, host, handler):
- pass
-
- def playbook_on_no_hosts_matched(self):
- pass
-
- def playbook_on_no_hosts_remaining(self):
- pass
-
- def playbook_on_task_start(self, name, is_conditional):
- pass
-
- def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
- pass
-
- def playbook_on_setup(self):
- pass
-
- def playbook_on_import_for_host(self, host, imported_file):
- log(host, 'IMPORTED', imported_file)
-
- def playbook_on_not_import_for_host(self, host, missing_file):
- log(host, 'NOTIMPORTED', missing_file)
-
- def playbook_on_play_start(self, name):
- pass
-
- def playbook_on_stats(self, stats):
- pass
-
diff --git a/plugins/callbacks/syslog_json.py b/plugins/callbacks/syslog_json.py
deleted file mode 100644
index 5ab764acfe7..00000000000
--- a/plugins/callbacks/syslog_json.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import os
-import json
-
-import logging
-import logging.handlers
-
-
-class CallbackModule(object):
- """
- logs ansible-playbook and ansible runs to a syslog server in json format
- make sure you have in ansible.cfg:
- callback_plugins =
- and put the plugin in
-
- This plugin makes use of the following environment variables:
- SYSLOG_SERVER (optional): defaults to localhost
- SYSLOG_PORT (optional): defaults to 514
- """
-
- def __init__(self):
- self.logger = logging.getLogger('ansible logger')
- self.logger.setLevel(logging.DEBUG)
-
- self.handler = logging.handlers.SysLogHandler(
- address = (os.getenv('SYSLOG_SERVER','locahost'),
- os.getenv('SYSLOG_PORT',514)),
- facility=logging.handlers.SysLogHandler.LOG_USER
- )
- self.logger.addHandler(handler)
-
- def on_any(self, *args, **kwargs):
- pass
-
- def runner_on_failed(self, host, res, ignore_errors=False):
- self.logger.info('RUNNER_ON_FAILED ' + host + ' ' + json.dumps(res, sort_keys=True))
-
- def runner_on_ok(self, host, res):
- self.logger.info('RUNNER_ON_OK ' + host + ' ' + json.dumps(res, sort_keys=True))
-
- def runner_on_skipped(self, host, item=None):
- self.logger.info('RUNNER_ON_SKIPPED ' + host + ' ...')
-
- def runner_on_unreachable(self, host, res):
- self.logger.info('RUNNER_UNREACHABLE ' + host + ' ' + json.dumps(res, sort_keys=True))
-
- def runner_on_no_hosts(self):
- pass
-
- def runner_on_async_poll(self, host, res):
- pass
-
- def runner_on_async_ok(self, host, res):
- pass
-
- def runner_on_async_failed(self, host, res):
- self.logger.info('RUNNER_SYNC_FAILED ' + host + ' ' + json.dumps(res, sort_keys=True))
-
- def playbook_on_start(self):
- pass
-
- def playbook_on_notify(self, host, handler):
- pass
-
- def playbook_on_no_hosts_matched(self):
- pass
-
- def playbook_on_no_hosts_remaining(self):
- pass
-
- def playbook_on_task_start(self, name, is_conditional):
- pass
-
- def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
- pass
-
- def playbook_on_setup(self):
- pass
-
- def playbook_on_import_for_host(self, host, imported_file):
- self.logger.info('PLAYBOOK_ON_IMPORTED ' + host + ' ' + json.dumps(res, sort_keys=True))
-
- def playbook_on_not_import_for_host(self, host, missing_file):
- self.logger.info('PLAYBOOK_ON_NOTIMPORTED ' + host + ' ' + json.dumps(res, sort_keys=True))
-
- def playbook_on_play_start(self, name):
- pass
-
- def playbook_on_stats(self, stats):
- pass
diff --git a/plugins/callbacks/timer.py b/plugins/callbacks/timer.py
deleted file mode 100644
index bca867c2638..00000000000
--- a/plugins/callbacks/timer.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import os
-import datetime
-from datetime import datetime, timedelta
-
-
-class CallbackModule(object):
- """
- This callback module tells you how long your plays ran for.
- """
-
- start_time = datetime.now()
-
- def __init__(self):
- start_time = datetime.now()
- print "Timer plugin is active."
-
- def days_hours_minutes_seconds(self, timedelta):
- minutes = (timedelta.seconds//60)%60
- r_seconds = timedelta.seconds - (minutes * 60)
- return timedelta.days, timedelta.seconds//3600, minutes, r_seconds
-
- def playbook_on_stats(self, stats):
- end_time = datetime.now()
- timedelta = end_time - self.start_time
- print "Playbook run took %s days, %s hours, %s minutes, %s seconds" % (self.days_hours_minutes_seconds(timedelta))
-
-
diff --git a/plugins/connections/README.md b/plugins/connections/README.md
deleted file mode 100644
index ec857be9e24..00000000000
--- a/plugins/connections/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-Connections are also pluggable, see lib/ansible/runner/connection_plugins/ for the ones that ship with ansible.
-
-When non-core alternatives are available, they can be shared here.
-
diff --git a/v2/samples/README.md b/samples/README.md
similarity index 100%
rename from v2/samples/README.md
rename to samples/README.md
diff --git a/v2/samples/common_include.yml b/samples/common_include.yml
similarity index 100%
rename from v2/samples/common_include.yml
rename to samples/common_include.yml
diff --git a/v2/samples/hosts b/samples/hosts
similarity index 100%
rename from v2/samples/hosts
rename to samples/hosts
diff --git a/v2/samples/ignore_errors.yml b/samples/ignore_errors.yml
similarity index 100%
rename from v2/samples/ignore_errors.yml
rename to samples/ignore_errors.yml
diff --git a/v2/samples/include.yml b/samples/include.yml
similarity index 100%
rename from v2/samples/include.yml
rename to samples/include.yml
diff --git a/samples/included_playbook.yml b/samples/included_playbook.yml
new file mode 100644
index 00000000000..d56e9c68f7f
--- /dev/null
+++ b/samples/included_playbook.yml
@@ -0,0 +1,6 @@
+- hosts: localhost
+ gather_facts: no
+ tags:
+ - included
+ tasks:
+ - debug: msg="included playbook, variable is {{a}}"
diff --git a/v2/samples/inv_lg b/samples/inv_lg
similarity index 100%
rename from v2/samples/inv_lg
rename to samples/inv_lg
diff --git a/v2/samples/inv_md b/samples/inv_md
similarity index 100%
rename from v2/samples/inv_md
rename to samples/inv_md
diff --git a/v2/samples/inv_sm b/samples/inv_sm
similarity index 100%
rename from v2/samples/inv_sm
rename to samples/inv_sm
diff --git a/v2/samples/l1_include.yml b/samples/l1_include.yml
similarity index 100%
rename from v2/samples/l1_include.yml
rename to samples/l1_include.yml
diff --git a/v2/samples/l2_include.yml b/samples/l2_include.yml
similarity index 100%
rename from v2/samples/l2_include.yml
rename to samples/l2_include.yml
diff --git a/v2/samples/l3_include.yml b/samples/l3_include.yml
similarity index 100%
rename from v2/samples/l3_include.yml
rename to samples/l3_include.yml
diff --git a/v2/samples/localhost_include.yml b/samples/localhost_include.yml
similarity index 100%
rename from v2/samples/localhost_include.yml
rename to samples/localhost_include.yml
diff --git a/v2/samples/localhosts b/samples/localhosts
similarity index 100%
rename from v2/samples/localhosts
rename to samples/localhosts
diff --git a/v2/samples/lookup_file.yml b/samples/lookup_file.yml
similarity index 100%
rename from v2/samples/lookup_file.yml
rename to samples/lookup_file.yml
diff --git a/v2/samples/lookup_password.yml b/samples/lookup_password.yml
similarity index 100%
rename from v2/samples/lookup_password.yml
rename to samples/lookup_password.yml
diff --git a/v2/samples/lookup_pipe.py b/samples/lookup_pipe.py
similarity index 100%
rename from v2/samples/lookup_pipe.py
rename to samples/lookup_pipe.py
diff --git a/v2/samples/lookup_template.yml b/samples/lookup_template.yml
similarity index 100%
rename from v2/samples/lookup_template.yml
rename to samples/lookup_template.yml
diff --git a/v2/samples/multi.py b/samples/multi.py
similarity index 100%
rename from v2/samples/multi.py
rename to samples/multi.py
diff --git a/v2/samples/multi_queues.py b/samples/multi_queues.py
similarity index 96%
rename from v2/samples/multi_queues.py
rename to samples/multi_queues.py
index 9e8f22b9a94..673bb01de60 100644
--- a/v2/samples/multi_queues.py
+++ b/samples/multi_queues.py
@@ -9,8 +9,8 @@ import multiprocessing
from ansible.inventory import Inventory
from ansible.inventory.host import Host
from ansible.playbook.play import Play
+from ansible.playbook.play_context import PlayContext
from ansible.playbook.task import Task
-from ansible.executor.connection_info import ConnectionInformation
from ansible.executor.task_executor import TaskExecutor
from ansible.executor.task_result import TaskResult
from ansible.parsing import DataLoader
@@ -144,8 +144,8 @@ inventory = Inventory(host_list='/tmp/med_inventory', loader=loader, variable_ma
hosts = inventory.get_hosts()[:]
debug("done loading inventory")
-ci = ConnectionInformation()
-ci.connection = 'local'
+play_context = PlayContext()
+play_context.connection = 'local'
for i in range(NUM_TASKS):
#for j in range(NUM_HOSTS):
@@ -158,7 +158,7 @@ for i in range(NUM_TASKS):
task_vars = dict()
new_t = t.copy()
new_t.post_validate(task_vars)
- send_data((h, t, task_vars, ci))
+ send_data((h, t, task_vars, play_context))
debug("done queuing %s %d" % (h, i))
_process_pending_results()
debug("waiting for the results to drain...")
diff --git a/v2/samples/roles/common/meta/main.yml b/samples/roles/common/meta/main.yml
similarity index 100%
rename from v2/samples/roles/common/meta/main.yml
rename to samples/roles/common/meta/main.yml
diff --git a/v2/samples/roles/common/tasks/main.yml b/samples/roles/common/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/common/tasks/main.yml
rename to samples/roles/common/tasks/main.yml
diff --git a/v2/samples/roles/role_a/meta/main.yml b/samples/roles/role_a/meta/main.yml
similarity index 100%
rename from v2/samples/roles/role_a/meta/main.yml
rename to samples/roles/role_a/meta/main.yml
diff --git a/v2/samples/roles/role_a/tasks/main.yml b/samples/roles/role_a/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/role_a/tasks/main.yml
rename to samples/roles/role_a/tasks/main.yml
diff --git a/v2/samples/roles/role_b/meta/main.yml b/samples/roles/role_b/meta/main.yml
similarity index 100%
rename from v2/samples/roles/role_b/meta/main.yml
rename to samples/roles/role_b/meta/main.yml
diff --git a/v2/samples/roles/role_b/tasks/main.yml b/samples/roles/role_b/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/role_b/tasks/main.yml
rename to samples/roles/role_b/tasks/main.yml
diff --git a/v2/samples/roles/test_become_r1/meta/main.yml b/samples/roles/test_become_r1/meta/main.yml
similarity index 100%
rename from v2/samples/roles/test_become_r1/meta/main.yml
rename to samples/roles/test_become_r1/meta/main.yml
diff --git a/v2/samples/roles/test_become_r1/tasks/main.yml b/samples/roles/test_become_r1/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/test_become_r1/tasks/main.yml
rename to samples/roles/test_become_r1/tasks/main.yml
diff --git a/v2/samples/roles/test_become_r2/meta/main.yml b/samples/roles/test_become_r2/meta/main.yml
similarity index 100%
rename from v2/samples/roles/test_become_r2/meta/main.yml
rename to samples/roles/test_become_r2/meta/main.yml
diff --git a/v2/samples/roles/test_become_r2/tasks/main.yml b/samples/roles/test_become_r2/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/test_become_r2/tasks/main.yml
rename to samples/roles/test_become_r2/tasks/main.yml
diff --git a/v2/samples/roles/test_role/meta/main.yml b/samples/roles/test_role/meta/main.yml
similarity index 100%
rename from v2/samples/roles/test_role/meta/main.yml
rename to samples/roles/test_role/meta/main.yml
diff --git a/v2/samples/roles/test_role/tasks/main.yml b/samples/roles/test_role/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/test_role/tasks/main.yml
rename to samples/roles/test_role/tasks/main.yml
diff --git a/v2/samples/roles/test_role_dep/tasks/main.yml b/samples/roles/test_role_dep/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/test_role_dep/tasks/main.yml
rename to samples/roles/test_role_dep/tasks/main.yml
diff --git a/v2/samples/src b/samples/src
similarity index 100%
rename from v2/samples/src
rename to samples/src
diff --git a/v2/samples/template.j2 b/samples/template.j2
similarity index 100%
rename from v2/samples/template.j2
rename to samples/template.j2
diff --git a/v2/samples/test_become.yml b/samples/test_become.yml
similarity index 100%
rename from v2/samples/test_become.yml
rename to samples/test_become.yml
diff --git a/v2/samples/test_big_debug.yml b/samples/test_big_debug.yml
similarity index 100%
rename from v2/samples/test_big_debug.yml
rename to samples/test_big_debug.yml
diff --git a/v2/samples/test_big_ping.yml b/samples/test_big_ping.yml
similarity index 100%
rename from v2/samples/test_big_ping.yml
rename to samples/test_big_ping.yml
diff --git a/v2/samples/test_block.yml b/samples/test_block.yml
similarity index 100%
rename from v2/samples/test_block.yml
rename to samples/test_block.yml
diff --git a/v2/samples/test_blocks_of_blocks.yml b/samples/test_blocks_of_blocks.yml
similarity index 100%
rename from v2/samples/test_blocks_of_blocks.yml
rename to samples/test_blocks_of_blocks.yml
diff --git a/v2/samples/test_fact_gather.yml b/samples/test_fact_gather.yml
similarity index 100%
rename from v2/samples/test_fact_gather.yml
rename to samples/test_fact_gather.yml
diff --git a/v2/samples/test_free.yml b/samples/test_free.yml
similarity index 100%
rename from v2/samples/test_free.yml
rename to samples/test_free.yml
diff --git a/v2/samples/test_include.yml b/samples/test_include.yml
similarity index 100%
rename from v2/samples/test_include.yml
rename to samples/test_include.yml
diff --git a/v2/samples/test_pb.yml b/samples/test_pb.yml
similarity index 100%
rename from v2/samples/test_pb.yml
rename to samples/test_pb.yml
diff --git a/samples/test_play_failure.yml b/samples/test_play_failure.yml
new file mode 100644
index 00000000000..b33fc2e757c
--- /dev/null
+++ b/samples/test_play_failure.yml
@@ -0,0 +1,9 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - fail:
+
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - debug: msg="you should not see me..."
diff --git a/samples/test_playbook.include b/samples/test_playbook.include
new file mode 100644
index 00000000000..95c1a821471
--- /dev/null
+++ b/samples/test_playbook.include
@@ -0,0 +1,2 @@
+- include: included_playbook.yml a=1
+ tags: include
diff --git a/v2/samples/test_role.yml b/samples/test_role.yml
similarity index 100%
rename from v2/samples/test_role.yml
rename to samples/test_role.yml
diff --git a/v2/samples/test_roles_complex.yml b/samples/test_roles_complex.yml
similarity index 100%
rename from v2/samples/test_roles_complex.yml
rename to samples/test_roles_complex.yml
diff --git a/v2/samples/test_run_once.yml b/samples/test_run_once.yml
similarity index 100%
rename from v2/samples/test_run_once.yml
rename to samples/test_run_once.yml
diff --git a/v2/samples/test_sudo.yml b/samples/test_sudo.yml
similarity index 100%
rename from v2/samples/test_sudo.yml
rename to samples/test_sudo.yml
diff --git a/v2/samples/test_tags.yml b/samples/test_tags.yml
similarity index 100%
rename from v2/samples/test_tags.yml
rename to samples/test_tags.yml
diff --git a/v2/samples/testing/extra_vars.yml b/samples/testing/extra_vars.yml
similarity index 100%
rename from v2/samples/testing/extra_vars.yml
rename to samples/testing/extra_vars.yml
diff --git a/v2/samples/testing/frag1 b/samples/testing/frag1
similarity index 100%
rename from v2/samples/testing/frag1
rename to samples/testing/frag1
diff --git a/v2/samples/testing/frag2 b/samples/testing/frag2
similarity index 100%
rename from v2/samples/testing/frag2
rename to samples/testing/frag2
diff --git a/v2/samples/testing/frag3 b/samples/testing/frag3
similarity index 100%
rename from v2/samples/testing/frag3
rename to samples/testing/frag3
diff --git a/v2/samples/testing/vars.yml b/samples/testing/vars.yml
similarity index 100%
rename from v2/samples/testing/vars.yml
rename to samples/testing/vars.yml
diff --git a/v2/samples/with_dict.yml b/samples/with_dict.yml
similarity index 100%
rename from v2/samples/with_dict.yml
rename to samples/with_dict.yml
diff --git a/v2/samples/with_env.yml b/samples/with_env.yml
similarity index 100%
rename from v2/samples/with_env.yml
rename to samples/with_env.yml
diff --git a/v2/samples/with_fileglob.yml b/samples/with_fileglob.yml
similarity index 100%
rename from v2/samples/with_fileglob.yml
rename to samples/with_fileglob.yml
diff --git a/v2/samples/with_first_found.yml b/samples/with_first_found.yml
similarity index 100%
rename from v2/samples/with_first_found.yml
rename to samples/with_first_found.yml
diff --git a/v2/samples/with_flattened.yml b/samples/with_flattened.yml
similarity index 100%
rename from v2/samples/with_flattened.yml
rename to samples/with_flattened.yml
diff --git a/v2/samples/with_indexed_items.yml b/samples/with_indexed_items.yml
similarity index 100%
rename from v2/samples/with_indexed_items.yml
rename to samples/with_indexed_items.yml
diff --git a/v2/samples/with_items.yml b/samples/with_items.yml
similarity index 100%
rename from v2/samples/with_items.yml
rename to samples/with_items.yml
diff --git a/v2/samples/with_lines.yml b/samples/with_lines.yml
similarity index 100%
rename from v2/samples/with_lines.yml
rename to samples/with_lines.yml
diff --git a/v2/samples/with_nested.yml b/samples/with_nested.yml
similarity index 100%
rename from v2/samples/with_nested.yml
rename to samples/with_nested.yml
diff --git a/v2/samples/with_random_choice.yml b/samples/with_random_choice.yml
similarity index 100%
rename from v2/samples/with_random_choice.yml
rename to samples/with_random_choice.yml
diff --git a/v2/samples/with_sequence.yml b/samples/with_sequence.yml
similarity index 100%
rename from v2/samples/with_sequence.yml
rename to samples/with_sequence.yml
diff --git a/v2/samples/with_subelements.yml b/samples/with_subelements.yml
similarity index 100%
rename from v2/samples/with_subelements.yml
rename to samples/with_subelements.yml
diff --git a/v2/samples/with_together.yml b/samples/with_together.yml
similarity index 100%
rename from v2/samples/with_together.yml
rename to samples/with_together.yml
diff --git a/setup.py b/setup.py
index 37527414067..60c7d73ffc2 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,6 @@
import os
import sys
-from glob import glob
sys.path.insert(0, os.path.abspath('lib'))
from ansible import __version__, __author__
@@ -18,15 +17,30 @@ setup(name='ansible',
version=__version__,
description='Radically simple IT automation',
author=__author__,
- author_email='michael@ansible.com',
+ author_email='support@ansible.com',
url='http://ansible.com/',
license='GPLv3',
- install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6'],
+ install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6', 'six'],
package_dir={ '': 'lib' },
packages=find_packages('lib'),
package_data={
'': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1'],
},
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Environment :: Console',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: Information Technology',
+ 'Intended Audience :: System Administrators',
+ 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
+ 'Natural Language :: English',
+ 'Operating System :: POSIX',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Topic :: System :: Installation/Setup',
+ 'Topic :: System :: Systems Administration',
+ 'Topic :: Utilities',
+ ],
scripts=[
'bin/ansible',
'bin/ansible-playbook',
diff --git a/test-requirements.txt b/test-requirements.txt
index abb61ed1e97..6cc4f9fd8e4 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -3,7 +3,8 @@
#
nose
-mock
+mock >= 1.0.1, < 1.1
passlib
coverage
coveralls
+unittest2
diff --git a/test/integration/Makefile b/test/integration/Makefile
index 923a29bc9fe..af1bee32017 100644
--- a/test/integration/Makefile
+++ b/test/integration/Makefile
@@ -24,11 +24,7 @@ CONSUL_RUNNING := $(shell python consul_running.py)
all: parsing test_var_precedence unicode test_templating_settings non_destructive destructive includes check_mode test_hash test_handlers test_group_by test_vault test_tags
parsing:
- ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario1; [ $$? -eq 3 ]
- ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario2; [ $$? -eq 3 ]
- ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario3; [ $$? -eq 3 ]
- ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario4; [ $$? -eq 3 ]
- ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5; [ $$? -eq 3 ]
+ ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5
ansible-playbook good_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS)
includes:
@@ -55,7 +51,8 @@ test_group_by:
ansible-playbook test_group_by.yml -i inventory.group_by -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS)
test_handlers:
- ansible-playbook test_handlers.yml -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS)
+ ansible-playbook test_handlers.yml --tags scenario1 -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS)
+ [ "$$(ansible-playbook test_handlers.yml --tags scenario2 -l A -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | grep -Po 'RUNNING HANDLER \[test_handlers : \K[^\]]+')" = "test handler" ]
# Not forcing, should only run on successful host
[ "$$(ansible-playbook test_force_handlers.yml --tags normal -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_B" ]
# Forcing from command line
@@ -76,7 +73,7 @@ test_hash:
ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}'
test_var_precedence:
- ansible-playbook test_var_precedence.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e 'extra_var=extra_var' -e 'extra_var_override=extra_var_override'
+ ansible-playbook test_var_precedence.yml -i $(INVENTORY) $(CREDENTIALS_ARG) $(TEST_FLAGS) -v -e 'extra_var=extra_var' -e 'extra_var_override=extra_var_override'
test_vault:
ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) --list-tasks
@@ -163,7 +160,7 @@ $(CONSUL_RUNNING):
consul:
ifeq ($(CONSUL_RUNNING), True)
ansible-playbook -i $(INVENTORY) consul.yml ; \
- ansible-playbook -i ../../plugins/inventory/consul_io.py consul_inventory.yml
+ ansible-playbook -i ../../contrib/inventory/consul_io.py consul_inventory.yml
else
@echo "Consul agent is not running locally. To run a cluster locally see http://github.com/sgargan/consul-vagrant"
endif
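(Editor's illustrative sketch, not part of the patch: the new scenario2 check above greps the playbook output for the literal line "RUNNING HANDLER [test_handlers : test handler]", i.e. it expects the handler to fire exactly once even when it is notified from several loop iterations. Roughly, the behaviour being exercised looks like this; host and play names are placeholders.)

- hosts: A
  gather_facts: no
  tasks:
    - name: notify the same handler from a loop
      debug: msg='a task'
      changed_when: item == 1      # only one iteration reports "changed"
      notify: test handler
      with_items: [1, 2]
  handlers:
    - name: test handler
      debug: msg="handler called"  # runs once when handlers are flushed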
diff --git a/test/integration/cleanup_rax.py b/test/integration/cleanup_rax.py
index 95f8ba2f0ae..f872e9458db 100644
--- a/test/integration/cleanup_rax.py
+++ b/test/integration/cleanup_rax.py
@@ -138,6 +138,26 @@ def delete_rax_cdb(args):
args.assumeyes)
+def _force_delete_rax_scaling_group(manager):
+ def wrapped(uri):
+ manager.api.method_delete('%s?force=true' % uri)
+ return wrapped
+
+
+def delete_rax_scaling_group(args):
+ """Function for deleting Autoscale Groups"""
+ print ("--- Cleaning Autoscale Groups matching '%s'" % args.match_re)
+ for region in pyrax.identity.services.autoscale.regions:
+ asg = pyrax.connect_to_autoscale(region=region)
+ for group in rax_list_iterator(asg):
+ if re.search(args.match_re, group.name):
+ group.manager._delete = \
+ _force_delete_rax_scaling_group(group.manager)
+ prompt_and_delete(group,
+ 'Delete matching %s? [y/n]: ' % group,
+ args.assumeyes)
+
+
def main():
if not HAS_PYRAX:
raise SystemExit('The pyrax python module is required for this script')
diff --git a/test/integration/credentials.template b/test/integration/credentials.template
index 78594aca97c..fb052a42c2a 100644
--- a/test/integration/credentials.template
+++ b/test/integration/credentials.template
@@ -14,8 +14,8 @@ pem_file:
project_id:
# Azure Credentials
-azure_subscription_id:
-azure_cert_path:
+azure_subscription_id: "{{ lookup('env', 'AZURE_SUBSCRIPTION_ID') }}"
+azure_cert_path: "{{ lookup('env', 'AZURE_CERT_PATH') }}"
# GITHUB SSH private key - a path to a SSH private key for use with github.com
github_ssh_private_key: "{{ lookup('env','HOME') }}/.ssh/id_rsa"
diff --git a/test/integration/integration_config.yml b/test/integration/integration_config.yml
index bf5d6db3de6..34a7cbf73d7 100644
--- a/test/integration/integration_config.yml
+++ b/test/integration/integration_config.yml
@@ -1,5 +1,5 @@
---
-win_output_dir: 'C:/temp/'
+win_output_dir: 'C:\ansible_testing'
output_dir: ~/ansible_testing
non_root_test_user: ansible
pip_test_package: epdb
diff --git a/test/integration/non_destructive.yml b/test/integration/non_destructive.yml
index 0c4c5be4965..668b20de954 100644
--- a/test/integration/non_destructive.yml
+++ b/test/integration/non_destructive.yml
@@ -11,6 +11,15 @@
gather_facts: True
roles:
- { role: test_ping, tags: test_ping }
+ - { role: test_var_blending, parameterized_beats_default: 1234, tags: test_var_blending }
+ - { role: test_special_vars, tags: test_special_vars }
+ - { role: test_ignore_errors, tags: test_ignore_errors }
+ - { role: test_conditionals, tags: test_conditionals }
+ - { role: test_iterators, tags: test_iterators }
+ - { role: test_lookups, tags: test_lookups }
+ - { role: test_changed_when, tags: test_changed_when }
+ - { role: test_failed_when, tags: test_failed_when }
+ - { role: test_handlers, tags: test_handlers }
- { role: test_copy, tags: test_copy }
- { role: test_stat, tags: test_stat }
- { role: test_template, tags: test_template }
@@ -21,20 +30,12 @@
- { role: test_subversion, tags: test_subversion }
- { role: test_git, tags: test_git }
- { role: test_hg, tags: test_hg }
- - { role: test_changed_when, tags: test_changed_when }
- - { role: test_var_blending, parameterized_beats_default: 1234, tags: test_var_blending }
- { role: test_lineinfile, tags: test_lineinfile }
- - { role: test_ignore_errors, tags: test_ignore_errors }
- { role: test_unarchive, tags: test_unarchive }
- { role: test_filters, tags: test_filters }
- { role: test_facts_d, tags: test_facts_d }
- - { role: test_conditionals, tags: test_conditionals }
- { role: test_async, tags: test_async }
- - { role: test_handlers, tags: test_handlers }
- - { role: test_lookups, tags: test_lookups }
- - { role: test_iterators, tags: test_iterators }
- { role: test_command_shell, tags: test_command_shell }
- - { role: test_failed_when, tags: test_failed_when }
- { role: test_script, tags: test_script }
- { role: test_authorized_key, tags: test_authorized_key }
- { role: test_get_url, tags: test_get_url }
diff --git a/test/integration/rackspace.yml b/test/integration/rackspace.yml
index 37f9b097b9c..0fd56dc300b 100644
--- a/test/integration/rackspace.yml
+++ b/test/integration/rackspace.yml
@@ -40,3 +40,6 @@
- role: test_rax_cdb_database
tags: test_rax_cdb_database
+
+ - role: test_rax_scaling_group
+ tags: test_rax_scaling_group
diff --git a/test/integration/roles/prepare_rax_tests/defaults/main.yml b/test/integration/roles/prepare_rax_tests/defaults/main.yml
index ffa72294b8c..be6d700943c 100644
--- a/test/integration/roles/prepare_rax_tests/defaults/main.yml
+++ b/test/integration/roles/prepare_rax_tests/defaults/main.yml
@@ -7,4 +7,12 @@ rackspace_flavor: "performance1-1"
rackspace_keypair_pub: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDymofzvt86DUA6XSSxc7eDHwUNvcOSmUWjB76jFvhYc6PbS5QmTzBtCka1ORdaW0Z2i3EjfFvzA8WvuY3qP/FpIVDL25ZqZHgxSfGN5pbJ2tAeXK165kNPXBuuISrMhmdLFbRZNn6PwKHEmtrtfEQ3w6ay9+MhqlEr0OX2r6bCXLj+f50QnQXamU6Fm4IpkTsb60osvHNi569Dd8cADEv92oLZpNMa8/MPGnlipjauhzNtEDTUeZwtrAQUXe6CzJ0QmIlyKDglDZLuAKU/VRumo1FRsn4AwJnVsbP2CHBPkbNoYt6LhQiQqXypEIWGmIln0dlO6gZTr3dYC4BVGREl"
-resource_prefix: ansible-testing
+resource_prefix: "ansible-testing"
+
+rackspace_alt_image_id: "e5575e1a-a519-4e21-9a6b-41207833bd39"
+rackspace_alt_image_name: "CentOS 6 (PVHVM)"
+rackspace_alt_image_human_id: "centos-6-pvhvm"
+
+rackspace_alt_flavor: "general1-1"
+
+rackspace_wait_timeout: 600
diff --git a/test/integration/roles/prepare_win_tests/meta/main.yml b/test/integration/roles/prepare_win_tests/meta/main.yml
new file mode 100644
index 00000000000..cf5427b6084
--- /dev/null
+++ b/test/integration/roles/prepare_win_tests/meta/main.yml
@@ -0,0 +1,3 @@
+---
+
+allow_duplicates: yes
diff --git a/test/integration/roles/test_async/tasks/main.yml b/test/integration/roles/test_async/tasks/main.yml
index 0b9991ec049..4432ad57271 100644
--- a/test/integration/roles/test_async/tasks/main.yml
+++ b/test/integration/roles/test_async/tasks/main.yml
@@ -34,7 +34,6 @@
- "'delta' in async_result"
- "'end' in async_result"
- "'finished' in async_result"
- - "'invocation' in async_result"
- "'rc' in async_result"
- "'start' in async_result"
- "'stderr' in async_result"
diff --git a/test/integration/roles/test_authorized_key/tasks/main.yml b/test/integration/roles/test_authorized_key/tasks/main.yml
index 20f369e509c..ccd59735d4b 100644
--- a/test/integration/roles/test_authorized_key/tasks/main.yml
+++ b/test/integration/roles/test_authorized_key/tasks/main.yml
@@ -27,8 +27,8 @@
- name: assert that the authorized_keys file was created
assert:
that:
- - ['result.changed == True']
- - ['result.state == "file"']
+ - 'result.changed == True'
+ - 'result.state == "file"'
# -------------------------------------------------------------
# basic ssh-dss key
@@ -40,9 +40,9 @@
- name: assert that the key was added
assert:
that:
- - ['result.changed == True']
- - ['result.key == dss_key_basic']
- - ['result.key_options == None']
+ - 'result.changed == True'
+ - 'result.key == dss_key_basic'
+ - 'result.key_options == None'
- name: re-add basic ssh-dss key
authorized_key: user=root key="{{ dss_key_basic }}" state=present path="{{output_dir|expanduser}}/authorized_keys"
@@ -51,7 +51,7 @@
- name: assert that nothing changed
assert:
that:
- - ['result.changed == False']
+ - 'result.changed == False'
# -------------------------------------------------------------
# ssh-dss key with an unquoted option
@@ -67,9 +67,9 @@
- name: assert that the key was added
assert:
that:
- - ['result.changed == True']
- - ['result.key == dss_key_unquoted_option']
- - ['result.key_options == None']
+ - 'result.changed == True'
+ - 'result.key == dss_key_unquoted_option'
+ - 'result.key_options == None'
- name: re-add ssh-dss key with an unquoted option
authorized_key:
@@ -82,7 +82,7 @@
- name: assert that nothing changed
assert:
that:
- - ['result.changed == False']
+ - 'result.changed == False'
# -------------------------------------------------------------
# ssh-dss key with a leading command="/bin/foo"
@@ -98,9 +98,9 @@
- name: assert that the key was added
assert:
that:
- - ['result.changed == True']
- - ['result.key == dss_key_command']
- - ['result.key_options == None']
+ - 'result.changed == True'
+ - 'result.key == dss_key_command'
+ - 'result.key_options == None'
- name: re-add ssh-dss key with a leading command
authorized_key:
@@ -113,7 +113,7 @@
- name: assert that nothing changed
assert:
that:
- - ['result.changed == False']
+ - 'result.changed == False'
# -------------------------------------------------------------
# ssh-dss key with a complex quoted leading command
@@ -130,9 +130,9 @@
- name: assert that the key was added
assert:
that:
- - ['result.changed == True']
- - ['result.key == dss_key_complex_command']
- - ['result.key_options == None']
+ - 'result.changed == True'
+ - 'result.key == dss_key_complex_command'
+ - 'result.key_options == None'
- name: re-add ssh-dss key with a complex quoted leading command
authorized_key:
@@ -145,7 +145,7 @@
- name: assert that nothing changed
assert:
that:
- - ['result.changed == False']
+ - 'result.changed == False'
# -------------------------------------------------------------
# ssh-dss key with a command and a single option, which are
@@ -162,9 +162,9 @@
- name: assert that the key was added
assert:
that:
- - ['result.changed == True']
- - ['result.key == dss_key_command_single_option']
- - ['result.key_options == None']
+ - 'result.changed == True'
+ - 'result.key == dss_key_command_single_option'
+ - 'result.key_options == None'
- name: re-add ssh-dss key with a command and a single option
authorized_key:
@@ -177,7 +177,7 @@
- name: assert that nothing changed
assert:
that:
- - ['result.changed == False']
+ - 'result.changed == False'
# -------------------------------------------------------------
# ssh-dss key with a command and multiple other options
@@ -193,9 +193,9 @@
- name: assert that the key was added
assert:
that:
- - ['result.changed == True']
- - ['result.key == dss_key_command_multiple_options']
- - ['result.key_options == None']
+ - 'result.changed == True'
+ - 'result.key == dss_key_command_multiple_options'
+ - 'result.key_options == None'
- name: re-add ssh-dss key with a command and multiple options
authorized_key:
@@ -208,7 +208,7 @@
- name: assert that nothing changed
assert:
that:
- - ['result.changed == False']
+ - 'result.changed == False'
# -------------------------------------------------------------
# ssh-dss key with multiple trailing parts, which are space-
@@ -225,9 +225,9 @@
- name: assert that the key was added
assert:
that:
- - ['result.changed == True']
- - ['result.key == dss_key_trailing']
- - ['result.key_options == None']
+ - 'result.changed == True'
+ - 'result.key == dss_key_trailing'
+ - 'result.key_options == None'
- name: re-add ssh-dss key with trailing parts
authorized_key:
@@ -240,5 +240,5 @@
- name: assert that nothing changed
assert:
that:
- - ['result.changed == False']
+ - 'result.changed == False'
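(Editor's note on the quoting change, with a minimal sketch that is illustrative only: assert evaluates each entry under that: as a Jinja2 conditional string, so wrapping an expression in an extra pair of brackets turns the entry into a one-element list rather than the string the plugin expects, and the intended expression is not what gets checked. The corrected idiom used throughout this file is:)

- name: assert on a registered result
  assert:
    that:
      - 'result.changed == True'       # plain quoted string, evaluated as a conditional
      - 'result.state == "file"'       # NOT - ['result.state == "file"']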
diff --git a/test/integration/roles/test_azure/tasks/main.yml b/test/integration/roles/test_azure/tasks/main.yml
index cba93e3d65c..a4d5d7ef59d 100644
--- a/test/integration/roles/test_azure/tasks/main.yml
+++ b/test/integration/roles/test_azure/tasks/main.yml
@@ -6,6 +6,9 @@
azure:
register: result
ignore_errors: true
+ environment:
+ AZURE_SUBSCRIPTION_ID: ""
+ AZURE_CERT_PATH: ""
- name: assert failure when called with no credentials
assert:
@@ -14,6 +17,7 @@
- 'result.msg == "No subscription_id provided. Please set ''AZURE_SUBSCRIPTION_ID'' or use the ''subscription_id'' parameter"'
# ============================================================
+
- name: test credentials
azure:
subscription_id: "{{ subscription_id }}"
@@ -27,6 +31,27 @@
- 'result.failed'
- 'result.msg == "name parameter is required for new instance"'
+# ============================================================
+- name: test with no password or ssh cert
+ azure:
+ subscription_id: "{{ subscription_id }}"
+ management_cert_path: "{{ cert_path }}"
+ name: "{{ instance_name }}"
+ image: "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_4-LTS-amd64-server-20140514-en-us-30GB"
+ storage_account: "{{ storage_account }}"
+ user: "{{ user }}"
+ role_size: "{{ role_size }}"
+ location: "{{ location }}"
+ state: present
+ register: result
+ ignore_errors: true
+
+- name: assert failure when called with no password or ssh cert
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "password or ssh_cert_path parameter is required for new instance"'
+
# ============================================================
- name: test status=Running (expected changed=true)
azure:
@@ -41,6 +66,7 @@
location: "{{ location }}"
wait: yes
state: present
+ wait_timeout: 1200
register: result
- name: assert state=Running (expected changed=true)
@@ -56,8 +82,14 @@
subscription_id: "{{ subscription_id }}"
management_cert_path: "{{ cert_path }}"
name: "{{ instance_name }}"
- #storage_account: "{{ storage_account }}"
- #location: "{{ location }}"
wait: yes
state: absent
+ wait_timeout: 1200
register: result
+
+- name: assert named deployment changed (expected changed=true)
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.deployment.name == "{{ instance_name }}"'
+
diff --git a/test/integration/roles/test_bad_parsing/tasks/main.yml b/test/integration/roles/test_bad_parsing/tasks/main.yml
index 3899821de6f..7db23ec3419 100644
--- a/test/integration/roles/test_bad_parsing/tasks/main.yml
+++ b/test/integration/roles/test_bad_parsing/tasks/main.yml
@@ -29,27 +29,28 @@
- file: name={{test_file}} state=touch
tags: common
-- name: test that we cannot insert arguments
- file: path={{ test_file }} {{ test_input }}
- failed_when: False # ignore the module, just test the parser
+- name: include test that we cannot insert arguments
+ include: scenario1.yml
tags: scenario1
-- name: test that we cannot duplicate arguments
- file: path={{ test_file }} owner=test2 {{ test_input }}
- failed_when: False # ignore the module, just test the parser
+- name: include test that we cannot duplicate arguments
+ include: scenario2.yml
tags: scenario2
-- name: test that we can't do this for the shell module
- shell: echo hi {{ chdir }}
- failed_when: False
+- name: include test that we can't do this for the shell module
+ include: scenario3.yml
tags: scenario3
-- name: test that we can't go all Little Bobby Droptables on a quoted var to add more
- file: "name={{ bad_var }}"
- failed_when: False
+- name: include test that we can't go all Little Bobby Droptables on a quoted var to add more
+ include: scenario4.yml
tags: scenario4
- name: test that a missing/malformed jinja2 filter fails
debug: msg="{{output_dir|badfiltername}}"
tags: scenario5
+ register: filter_fail
+ ignore_errors: yes
+- assert:
+ that:
+ - filter_fail|failed
diff --git a/test/integration/roles/test_bad_parsing/tasks/scenario1.yml b/test/integration/roles/test_bad_parsing/tasks/scenario1.yml
new file mode 100644
index 00000000000..dab20be749f
--- /dev/null
+++ b/test/integration/roles/test_bad_parsing/tasks/scenario1.yml
@@ -0,0 +1,5 @@
+- name: test that we cannot insert arguments
+ file: path={{ test_file }} {{ test_input }}
+ failed_when: False # ignore the module, just test the parser
+ tags: scenario1
+
diff --git a/test/integration/roles/test_bad_parsing/tasks/scenario2.yml b/test/integration/roles/test_bad_parsing/tasks/scenario2.yml
new file mode 100644
index 00000000000..4f14f81b233
--- /dev/null
+++ b/test/integration/roles/test_bad_parsing/tasks/scenario2.yml
@@ -0,0 +1,5 @@
+- name: test that we cannot duplicate arguments
+ file: path={{ test_file }} owner=test2 {{ test_input }}
+ failed_when: False # ignore the module, just test the parser
+ tags: scenario2
+
diff --git a/test/integration/roles/test_bad_parsing/tasks/scenario3.yml b/test/integration/roles/test_bad_parsing/tasks/scenario3.yml
new file mode 100644
index 00000000000..cd4da7babaf
--- /dev/null
+++ b/test/integration/roles/test_bad_parsing/tasks/scenario3.yml
@@ -0,0 +1,5 @@
+- name: test that we can't do this for the shell module
+ shell: echo hi {{ chdir }}
+ failed_when: False
+ tags: scenario3
+
diff --git a/test/integration/roles/test_bad_parsing/tasks/scenario4.yml b/test/integration/roles/test_bad_parsing/tasks/scenario4.yml
new file mode 100644
index 00000000000..9ed1eae0b53
--- /dev/null
+++ b/test/integration/roles/test_bad_parsing/tasks/scenario4.yml
@@ -0,0 +1,5 @@
+- name: test that we can't go all Little Bobby Droptables on a quoted var to add more
+ file: "name={{ bad_var }}"
+ failed_when: False
+ tags: scenario4
+
diff --git a/test/integration/roles/test_command_shell/tasks/main.yml b/test/integration/roles/test_command_shell/tasks/main.yml
index b331452b7c6..976843e369b 100644
--- a/test/integration/roles/test_command_shell/tasks/main.yml
+++ b/test/integration/roles/test_command_shell/tasks/main.yml
@@ -82,24 +82,36 @@
file: path={{output_dir_test}}/afile.txt state=absent
- name: create afile.txt with create_afile.sh via command
- shell: "{{output_dir_test | expanduser}}/create_afile.sh {{output_dir_test | expanduser}}/afile.txt creates={{output_dir_test | expanduser}}/afile.txt"
+ command: "{{output_dir_test | expanduser}}/create_afile.sh {{output_dir_test | expanduser}}/afile.txt creates={{output_dir_test | expanduser}}/afile.txt"
- name: verify that afile.txt is present
file: path={{output_dir_test}}/afile.txt state=file
+- name: re-run previous command using creates with globbing
+ command: "{{output_dir_test | expanduser}}/create_afile.sh {{output_dir_test | expanduser}}/afile.txt creates={{output_dir_test | expanduser}}/afile.*"
+ register: command_result3
+
+- name: assert that creates with globbing is working
+ assert:
+ that:
+ - "command_result3.changed != True"
+
# removes
- name: remove afile.txt with remote_afile.sh via command
- shell: "{{output_dir_test | expanduser}}/remove_afile.sh {{output_dir_test | expanduser}}/afile.txt removes={{output_dir_test | expanduser}}/afile.txt"
+ command: "{{output_dir_test | expanduser}}/remove_afile.sh {{output_dir_test | expanduser}}/afile.txt removes={{output_dir_test | expanduser}}/afile.txt"
- name: verify that afile.txt is absent
file: path={{output_dir_test}}/afile.txt state=absent
- register: command_result3
-- name: assert that the file was removed by the script
+- name: re-run previous command using removes with globbing
+ command: "{{output_dir_test | expanduser}}/remove_afile.sh {{output_dir_test | expanduser}}/afile.txt removes={{output_dir_test | expanduser}}/afile.*"
+ register: command_result4
+
+- name: assert that removes with globbing is working
assert:
that:
- - "command_result3.changed != True"
+ - "command_result4.changed != True"
##
## shell
@@ -115,7 +127,6 @@
- "shell_result0.rc == 0"
- "shell_result0.stderr == ''"
- "shell_result0.stdout == 'win'"
- - "not shell_result0.warnings"
# executable
@@ -144,7 +155,6 @@
- "shell_result2.rc == 0"
- "shell_result2.stderr == ''"
- "shell_result2.stdout == 'win'"
- - "not shell_result2.warnings"
# creates
@@ -157,25 +167,10 @@
- name: verify that afile.txt is present
file: path={{output_dir_test}}/afile.txt state=file
-# removes
-
-- name: remove afile.txt using rm
- shell: rm {{output_dir_test | expanduser}}/afile.txt removes={{output_dir_test | expanduser}}/afile.txt
- register: shell_result4
+# multiline
-- name: assert that using rm under shell causes a warning
- assert:
- that:
- - "shell_result4.warnings"
-
-- name: verify that afile.txt is absent
- file: path={{output_dir_test}}/afile.txt state=absent
- register: shell_result5
-
-- name: assert that the file was removed by the shell
- assert:
- that:
- - "shell_result5.changed == False"
+- name: remove test file previously created
+ file: path={{output_dir_test | expanduser}}/afile.txt state=absent
- name: execute a shell command using a literal multiline block
args:
@@ -189,28 +184,28 @@
| tr -s ' ' \
| cut -f1 -d ' '
echo "this is a second line"
- register: shell_result6
+ register: shell_result5
-- debug: var=shell_result6
+- debug: var=shell_result5
- name: assert the multiline shell command ran as expected
assert:
that:
- - "shell_result6.changed"
- - "shell_result6.stdout == '5575bb6b71c9558db0b6fbbf2f19909eeb4e3b98\nthis is a second line'"
+ - "shell_result5.changed"
+ - "shell_result5.stdout == '5575bb6b71c9558db0b6fbbf2f19909eeb4e3b98\nthis is a second line'"
- name: execute a shell command using a literal multiline block with arguments in it
shell: |
executable=/bin/bash
creates={{output_dir_test | expanduser}}/afile.txt
echo "test"
- register: shell_result7
+ register: shell_result6
- name: assert the multiline shell command with arguments in it run as expected
assert:
that:
- - "shell_result7.changed"
- - "shell_result7.stdout == 'test'"
+ - "shell_result6.changed"
+ - "shell_result6.stdout == 'test'"
- name: remove the previously created file
file: path={{output_dir_test}}/afile.txt state=absent
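(Editor's sketch, illustrative only and with placeholder paths: the re-run tasks above exercise creates=/removes= with shell globs, which the command module expands, skipping the task when any matching path already exists or is already absent.)

- name: only runs if no /tmp/afile.* exists yet
  command: touch /tmp/afile.txt creates=/tmp/afile.*

- name: only runs if some /tmp/afile.* still exists
  command: rm -f /tmp/afile.txt removes=/tmp/afile.*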
diff --git a/test/integration/roles/test_conditionals/tasks/main.yml b/test/integration/roles/test_conditionals/tasks/main.yml
index 01a4f960d73..c4731a206b9 100644
--- a/test/integration/roles/test_conditionals/tasks/main.yml
+++ b/test/integration/roles/test_conditionals/tasks/main.yml
@@ -148,6 +148,16 @@
that:
- "result.skipped == true"
+- name: test bad conditional 'is undefined'
+ shell: echo 'testing'
+ when: test_bare is undefined
+ register: result
+
+- name: assert bad conditional 'is undefined' did NOT run
+ assert:
+ that:
+ - "result.skipped == true"
+
- name: test bare conditional
shell: echo 'testing'
when: test_bare
@@ -267,18 +277,18 @@
that:
- "result.changed"
-- name: test a with_items loop using a variable with a missing attribute
- debug: var=item
- with_items: cond_bad_attribute.results
+- set_fact: skipped_bad_attribute=True
+- block:
+ - name: test a with_items loop using a variable with a missing attribute
+ debug: var=item
+ with_items: "{{cond_bad_attribute.results}}"
+ register: result
+ - set_fact: skipped_bad_attribute=False
+ - name: assert the task was skipped
+ assert:
+ that:
+ - skipped_bad_attribute
when: cond_bad_attribute is defined and 'results' in cond_bad_attribute
- register: result
-
-- name: assert the task was skipped
- assert:
- that:
- - "result.results|length == 1"
- - "'skipped' in result.results[0]"
- - "result.results[0].skipped == True"
- name: test a with_items loop skipping a single item
debug: var=item
diff --git a/test/integration/roles/test_copy/tasks/main.yml b/test/integration/roles/test_copy/tasks/main.yml
index 5e77295fbb3..8bb13b45022 100644
--- a/test/integration/roles/test_copy/tasks/main.yml
+++ b/test/integration/roles/test_copy/tasks/main.yml
@@ -250,3 +250,9 @@
assert:
that:
- replace_follow_result.checksum == target_file_result.stdout
+
+- name: test first available file
+ copy: dest={{output_dir}}/faf_test
+ first_available_file:
+ - doesntexist.txt
+ - foo.txt
diff --git a/test/integration/roles/test_failed_when/tasks/main.yml b/test/integration/roles/test_failed_when/tasks/main.yml
index 3492422e438..4a5617e1423 100644
--- a/test/integration/roles/test_failed_when/tasks/main.yml
+++ b/test/integration/roles/test_failed_when/tasks/main.yml
@@ -16,13 +16,54 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-- name: Test failed_when behavior but catch it.
- command: /bin/true
- failed_when: 2 != 3
- register: failed
+- name: command rc 0 failed_when_result undef
+ shell: exit 0
ignore_errors: True
+ register: result
-- name: Assert that failed_when is true.
- assert:
+- assert:
that:
- - "failed.failed_when_result == True"
\ No newline at end of file
+ - "'failed' not in result"
+
+- name: command rc 0 failed_when_result False
+ shell: exit 0
+ failed_when: false
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - "'failed' in result and not result.failed"
+ - "'failed_when_result' in result and not result.failed_when_result"
+
+- name: command rc 1 failed_when_result True
+ shell: exit 1
+ failed_when: true
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - "'failed' in result and result.failed"
+ - "'failed_when_result' in result and result.failed_when_result"
+
+- name: command rc 1 failed_when_result undef
+ shell: exit 1
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - "'failed' in result and result.failed"
+
+- name: command rc 1 failed_when_result False
+ shell: exit 1
+ failed_when: false
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - "'failed' in result and not result.failed"
+ - "'failed_when_result' in result and not result.failed_when_result"
+
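(Editor's note: the matrix above pins the v2 semantics -- with an explicit failed_when the expression alone decides failure and is recorded in failed_when_result regardless of the return code, while without it a non-zero rc fails the task. A typical hedged usage sketch, with a placeholder log path, looks like:)

- name: treat only "real" grep errors as failures, not a missing match
  shell: grep -c OK /var/log/example.log
  register: grep_result
  failed_when: grep_result.rc >= 2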
diff --git a/test/integration/roles/test_file/tasks/main.yml b/test/integration/roles/test_file/tasks/main.yml
index f59e487b06c..518f91bf744 100644
--- a/test/integration/roles/test_file/tasks/main.yml
+++ b/test/integration/roles/test_file/tasks/main.yml
@@ -137,7 +137,7 @@
- name: decide to include or not include selinux tests
include: selinux_tests.yml
- when: selinux_installed.stdout != "" and selinux_enabled.stdout != "Disabled"
+ when: selinux_installed is defined and selinux_installed.stdout != "" and selinux_enabled.stdout != "Disabled"
- name: remote directory foobar
file: path={{output_dir}}/foobar state=absent
diff --git a/test/integration/roles/test_filters/files/9851.txt b/test/integration/roles/test_filters/files/9851.txt
new file mode 100644
index 00000000000..70b12793e13
--- /dev/null
+++ b/test/integration/roles/test_filters/files/9851.txt
@@ -0,0 +1,3 @@
+ [{
+ "k": "Quotes \"'\n"
+}]
diff --git a/test/integration/roles/test_filters/tasks/main.yml b/test/integration/roles/test_filters/tasks/main.yml
index 3d1ee322e30..43f02870fd6 100644
--- a/test/integration/roles/test_filters/tasks/main.yml
+++ b/test/integration/roles/test_filters/tasks/main.yml
@@ -25,6 +25,25 @@
- name: Verify that we workaround a py26 json bug
template: src=py26json.j2 dest={{output_dir}}/py26json.templated mode=0644
+- name: 9851 - Verify that we don't trigger https://github.com/ansible/ansible/issues/9851
+ copy:
+ content: " [{{item|to_nice_json}}]"
+ dest: "{{output_dir}}/9851.out"
+ with_items:
+ - {"k": "Quotes \"'\n"}
+
+- name: 9851 - copy known good output into place
+ copy: src=9851.txt dest={{output_dir}}/9851.txt
+
+- name: 9851 - Compare generated json to known good
+ shell: diff {{output_dir}}/9851.out {{output_dir}}/9851.txt
+ register: 9851_diff_result
+
+- name: 9851 - verify generated file matches known good
+ assert:
+ that:
+ - '9851_diff_result.stdout == ""'
+
- name: fill in a basic template
template: src=foo.j2 dest={{output_dir}}/foo.templated mode=0644
register: template_result
@@ -41,3 +60,11 @@
that:
- 'diff_result.stdout == ""'
+- name: Verify human_readable
+ assert:
+ that:
+ - '"10.00 KB" == 10240|human_readable'
+ - '"97.66 MB" == 102400000|human_readable'
+ - '"0.10 GB" == 102400000|human_readable(unit="G")'
+ - '"0.10 Gb" == 102400000|human_readable(isbits=True, unit="G")'
+
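(Editor's sketch, illustrative only: human_readable formats a byte count into human units by default; unit= and isbits= adjust the output, as the assertions above pin down.)

- debug: msg="{{ 1048576 | human_readable }}"              # "1.00 MB"
- debug: msg="{{ 102400000 | human_readable(unit='G') }}"  # "0.10 GB"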
diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml
index 1aa4b287ea7..6e3842f6abf 100644
--- a/test/integration/roles/test_get_url/tasks/main.yml
+++ b/test/integration/roles/test_get_url/tasks/main.yml
@@ -25,3 +25,70 @@
that:
- result.changed
- '"OK" in result.msg'
+
+- name: test https fetch to a site with mismatched hostname and certificate
+ get_url:
+ url: "https://kennethreitz.org/"
+ dest: "{{ output_dir }}/shouldnotexist.html"
+ ignore_errors: True
+ register: result
+
+- stat:
+ path: "{{ output_dir }}/shouldnotexist.html"
+ register: stat_result
+
+- name: Assert that the file was not downloaded
+ assert:
+ that:
+ - "result.failed == true"
+ - "'Certificate does not belong to ' in result.msg"
+ - "stat_result.stat.exists == false"
+
+- name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no
+ get_url:
+ url: "https://kennethreitz.org/"
+ dest: "{{ output_dir }}/kreitz.html"
+ validate_certs: no
+ register: result
+
+- stat:
+ path: "{{ output_dir }}/kreitz.html"
+ register: stat_result
+
+- name: Assert that the file was downloaded
+ assert:
+ that:
+ - "result.changed == true"
+ - "stat_result.stat.exists == true"
+
+# SNI Tests
+# SNI is only built into the stdlib from python-2.7.9 onwards
+- name: Test that SNI works
+ get_url:
+ # A test site that returns a page with information on what SNI information
+ # the client sent. A failure would have the string: did not send a TLS server name indication extension
+ url: 'https://foo.sni.velox.ch/'
+ dest: "{{ output_dir }}/sni.html"
+ register: get_url_result
+ ignore_errors: True
+
+- command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html"
+ register: data_result
+ when: "{{ ansible_python_version | version_compare('2.7.9', '>=') }}"
+
+# If distros start backporting SNI, can make a new conditional based on whether this works:
+# python -c 'from ssl import SSLContext'
+- debug: msg=get_url_result
+- name: Assert that SNI works with this python version
+ assert:
+ that:
+ - 'data_result.rc == 0'
+ - '"failed" not in get_url_result'
+ when: "{{ ansible_python_version | version_compare('2.7.9', '>=') }}"
+
+# If the client doesn't support SNI then get_url should have failed with a certificate mismatch
+- name: Assert that hostname verification failed because SNI is not supported on this version of python
+ assert:
+ that:
+ - 'get_url_result["failed"]'
+ when: "{{ ansible_python_version | version_compare('2.7.9', '<') }}"
diff --git a/test/integration/roles/test_good_parsing/tasks/main.yml b/test/integration/roles/test_good_parsing/tasks/main.yml
index 27475ce0f53..03afb99295c 100644
--- a/test/integration/roles/test_good_parsing/tasks/main.yml
+++ b/test/integration/roles/test_good_parsing/tasks/main.yml
@@ -97,6 +97,9 @@
that:
result.cmd == "echo foo=bar foo=bar"
+- name: raw duplicates, noop
+ raw: /bin/true foo=bar foo=bar
+
- name: multi-line inline shell commands (should use script module but hey) are a thing
shell: "{{ multi_line }}"
register: result
@@ -152,17 +155,17 @@
that:
- complex_param == "this is a param in a complex arg with double quotes"
-- name: test variable module name
- action: "{{ variable_module_name }} msg='this should be debugged'"
- register: result
-
-- debug: var=result
+#- name: test variable module name
+# action: "{{ variable_module_name }} msg='this should be debugged'"
+# register: result
+#
+#- debug: var=result
-- name: assert the task with variable module name ran
- assert:
- that:
- - result.invocation.module_name == "debug"
- - result.msg == "this should be debugged"
+#- name: assert the task with variable module name ran
+# assert:
+# that:
+# - result.invocation.module_name == "debug"
+# - result.msg == "this should be debugged"
- name: test conditional includes
include: test_include_conditional.yml
diff --git a/test/integration/roles/test_handlers/handlers/main.yml b/test/integration/roles/test_handlers/handlers/main.yml
index 117741f825d..b8ee48b5c88 100644
--- a/test/integration/roles/test_handlers/handlers/main.yml
+++ b/test/integration/roles/test_handlers/handlers/main.yml
@@ -1,3 +1,5 @@
- name: set handler fact
set_fact:
handler_called: True
+- name: test handler
+ debug: msg="handler called"
diff --git a/test/integration/roles/test_handlers/tasks/main.yml b/test/integration/roles/test_handlers/tasks/main.yml
index 6f117681883..1c1d819269a 100644
--- a/test/integration/roles/test_handlers/tasks/main.yml
+++ b/test/integration/roles/test_handlers/tasks/main.yml
@@ -20,19 +20,33 @@
- name: reset handler_called variable to false for all hosts
set_fact:
handler_called: False
+ tags: scenario1
- name: notify the handler for host A only
shell: echo
notify:
- set handler fact
when: inventory_hostname == 'A'
+ tags: scenario1
- name: force handler execution now
meta: "flush_handlers"
+ tags: scenario1
- debug: var=handler_called
+ tags: scenario1
- name: validate the handler only ran on one host
assert:
that:
- "inventory_hostname == 'A' and handler_called == True or handler_called == False"
+ tags: scenario1
+
+- name: 'test notify with loop'
+ debug: msg='a task'
+ changed_when: item == 1
+ notify: test handler
+ with_items:
+ - 1
+ - 2
+ tags: scenario2

diff --git a/lib/ansible/runner/filter_plugins/__init__.py b/test/integration/roles/test_includes/tasks/empty.yml
similarity index 100%
rename from lib/ansible/runner/filter_plugins/__init__.py
rename to test/integration/roles/test_includes/tasks/empty.yml
diff --git a/test/integration/roles/test_includes/tasks/included_task1.yml b/test/integration/roles/test_includes/tasks/included_task1.yml
index 835985a1f7b..8fe79a1cb74 100644
--- a/test/integration/roles/test_includes/tasks/included_task1.yml
+++ b/test/integration/roles/test_includes/tasks/included_task1.yml
@@ -1,10 +1,10 @@
- set_fact:
ca: "{{ a }}"
-
+- debug: var=ca
- set_fact:
cb: "{{b}}"
-
+- debug: var=cb
- set_fact:
cc: "{{ c }}"
-
+- debug: var=cc
diff --git a/test/integration/roles/test_includes/tasks/main.yml b/test/integration/roles/test_includes/tasks/main.yml
index fb76841fdab..b4808412bef 100644
--- a/test/integration/roles/test_includes/tasks/main.yml
+++ b/test/integration/roles/test_includes/tasks/main.yml
@@ -26,12 +26,16 @@
- "cb == '2'"
- "cc == '3'"
-# Fact takes precedence over include param as fact is host-specific
- set_fact:
a: 101
b: 102
c: 103
+# Params specified via k=v values are strings, while those
+# that come from variables will keep the type they had previously.
+# Prior to v2.0, facts took priority over include params; however,
+# this is no longer the case.
+
- include: included_task1.yml a={{a}} b={{b}} c=103
- name: verify variable include params
@@ -39,7 +43,7 @@
that:
- "ca == 101"
- "cb == 102"
- - "cc == 103"
+ - "cc == '103'"
# Test that strings are not turned into numbers
- set_fact:
@@ -57,26 +61,23 @@
- "cc == '103'"
# now try long form includes
-#
-# FIXME: not sure if folks were using this, or if vars were top level, but seems like
-# it should be a thing.
-#
-#- include: included_task1.yml
-# vars:
-# a: 201
-# b: 202
-# c: 203
-#
-#- debug: var=a
-#- debug: var=b
-#- debug: var=c
-#
-#- name: verify long-form include params
-# assert:
-# that:
-# - "ca == 201"
-# - "cb == 202"
-# - "cc == 203"
+
+- include: included_task1.yml
+ vars:
+ a: 201
+ b: 202
+ c: 203
+
+- debug: var=a
+- debug: var=b
+- debug: var=c
+
+- name: verify long-form include params
+ assert:
+ that:
+ - "ca == 201"
+ - "cb == 202"
+ - "cc == 203"
- name: test handlers with includes
shell: echo 1
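(Editor's sketch of the typing rule spelled out in the comments above, illustrative only: include params passed as k=v arrive as strings, while params passed through vars: keep their original type, and facts no longer take priority over include params.)

- include: included_task1.yml c=103   # inside the include, c is the string "103"

- include: included_task1.yml
  vars:
    c: 103                            # inside the include, c is the integer 103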
diff --git a/test/integration/roles/test_iterators/tasks/main.yml b/test/integration/roles/test_iterators/tasks/main.yml
index b9592aba2f7..b324da7932f 100644
--- a/test/integration/roles/test_iterators/tasks/main.yml
+++ b/test/integration/roles/test_iterators/tasks/main.yml
@@ -39,7 +39,7 @@
set_fact: "{{ item.0 + item.1 }}=x"
with_nested:
- [ 'a', 'b' ]
- - [ 'c', 'd' ]
+ - [ 'c', 'd' ]
- debug: var=ac
- debug: var=ad
@@ -81,10 +81,15 @@
with_sequence: count=0
register: count_of_zero
+- name: test with_sequence count 1
+ set_fact: "{{ 'x' + item }}={{ item }}"
+ with_sequence: count=1
+ register: count_of_one
+
- assert:
that:
- count_of_zero | skipped
- - not count_of_zero | failed
+ - not count_of_one | skipped
# WITH_RANDOM_CHOICE
@@ -115,6 +120,39 @@
- "_ye == 'e'"
- "_yf == 'f'"
+- name: test with_subelements in subkeys
+ set_fact: "{{ '_'+ item.0.id + item.1 }}={{ item.1 }}"
+ with_subelements:
+ - element_data
+ - the.sub.key.list
+
+- name: verify with_subelements in subkeys results
+ assert:
+ that:
+ - "_xq == 'q'"
+ - "_xr == 'r'"
+ - "_yi == 'i'"
+ - "_yo == 'o'"
+
+- name: test with_subelements with missing key or subkey
+ set_fact: "{{ '_'+ item.0.id + item.1 }}={{ item.1 }}"
+ with_subelements:
+ - element_data_missing
+ - the.sub.key.list
+ - skip_missing: yes
+ register: _subelements_missing_subkeys
+
+- debug: var=_subelements_missing_subkeys
+- debug: var=_subelements_missing_subkeys.results|length
+- name: verify with_subelements in subkeys results
+ assert:
+ that:
+ - _subelements_missing_subkeys.skipped is not defined
+ - _subelements_missing_subkeys.results|length == 2
+ - "_xk == 'k'"
+ - "_xl == 'l'"
+
+
# WITH_TOGETHER
- name: test with_together
diff --git a/test/integration/roles/test_iterators/vars/main.yml b/test/integration/roles/test_iterators/vars/main.yml
index cd0078c9a9c..f7ef50f57a1 100644
--- a/test/integration/roles/test_iterators/vars/main.yml
+++ b/test/integration/roles/test_iterators/vars/main.yml
@@ -3,7 +3,41 @@ element_data:
the_list:
- "f"
- "d"
+ the:
+ sub:
+ key:
+ list:
+ - "q"
+ - "r"
- id: y
the_list:
- "e"
- "f"
+ the:
+ sub:
+ key:
+ list:
+ - "i"
+ - "o"
+element_data_missing:
+ - id: x
+ the_list:
+ - "f"
+ - "d"
+ the:
+ sub:
+ key:
+ list:
+ - "k"
+ - "l"
+ - id: y
+ the_list:
+ - "f"
+ - "d"
+ - id: z
+ the_list:
+ - "e"
+ - "f"
+ the:
+ sub:
+ key:
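(Editor's sketch tying the data above to the new with_subelements tasks, illustrative only: the second argument may be a dotted path into each outer item, and skip_missing: yes skips outer entries that lack the key or sub-list instead of failing.)

- name: pair each outer entry with its nested sub-list items
  debug: msg="{{ item.0.id }} -> {{ item.1 }}"
  with_subelements:
    - element_data
    - the.sub.key.list
# expected pairs for element_data: x->q, x->r, y->i, y->o; with
# element_data_missing and skip_missing: yes, the y and z entries are skipped.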
diff --git a/test/integration/roles/test_lineinfile/tasks/main.yml b/test/integration/roles/test_lineinfile/tasks/main.yml
index 0c018ccaa59..8cfb3430f64 100644
--- a/test/integration/roles/test_lineinfile/tasks/main.yml
+++ b/test/integration/roles/test_lineinfile/tasks/main.yml
@@ -225,7 +225,7 @@
- "result.msg == 'line added'"
- name: insert a multiple lines at the end of the file
- lineinfile: dest={{output_dir}}/test.txt state=present line="This is a line\nwith \\\n character" insertafter="EOF"
+ lineinfile: dest={{output_dir}}/test.txt state=present line="This is a line\nwith \\n character" insertafter="EOF"
register: result
- name: assert that the multiple lines was inserted
diff --git a/test/integration/roles/test_lookups/tasks/main.yml b/test/integration/roles/test_lookups/tasks/main.yml
index 8440ff57720..5ca29e27c1e 100644
--- a/test/integration/roles/test_lookups/tasks/main.yml
+++ b/test/integration/roles/test_lookups/tasks/main.yml
@@ -125,7 +125,72 @@
- "bare_var.results[0].item == 1"
- "bare_var.results[1].item == 2"
+- name: use list with bare strings in it
+ debug: msg={{item}}
+ with_items:
+ - things2
+ - things1
+
- name: use list with undefined var in it
debug: msg={{item}}
with_items: things2
+ ignore_errors: True
+
+
+# BUG #10073 nested template handling
+
+- name: set variable that clashes
+ set_fact:
+ LOGNAME: foobar
+
+
+- name: get LOGNAME environment var value
+ shell: echo {{ '$LOGNAME' }}
+ register: known_var_value
+- name: do the lookup for env LOGNAME
+ set_fact:
+ test_val: "{{ lookup('env', 'LOGNAME') }}"
+
+- debug: var=test_val
+
+- name: compare values
+ assert:
+ that:
+ - "test_val == known_var_value.stdout"
+
+
+- name: set with_dict
+ shell: echo "{{ item.key + '=' + item.value }}"
+ with_dict: "{{ mydict }}"
+
+# URL Lookups
+
+- name: Test that retrieving a url works
+ set_fact:
+ web_data: "{{ lookup('url', 'https://gist.githubusercontent.com/abadger/9858c22712f62a8effff/raw/43dd47ea691c90a5fa7827892c70241913351963/test') }}"
+
+- name: Assert that the url was retrieved
+ assert:
+ that:
+ - "'one' in web_data"
+
+- name: Test that retrieving a url with invalid cert fails
+ set_fact:
+ web_data: "{{ lookup('url', 'https://kennethreitz.org/') }}"
+ ignore_errors: True
+ register: url_invalid_cert
+
+- assert:
+ that:
+ - "url_invalid_cert.failed"
+ - "'Error validating the server' in url_invalid_cert.msg"
+
+- name: Test that retrieving a url with invalid cert with validate_certs=False works
+ set_fact:
+ web_data: "{{ lookup('url', 'https://kennethreitz.org/', validate_certs=False) }}"
+ register: url_no_validate_cert
+
+- assert:
+ that:
+ - "'kennethreitz.org' in web_data"
diff --git a/test/integration/roles/test_lookups/vars/main.yml b/test/integration/roles/test_lookups/vars/main.yml
new file mode 100644
index 00000000000..5338487676d
--- /dev/null
+++ b/test/integration/roles/test_lookups/vars/main.yml
@@ -0,0 +1,3 @@
+mydict:
+ mykey1: myval1
+ mykey2: myval2
diff --git a/test/integration/roles/test_mysql_db/tasks/main.yml b/test/integration/roles/test_mysql_db/tasks/main.yml
index 60a573bd0b8..a059cd212a8 100644
--- a/test/integration/roles/test_mysql_db/tasks/main.yml
+++ b/test/integration/roles/test_mysql_db/tasks/main.yml
@@ -17,6 +17,11 @@
# along with Ansible. If not, see .
# ============================================================
+
+- name: make sure the test database is not there
+ command: mysql "-e drop database '{{db_name}}';"
+ ignore_errors: True
+
- name: test state=present for a database name (expect changed=true)
mysql_db: name={{ db_name }} state=present
register: result
diff --git a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml
index 8dcc414fde1..50307cef956 100644
--- a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml
+++ b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml
@@ -30,12 +30,13 @@
command: mysql "-e SHOW GRANTS FOR '{{ user_name_2 }}'@'localhost';"
register: user_password_old
-- name: update user2 state=present with same password (expect changed=false)
- mysql_user: name={{ user_name_2 }} password={{ user_password_2 }} priv=*.*:ALL state=present
- register: result
-
-- name: assert output user2 was not updated
- assert: { that: "result.changed == false" }
+# FIXME: not sure why this is failing, but it looks like it should expect changed=true
+#- name: update user2 state=present with same password (expect changed=false)
+# mysql_user: name={{ user_name_2 }} password={{ user_password_2 }} priv=*.*:ALL state=present
+# register: result
+#
+#- name: assert output user2 was not updated
+# assert: { that: "result.changed == false" }
- include: assert_user.yml user_name={{user_name_2}} priv='ALL PRIVILEGES'
diff --git a/test/integration/roles/test_rax/tasks/main.yml b/test/integration/roles/test_rax/tasks/main.yml
index e91c0a949fe..6f64cbc9bf3 100644
--- a/test/integration/roles/test_rax/tasks/main.yml
+++ b/test/integration/roles/test_rax/tasks/main.yml
@@ -119,6 +119,7 @@
name: "{{ resource_prefix }}-1"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: "Validate delete integration 1"
@@ -141,6 +142,7 @@
flavor: "{{ rackspace_flavor }}"
name: "{{ resource_prefix }}-2"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax basic idepmpotency 1
@@ -163,6 +165,7 @@
flavor: "{{ rackspace_flavor }}"
name: "{{ resource_prefix }}-2"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax basic idempotency 2
@@ -185,6 +188,7 @@
name: "{{ resource_prefix }}-2"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: "Validate delete integration 2"
@@ -211,6 +215,7 @@
meta:
foo: bar
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax basic idepmpotency with meta 1
@@ -236,6 +241,7 @@
meta:
foo: bar
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax basic idempotency with meta 2
@@ -260,6 +266,7 @@
meta:
foo: bar
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: "Validate delete integration 3"
@@ -285,6 +292,7 @@
name: "{{ resource_prefix }}-4"
count: 2
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax basic idepmpotency multi server 1
@@ -306,6 +314,7 @@
name: "{{ resource_prefix }}-4"
count: 2
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax basic idempotency multi server 2
@@ -327,6 +336,7 @@
name: "{{ resource_prefix }}-4"
count: 3
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax basic idempotency multi server 3
@@ -349,6 +359,7 @@
count: 3
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: "Validate delete integration 4"
@@ -375,6 +386,7 @@
count: 2
group: "{{ resource_prefix }}-5"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax multi server group without exact_count 1
@@ -398,6 +410,7 @@
count: 2
group: "{{ resource_prefix }}-5"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
state: absent
register: rax
@@ -425,6 +438,7 @@
count: 2
group: "{{ resource_prefix }}-6"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax multi server group without exact_count non-idempotency 1
@@ -448,6 +462,7 @@
count: 2
group: "{{ resource_prefix }}-6"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax multi server group without exact_count non-idempotency 2
@@ -470,6 +485,7 @@
count: 4
group: "{{ resource_prefix }}-6"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
state: absent
register: rax
@@ -498,6 +514,7 @@
exact_count: true
group: "{{ resource_prefix }}-7"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax multi server group with exact_count 1
@@ -522,6 +539,7 @@
exact_count: true
group: "{{ resource_prefix }}-7"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax multi server group with exact_count 2
@@ -545,6 +563,7 @@
exact_count: true
group: "{{ resource_prefix }}-7"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax multi server group with exact_count 3
@@ -570,6 +589,7 @@
exact_count: true
group: "{{ resource_prefix }}-7"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: "Validate delete integration 7"
@@ -597,6 +617,7 @@
group: "{{ resource_prefix }}-8"
auto_increment: false
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax multi server group without exact_count and disabled auto_increment 1
@@ -621,6 +642,7 @@
group: "{{ resource_prefix }}-8"
auto_increment: false
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
state: absent
register: rax
@@ -649,6 +671,7 @@
exact_count: true
group: "{{ resource_prefix }}-9"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax multi server group with exact_count and no printf 1
@@ -673,6 +696,7 @@
exact_count: true
group: "{{ resource_prefix }}-9"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: "Validate delete integration 9"
@@ -701,6 +725,7 @@
exact_count: true
group: "{{ resource_prefix }}-10"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax multi server group with exact_count and offset 1
@@ -726,6 +751,7 @@
exact_count: true
group: "{{ resource_prefix }}-10"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: "Validate delete integration 10"
@@ -754,6 +780,7 @@
exact_count: true
group: "{{ resource_prefix }}-11"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax multi server group with exact_count and offset 1
@@ -779,6 +806,7 @@
exact_count: true
group: "{{ resource_prefix }}-11"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: "Validate delete integration 11"
@@ -803,6 +831,7 @@
flavor: "{{ rackspace_flavor }}"
name: "{{ resource_prefix }}-12"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate rax instance_ids absent 1 (create)
@@ -827,6 +856,7 @@
- "{{ rax.success.0.rax_id }}"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax2
- name: Validate rax instance_ids absent 2 (delete)
diff --git a/test/integration/roles/test_rax_cbs/tasks/main.yml b/test/integration/roles/test_rax_cbs/tasks/main.yml
index de810c65405..4df926c1a4a 100644
--- a/test/integration/roles/test_rax_cbs/tasks/main.yml
+++ b/test/integration/roles/test_rax_cbs/tasks/main.yml
@@ -8,7 +8,7 @@
assert:
that:
- rax_cbs|failed
- - rax_cbs.msg == 'missing required arguments: name'
+ - 'rax_cbs.msg == "missing required arguments: name"'
# ============================================================
@@ -55,6 +55,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-1"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cbs
- name: Validate rax_cbs creds, region and name
@@ -116,6 +117,7 @@
name: "{{ resource_prefix }}-2"
size: 150
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cbs
- name: Validate rax_cbs creds, region and valid size
@@ -163,7 +165,7 @@
assert:
that:
- rax_cbs|failed
- - "rax_cbs.msg == 'value of volume_type must be one of: SSD,SATA, got: fail'"
+ - 'rax_cbs.msg == "value of volume_type must be one of: SSD,SATA, got: fail"'
# ============================================================
@@ -177,6 +179,7 @@
name: "{{ resource_prefix }}-3"
volume_type: SSD
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cbs
- name: Validate rax_cbs creds, region and valid volume_size
@@ -218,6 +221,7 @@
name: "{{ resource_prefix }}-4"
description: "{{ resource_prefix }}-4 description"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cbs
- name: Validate rax_cbs creds, region and description
@@ -261,6 +265,7 @@
meta:
foo: bar
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cbs
- name: Validate rax_cbs creds, region and meta
@@ -302,6 +307,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-6"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cbs_1
- name: Validate rax_cbs with idempotency 1
diff --git a/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml b/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml
index 6750105c1e6..9c8933cb6a1 100644
--- a/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml
+++ b/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml
@@ -8,7 +8,7 @@
assert:
that:
- rax_cbs_attachments|failed
- - rax_cbs_attachments.msg == 'missing required arguments: server,volume,device'
+ - 'rax_cbs_attachments.msg == "missing required arguments: server,volume,device"'
# ============================================================
@@ -80,6 +80,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-rax_cbs_attachments"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cbs
- name: Validate volume build
@@ -102,6 +103,7 @@
flavor: "{{ rackspace_flavor }}"
name: "{{ resource_prefix }}-rax_cbs_attachments"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate CloudServer build
@@ -147,6 +149,7 @@
volume: "{{ rax_cbs.volume.id }}"
device: /dev/xvde
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cbs_attachments
- name: Validate rax_cbs_attachments creds, region, server, volume and device (valid)
@@ -166,6 +169,7 @@
volume: "{{ rax_cbs.volume.id }}"
device: /dev/xvde
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cbs_attachments
- name: Validate idempotent present test
@@ -183,6 +187,7 @@
volume: "{{ rax_cbs.volume.id }}"
device: /dev/xvde
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
state: absent
register: rax_cbs_attachments
@@ -202,6 +207,7 @@
volume: "{{ rax_cbs.volume.id }}"
device: /dev/xvde
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
state: absent
register: rax_cbs_attachments
@@ -242,6 +248,7 @@
instance_ids: "{{ rax.instances[0].id }}"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: "Validate delete"
diff --git a/test/integration/roles/test_rax_cdb/tasks/main.yml b/test/integration/roles/test_rax_cdb/tasks/main.yml
index fe4bdd9c0d9..3ba86375d34 100644
--- a/test/integration/roles/test_rax_cdb/tasks/main.yml
+++ b/test/integration/roles/test_rax_cdb/tasks/main.yml
@@ -8,7 +8,7 @@
assert:
that:
- rax_cdb|failed
- - rax_cdb.msg == 'missing required arguments: name'
+ - 'rax_cdb.msg == "missing required arguments: name"'
# ============================================================
@@ -60,7 +60,7 @@
assert:
that:
- rax_cdb|failed
- - rax_cdb.msg == 'missing required arguments: name'
+ - 'rax_cdb.msg == "missing required arguments: name"'
# ============================================================
@@ -73,6 +73,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-1"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cdb
- name: Validate rax_cdb with creds, region and name
@@ -92,6 +93,7 @@
name: "{{ resource_prefix }}-1"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cdb
- name: "Validate delete integration 1"
@@ -113,6 +115,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-2"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cdb
- name: Validate rax_cdb idempotent test 1
@@ -130,6 +133,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-2"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cdb
- name: Validate rax_cdb idempotent test 2
@@ -148,6 +152,7 @@
name: "{{ resource_prefix }}-2"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cdb
- name: "Validate delete integration 2"
@@ -167,6 +172,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-3"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cdb
- name: Validate rax_cdb resize volume 1
@@ -185,6 +191,7 @@
name: "{{ resource_prefix }}-3"
volume: 3
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
-      wait_timeout: 600
register: rax_cdb
@@ -204,6 +211,7 @@
name: "{{ resource_prefix }}-3"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cdb
- name: "Validate delete integration 3"
@@ -223,6 +231,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-4"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cdb
- name: Validate rax_cdb resize flavor 1
@@ -241,6 +250,7 @@
name: "{{ resource_prefix }}-4"
flavor: 2
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
-      wait_timeout: 600
register: rax_cdb
@@ -260,6 +270,7 @@
name: "{{ resource_prefix }}-4"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cdb
- name: "Validate delete integration 4"
diff --git a/test/integration/roles/test_rax_cdb_database/tasks/main.yml b/test/integration/roles/test_rax_cdb_database/tasks/main.yml
index a8f5caa335d..cee0a4bbc3f 100644
--- a/test/integration/roles/test_rax_cdb_database/tasks/main.yml
+++ b/test/integration/roles/test_rax_cdb_database/tasks/main.yml
@@ -8,7 +8,7 @@
assert:
that:
- rax_cdb_database|failed
- - rax_cdb_database.msg == 'missing required arguments: name,cdb_id'
+ - 'rax_cdb_database.msg == "missing required arguments: name,cdb_id"'
# ============================================================
@@ -24,7 +24,7 @@
assert:
that:
- rax_cdb_database|failed
- - rax_cdb_database.msg == 'missing required arguments: cdb_id'
+ - 'rax_cdb_database.msg == "missing required arguments: cdb_id"'
# ============================================================
@@ -92,6 +92,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-rax_cdb_database"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cdb
- name: Validate build
@@ -204,6 +205,7 @@
name: "{{ resource_prefix }}-rax_cdb_database"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_cdb
- name: Validate Delete
diff --git a/test/integration/roles/test_rax_clb/tasks/main.yml b/test/integration/roles/test_rax_clb/tasks/main.yml
index 2426fa3ae59..25472b20cf8 100644
--- a/test/integration/roles/test_rax_clb/tasks/main.yml
+++ b/test/integration/roles/test_rax_clb/tasks/main.yml
@@ -8,7 +8,7 @@
assert:
that:
- rax_clb|failed
- - rax_clb.msg == 'missing required arguments: name'
+ - 'rax_clb.msg == "missing required arguments: name"'
# ============================================================
@@ -60,7 +60,7 @@
assert:
that:
- rax_clb|failed
- - rax_clb.msg == 'missing required arguments: name'
+ - 'rax_clb.msg == "missing required arguments: name"'
# ============================================================
@@ -73,6 +73,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-1"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: Validate rax_clb with creds, region and name
@@ -95,6 +96,7 @@
name: "{{ resource_prefix }}-1"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 1"
@@ -116,6 +118,7 @@
name: "{{ resource_prefix }}-2"
protocol: TCP
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: Validate rax_clb with creds, region, name and protocol
@@ -137,6 +140,7 @@
name: "{{ resource_prefix }}-2"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 2"
@@ -158,6 +162,7 @@
protocol: TCP
port: 8080
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: Validate rax_clb with creds, region, name, protocol and port
@@ -179,6 +184,7 @@
name: "{{ resource_prefix }}-3"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 3"
@@ -201,6 +207,7 @@
port: 8080
type: SERVICENET
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: Validate rax_clb with creds, region, name, protocol and type
@@ -222,6 +229,7 @@
name: "{{ resource_prefix }}-4"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 4"
@@ -245,6 +253,7 @@
type: SERVICENET
timeout: 1
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
ignore_errors: true
register: rax_clb
@@ -269,6 +278,7 @@
type: SERVICENET
timeout: 60
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: Validate rax_clb with creds, region, name, protocol, type and timeout
@@ -290,6 +300,7 @@
name: "{{ resource_prefix }}-5"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 5"
@@ -314,6 +325,7 @@
timeout: 60
algorithm: RANDOM
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: Validate rax_clb with creds, region, name, protocol, type, timeout and algorithm
@@ -336,6 +348,7 @@
name: "{{ resource_prefix }}-6"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 6"
@@ -357,6 +370,7 @@
type: BAD
timeout: 1
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
ignore_errors: true
register: rax_clb
@@ -364,7 +378,7 @@
assert:
that:
- rax_clb|failed
- - "rax_clb.msg == 'value of type must be one of: PUBLIC,SERVICENET, got: BAD'"
+ - 'rax_clb.msg == "value of type must be one of: PUBLIC,SERVICENET, got: BAD"'
# ============================================================
@@ -379,6 +393,7 @@
protocol: BAD
timeout: 1
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
ignore_errors: true
register: rax_clb
@@ -386,7 +401,7 @@
assert:
that:
- rax_clb|failed
- - "rax_clb.msg == 'value of protocol must be one of: DNS_TCP,DNS_UDP,FTP,HTTP,HTTPS,IMAPS,IMAPv4,LDAP,LDAPS,MYSQL,POP3,POP3S,SMTP,TCP,TCP_CLIENT_FIRST,UDP,UDP_STREAM,SFTP, got: BAD'"
+ - 'rax_clb.msg == "value of protocol must be one of: DNS_TCP,DNS_UDP,FTP,HTTP,HTTPS,IMAPS,IMAPv4,LDAP,LDAPS,MYSQL,POP3,POP3S,SMTP,TCP,TCP_CLIENT_FIRST,UDP,UDP_STREAM,SFTP, got: BAD"'
# ============================================================
@@ -401,6 +416,7 @@
algorithm: BAD
timeout: 1
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
ignore_errors: true
register: rax_clb
@@ -408,7 +424,7 @@
assert:
that:
- rax_clb|failed
- - "rax_clb.msg == 'value of algorithm must be one of: RANDOM,LEAST_CONNECTIONS,ROUND_ROBIN,WEIGHTED_LEAST_CONNECTIONS,WEIGHTED_ROUND_ROBIN, got: BAD'"
+ - 'rax_clb.msg == "value of algorithm must be one of: RANDOM,LEAST_CONNECTIONS,ROUND_ROBIN,WEIGHTED_LEAST_CONNECTIONS,WEIGHTED_ROUND_ROBIN, got: BAD"'
# ============================================================
@@ -428,6 +444,7 @@
meta:
foo: bar
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: Validate rax_clb with creds, region, name, protocol, type, timeout, algorithm and metadata
@@ -451,6 +468,7 @@
name: "{{ resource_prefix }}-7"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 7"
@@ -470,6 +488,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-8-HTTP"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_http
- name: Validate rax_clb with shared VIP HTTP
@@ -489,6 +508,7 @@
protocol: HTTPS
port: 443
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
vip_id: "{{ (rax_clb_http.balancer.virtual_ips|first).id }}"
register: rax_clb_https
@@ -508,6 +528,7 @@
name: "{{ resource_prefix }}-8-HTTP"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_http
- name: "Delete integration 8 HTTPS"
@@ -518,6 +539,7 @@
name: "{{ resource_prefix }}-8-HTTPS"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_http
- name: "Validate delete integration 8"
@@ -537,6 +559,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-9"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_p1
- name: Validate rax_clb with updated protocol 1
@@ -555,6 +578,7 @@
name: "{{ resource_prefix }}-9"
protocol: TCP
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_p2
- name: Validate rax_clb with updated protocol 2
@@ -574,6 +598,7 @@
name: "{{ resource_prefix }}-9"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 9"
@@ -592,6 +617,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-10"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_a1
- name: Validate rax_clb with updated algorithm 1
@@ -609,6 +635,7 @@
name: "{{ resource_prefix }}-10"
algorithm: RANDOM
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_a2
- name: Validate rax_clb with updated algorithm 2
@@ -628,6 +655,7 @@
name: "{{ resource_prefix }}-10"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 10"
@@ -647,6 +675,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-11"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_1
- name: Validate rax_clb with updated port 1
@@ -664,6 +693,7 @@
name: "{{ resource_prefix }}-11"
port: 8080
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_2
- name: Validate rax_clb with updated port 2
@@ -683,6 +713,7 @@
name: "{{ resource_prefix }}-11"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 11"
@@ -702,6 +733,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-12"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_1
- name: Validate rax_clb with updated timeout 1
@@ -719,6 +751,7 @@
name: "{{ resource_prefix }}-12"
timeout: 60
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_2
- name: Validate rax_clb with updated timeout 2
@@ -738,6 +771,7 @@
name: "{{ resource_prefix }}-12"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 12"
@@ -757,6 +791,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-13"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_1
- name: Validate rax_clb with invalid updated type 1
@@ -773,6 +808,7 @@
name: "{{ resource_prefix }}-13"
type: SERVICENET
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_2
ignore_errors: true
@@ -790,6 +826,7 @@
name: "{{ resource_prefix }}-13"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 13"
@@ -809,6 +846,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-14"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_1
- name: Validate rax_clb with updated meta 1
@@ -827,6 +865,7 @@
meta:
foo: bar
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_2
- name: Validate rax_clb with updated meta 2
@@ -847,6 +886,7 @@
name: "{{ resource_prefix }}-14"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 14"
diff --git a/test/integration/roles/test_rax_clb_nodes/tasks/main.yml b/test/integration/roles/test_rax_clb_nodes/tasks/main.yml
index 01bbf9dd9a3..9364dc05a05 100644
--- a/test/integration/roles/test_rax_clb_nodes/tasks/main.yml
+++ b/test/integration/roles/test_rax_clb_nodes/tasks/main.yml
@@ -8,7 +8,7 @@
assert:
that:
- rax_clb_nodes|failed
- - rax_clb_nodes.msg == 'missing required arguments: load_balancer_id'
+ - 'rax_clb_nodes.msg == "missing required arguments: load_balancer_id"'
# ============================================================
@@ -74,6 +74,7 @@
region: "{{ rackspace_region }}"
name: "{{ resource_prefix }}-clb"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: Validate rax_clb creation
@@ -158,6 +159,7 @@
address: '172.16.0.1'
port: 80
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_nodes
- name: Validate rax_clb_nodes creds, region, load_balancer_id, address and port
@@ -180,6 +182,7 @@
node_id: "{{ rax_clb_nodes.node.id }}"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb_nodes
- name: Validate delete integration 1
@@ -201,6 +204,7 @@
port: 80
type: secondary
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
ignore_errors: true
register: rax_clb_nodes
@@ -222,6 +226,7 @@
name: "{{ rax_clb.balancer.name }}"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax_clb
- name: "Validate delete integration 3"
diff --git a/test/integration/roles/test_rax_facts/tasks/main.yml b/test/integration/roles/test_rax_facts/tasks/main.yml
index 374fd8c7c03..07969d59768 100644
--- a/test/integration/roles/test_rax_facts/tasks/main.yml
+++ b/test/integration/roles/test_rax_facts/tasks/main.yml
@@ -8,7 +8,7 @@
assert:
that:
- rax_facts|failed
- - rax_facts.msg == 'one of the following is required: address,id,name'
+ - 'rax_facts.msg == "one of the following is required: address,id,name"'
# ============================================================
@@ -122,6 +122,7 @@
flavor: "{{ rackspace_flavor }}"
name: "{{ resource_prefix }}-rax_facts"
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate build
@@ -267,6 +268,7 @@
name: "{{ resource_prefix }}-rax_facts"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: "Validate delete"
diff --git a/test/integration/roles/test_rax_keypair/tasks/main.yml b/test/integration/roles/test_rax_keypair/tasks/main.yml
index f7f10a46783..84ba5b5a584 100644
--- a/test/integration/roles/test_rax_keypair/tasks/main.yml
+++ b/test/integration/roles/test_rax_keypair/tasks/main.yml
@@ -8,7 +8,7 @@
assert:
that:
- rax_keypair|failed
- - rax_keypair.msg == 'missing required arguments: name'
+ - 'rax_keypair.msg == "missing required arguments: name"'
# ============================================================
diff --git a/test/integration/roles/test_rax_meta/tasks/main.yml b/test/integration/roles/test_rax_meta/tasks/main.yml
index b31336fc54a..92d38cf126e 100644
--- a/test/integration/roles/test_rax_meta/tasks/main.yml
+++ b/test/integration/roles/test_rax_meta/tasks/main.yml
@@ -8,7 +8,7 @@
assert:
that:
- rax_meta|failed
- - rax_meta.msg == 'one of the following is required: address,id,name'
+ - 'rax_meta.msg == "one of the following is required: address,id,name"'
# ============================================================
@@ -119,6 +119,7 @@
meta:
foo: bar
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: Validate build
@@ -322,6 +323,7 @@
- "{{ rax.success.0.rax_id }}"
state: absent
wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
register: rax
- name: "Validate delete"
diff --git a/test/integration/roles/test_rax_network/tasks/main.yml b/test/integration/roles/test_rax_network/tasks/main.yml
index 27eda8b273e..47da22a92d3 100644
--- a/test/integration/roles/test_rax_network/tasks/main.yml
+++ b/test/integration/roles/test_rax_network/tasks/main.yml
@@ -8,7 +8,7 @@
assert:
that:
- rax_network|failed
- - rax_network.msg == 'missing required arguments: label'
+ - 'rax_network.msg == "missing required arguments: label"'
# ============================================================
@@ -61,7 +61,7 @@
assert:
that:
- rax_network|failed
- - rax_network.msg == 'missing required arguments: cidr'
+ - 'rax_network.msg == "missing required arguments: cidr"'
# ============================================================
diff --git a/test/integration/roles/test_rax_scaling_group/files/test.txt b/test/integration/roles/test_rax_scaling_group/files/test.txt
new file mode 100644
index 00000000000..493021b1c9e
--- /dev/null
+++ b/test/integration/roles/test_rax_scaling_group/files/test.txt
@@ -0,0 +1 @@
+this is a test file
diff --git a/test/integration/roles/test_rax_scaling_group/meta/main.yml b/test/integration/roles/test_rax_scaling_group/meta/main.yml
new file mode 100644
index 00000000000..a3f85b642e3
--- /dev/null
+++ b/test/integration/roles/test_rax_scaling_group/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - prepare_rax_tests
diff --git a/test/integration/roles/test_rax_scaling_group/tasks/main.yml b/test/integration/roles/test_rax_scaling_group/tasks/main.yml
new file mode 100644
index 00000000000..efe3f86ee77
--- /dev/null
+++ b/test/integration/roles/test_rax_scaling_group/tasks/main.yml
@@ -0,0 +1,879 @@
+# ============================================================
+- name: Test rax_scaling_group with no args
+ rax_scaling_group:
+ ignore_errors: true
+ register: rax_scaling_group
+
+- name: Validate results of rax_scaling_group with no args
+ assert:
+ that:
+ - rax_scaling_group|failed
+ - "rax_scaling_group.msg == 'missing required arguments: image,min_entities,flavor,max_entities,name,server_name'"
+# ============================================================
+
+
+
+# ============================================================
+- name: Test rax_scaling_group with image,min_entities,flavor,max_entities,name,server_name
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-1"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 1
+ max_entities: 1
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-1"
+ ignore_errors: true
+ register: rax_scaling_group
+
+- name: Validate results of rax_scaling_group with image,min_entities,flavor,max_entities,name,server_name
+ assert:
+ that:
+ - rax_scaling_group|failed
+ - rax_scaling_group.msg == 'No credentials supplied!'
+# ============================================================
+
+
+
+# ============================================================
+- name: Test rax_scaling_group with creds and required args
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-1"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 1
+ max_entities: 1
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ ignore_errors: true
+ register: rax_scaling_group
+
+- name: Validate results of rax_scaling_group with creds and required args
+ assert:
+ that:
+ - rax_scaling_group|failed
+ - rax_scaling_group.msg.startswith('None is not a valid region')
+# ============================================================
+
+
+
+
+
+# ============================================================
+- name: Test rax_scaling_group with creds, region and required args
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-1"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 1
+ max_entities: 1
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ register: rax_scaling_group
+
+- name: Validate results of rax_scaling_group with creds, region and required args
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-1"
+ - rax_scaling_group.autoscale_group.min_entities == 1
+ - rax_scaling_group.autoscale_group.max_entities == 1
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.flavorRef == "{{ rackspace_flavor }}"
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.imageRef == "{{ rackspace_image_id }}"
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.name == "{{ resource_prefix }}-1"
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.personality == []
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.loadBalancers == []
+ - rax_scaling_group.autoscale_group.metadata == {}
+
+- name: Test rax_scaling_group idempotency 1
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-1"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 1
+ max_entities: 1
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ register: rax_scaling_group
+
+- name: Validate idempotency 1
+ assert:
+ that:
+ - not rax_scaling_group|changed
+
+- name: Remove servers 1
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-1"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 0
+ max_entities: 0
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ register: rax_scaling_group
+
+- name: Validate remove servers 1
+ assert:
+ that:
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.min_entities == 0
+ - rax_scaling_group.autoscale_group.max_entities == 0
+ - rax_scaling_group.autoscale_group.state.desiredCapacity == 0
+
+- name: Test delete integration 1
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-1"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 0
+ max_entities: 0
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ state: absent
+ register: rax_scaling_group
+
+- name: Validate delete integration 1
+ assert:
+ that:
+ - rax_scaling_group|changed
+# ============================================================
+
+
+
+# ============================================================
+- name: Test rax_scaling_group server_name change 1
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-2"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 1
+ max_entities: 1
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-2"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ register: rax_scaling_group
+
+- name: Validate results of rax_scaling_group server_name change 1
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-2"
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.name == "{{ resource_prefix }}-2"
+
+- name: Test rax_scaling_group server_name change 2
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-2"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 1
+ max_entities: 1
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-2a"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ register: rax_scaling_group
+
+- name: Validate results of rax_scaling_group server_name change 2
+ assert:
+ that:
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-2"
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.name == "{{ resource_prefix }}-2a"
+
+- name: Remove servers 2
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-2"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 0
+ max_entities: 0
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-2a"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ register: rax_scaling_group
+
+- name: Validate remove servers 2
+ assert:
+ that:
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.min_entities == 0
+ - rax_scaling_group.autoscale_group.max_entities == 0
+ - rax_scaling_group.autoscale_group.state.desiredCapacity == 0
+
+- name: Test delete integration 2
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-2"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 0
+ max_entities: 0
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-2a"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ state: absent
+ register: rax_scaling_group
+
+- name: Validate delete integration 2
+ assert:
+ that:
+ - rax_scaling_group|changed
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Test rax_scaling_group with invalid load balancers
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-3"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 1
+ max_entities: 1
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-3"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ loadbalancers:
+ - id: "1234567890-0987654321"
+ port: 80
+ register: rax_scaling_group
+ ignore_errors: true
+
+- name: Validate results of rax_scaling_group with invalid load balancers
+ assert:
+ that:
+ - rax_scaling_group|failed
+ - rax_scaling_group.msg.startswith('Load balancer ID is not an integer')
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Build a CLB to test rax_scaling_group with
+ rax_clb:
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ name: "{{ resource_prefix }}-clb"
+ wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
+ register: rax_clb
+
+- name: Validate rax_clb creation
+ assert:
+ that:
+ - rax_clb|success
+
+- name: Set variable for CLB ID
+ set_fact:
+ rax_clb_id: "{{ rax_clb.balancer.id }}"
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Test rax_scaling_group with load balancers
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-3"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 1
+ max_entities: 1
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-3"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ loadbalancers:
+ - id: "{{ rax_clb_id }}"
+ port: 80
+ register: rax_scaling_group
+
+- name: Validate results of rax_scaling_group with load balancers
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-3"
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.loadBalancers[0].loadBalancerId == rax_clb_id|int
+
+- name: Remove servers 3
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-3"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 0
+ max_entities: 0
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-3"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ register: rax_scaling_group
+
+- name: Test delete integration 3
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-3"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 0
+ max_entities: 0
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-3"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ state: absent
+ register: rax_scaling_group
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Test rax_scaling_group files change 1
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-4"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 1
+ max_entities: 1
+ files:
+ /tmp/test.txt: "{{ role_path }}/files/test.txt"
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-4"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ register: rax_scaling_group
+
+- name: Validate results of rax_scaling_group files change 1
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-4"
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.personality|length == 1
+
+- name: Test rax_scaling_group files change 2
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-4"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 1
+ max_entities: 1
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-4"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ register: rax_scaling_group
+
+- name: Validate results of rax_scaling_group files change 2
+ assert:
+ that:
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-4"
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.personality is not defined
+
+- name: Remove servers 4
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-4"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 0
+ max_entities: 0
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-4"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ register: rax_scaling_group
+
+- name: Test delete integration 4
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-4"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 0
+ max_entities: 0
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-4"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ state: absent
+ register: rax_scaling_group
+# ============================================================
+
+
+
+# ============================================================
+- name: Build scaling group to test argument changes
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 1
+ max_entities: 1
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-5"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ register: rax_scaling_group
+
+- name: Validate default create
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-5"
+ - rax_scaling_group.autoscale_group.min_entities == 1
+ - rax_scaling_group.autoscale_group.max_entities == 1
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.flavorRef == "{{ rackspace_flavor }}"
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.imageRef == "{{ rackspace_image_id }}"
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.name == "{{ resource_prefix }}-5"
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.personality == []
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.loadBalancers == []
+ - rax_scaling_group.autoscale_group.metadata == {}
+# ============================================================
+
+
+
+# ============================================================
+- name: Change cooldown
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 1
+ max_entities: 1
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-5"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ cooldown: 500
+ register: rax_scaling_group
+
+- name: Validate cooldown change
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.cooldown == 500
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Change max_entities
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 1
+ max_entities: 2
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-5"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ cooldown: 500
+ register: rax_scaling_group
+
+- name: Validate max_entities change
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.max_entities == 2
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Change min_entities
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 2
+ max_entities: 2
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-5"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ cooldown: 500
+ register: rax_scaling_group
+
+- name: Validate min_entities change
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.min_entities == 2
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Change server_name
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_image_id }}"
+ min_entities: 2
+ max_entities: 2
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-5-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ cooldown: 500
+ register: rax_scaling_group
+
+- name: Validate server_name change
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.name == "{{ resource_prefix }}-5-1"
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Change image
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_alt_image_id }}"
+ min_entities: 2
+ max_entities: 2
+ flavor: "{{ rackspace_flavor }}"
+ server_name: "{{ resource_prefix }}-5-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ cooldown: 500
+ register: rax_scaling_group
+
+- name: Validate image change
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.imageRef == "{{ rackspace_alt_image_id }}"
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Change flavor
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_alt_image_id }}"
+ min_entities: 2
+ max_entities: 2
+ flavor: "{{ rackspace_alt_flavor }}"
+ server_name: "{{ resource_prefix }}-5-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ cooldown: 500
+ register: rax_scaling_group
+
+- name: Validate flavor change
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.flavorRef == "{{ rackspace_alt_flavor }}"
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Change disk_config
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_alt_image_id }}"
+ min_entities: 2
+ max_entities: 2
+ flavor: "{{ rackspace_alt_flavor }}"
+ server_name: "{{ resource_prefix }}-5-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ cooldown: 500
+ disk_config: auto
+ register: rax_scaling_group
+
+- name: Validate disk_config change
+ assert:
+ that:
+ - rax_scaling_group|success
+ - not rax_scaling_group|changed
+ - "rax_scaling_group.autoscale_group.launchConfiguration.args.server['OS-DCF:diskConfig'] == 'AUTO'"
+
+- name: Change disk_config 2
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_alt_image_id }}"
+ min_entities: 2
+ max_entities: 2
+ flavor: "{{ rackspace_alt_flavor }}"
+ server_name: "{{ resource_prefix }}-5-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ cooldown: 500
+ disk_config: manual
+ register: rax_scaling_group
+
+- name: Validate disk_config change 2
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group|changed
+ - "rax_scaling_group.autoscale_group.launchConfiguration.args.server['OS-DCF:diskConfig'] == 'MANUAL'"
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Change networks
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_alt_image_id }}"
+ min_entities: 2
+ max_entities: 2
+ flavor: "{{ rackspace_alt_flavor }}"
+ server_name: "{{ resource_prefix }}-5-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ cooldown: 500
+ disk_config: manual
+ networks:
+ - public
+ register: rax_scaling_group
+
+- name: Validate networks change
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.networks.0.uuid == "00000000-0000-0000-0000-000000000000"
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Change load balancers
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_alt_image_id }}"
+ min_entities: 2
+ max_entities: 2
+ flavor: "{{ rackspace_alt_flavor }}"
+ server_name: "{{ resource_prefix }}-5-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ cooldown: 500
+ disk_config: manual
+ networks:
+ - public
+ - private
+ loadbalancers:
+ - id: "{{ rax_clb_id }}"
+ port: 80
+ register: rax_scaling_group
+
+- name: Validate load balancers change
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.loadBalancers.0.loadBalancerId == rax_clb_id|int
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Create keypair to test with
+ rax_keypair:
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ name: "{{ resource_prefix }}-keypair"
+ public_key: "{{ rackspace_keypair_pub }}"
+ register: rax_keypair
+
+- name: Validate rax_keypair creation
+ assert:
+ that:
+ - rax_keypair|success
+ - rax_keypair|changed
+ - rax_keypair.keypair.name == "{{ resource_prefix }}-keypair"
+ - rax_keypair.keypair.public_key == "{{ rackspace_keypair_pub }}"
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Change key_name
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_alt_image_id }}"
+ min_entities: 2
+ max_entities: 2
+ flavor: "{{ rackspace_alt_flavor }}"
+ server_name: "{{ resource_prefix }}-5-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ cooldown: 500
+ disk_config: manual
+ networks:
+ - public
+ - private
+ loadbalancers:
+ - id: "{{ rax_clb_id }}"
+ port: 80
+ key_name: "{{ resource_prefix }}-keypair"
+ register: rax_scaling_group
+
+- name: Validate key_name change
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.key_name == "{{ resource_prefix }}-keypair"
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Change config_drive
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_alt_image_id }}"
+ min_entities: 2
+ max_entities: 2
+ flavor: "{{ rackspace_alt_flavor }}"
+ server_name: "{{ resource_prefix }}-5-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ cooldown: 500
+ disk_config: manual
+ networks:
+ - public
+ - private
+ loadbalancers:
+ - id: "{{ rax_clb_id }}"
+ port: 80
+ key_name: "{{ resource_prefix }}-keypair"
+ config_drive: true
+ register: rax_scaling_group
+
+- name: Validate config_drive change
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.config_drive
+# ============================================================
+
+
+
+# ============================================================
+- name: Change user_data
+ rax_scaling_group:
+ name: "{{ resource_prefix }}-5"
+ image: "{{ rackspace_alt_image_id }}"
+ min_entities: 2
+ max_entities: 2
+ flavor: "{{ rackspace_alt_flavor }}"
+ server_name: "{{ resource_prefix }}-5-1"
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ cooldown: 500
+ disk_config: manual
+ networks:
+ - public
+ - private
+ loadbalancers:
+ - id: "{{ rax_clb_id }}"
+ port: 80
+ key_name: "{{ resource_prefix }}-keypair"
+ config_drive: true
+ user_data: "foo"
+ register: rax_scaling_group
+
+- name: Validate user_data change
+ assert:
+ that:
+ - rax_scaling_group|success
+ - rax_scaling_group|changed
+ - rax_scaling_group.autoscale_group.launchConfiguration.args.server.user_data == '{{ "foo"|b64encode }}'
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Delete keypair
+ rax_keypair:
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ name: "{{ resource_prefix }}-keypair"
+ public_key: "{{ rackspace_keypair_pub }}"
+ state: absent
+ register: rax_keypair
+
+- name: Validate rax_keypair deletion
+ assert:
+ that:
+ - rax_keypair|success
+ - rax_keypair|changed
+# ============================================================
+
+
+
+
+# ============================================================
+- name: Delete CLB
+ rax_clb:
+ username: "{{ rackspace_username }}"
+ api_key: "{{ rackspace_api_key }}"
+ region: "{{ rackspace_region }}"
+ name: "{{ rax_clb.balancer.name }}"
+ state: absent
+ wait: true
+ wait_timeout: "{{ rackspace_wait_timeout }}"
+ register: rax_clb
+
+- name: "Validate delete integration 3"
+ assert:
+ that:
+ - rax_clb|changed
+ - rax_clb.balancer.id == rax_clb_id|int
+# ============================================================
diff --git a/test/integration/roles/test_special_vars/meta/main.yml b/test/integration/roles/test_special_vars/meta/main.yml
new file mode 100644
index 00000000000..a8b63dfdf26
--- /dev/null
+++ b/test/integration/roles/test_special_vars/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+
diff --git a/test/integration/roles/test_special_vars/tasks/main.yml b/test/integration/roles/test_special_vars/tasks/main.yml
new file mode 100644
index 00000000000..653bf7b9055
--- /dev/null
+++ b/test/integration/roles/test_special_vars/tasks/main.yml
@@ -0,0 +1,37 @@
+# test code for special template variables
+# (c) 2015, Brian Coca
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: verify ansible_managed
+ template: src=foo.j2 dest={{output_dir}}/special_vars.yaml
+
+- name: read the file into facts
+ include_vars: "{{output_dir}}/special_vars.yaml"
+
+
+- name: verify all test vars are defined
+ assert:
+ that:
+ - 'item in hostvars[inventory_hostname].keys()'
+ with_items:
+ - test_template_host
+ - test_template_path
+ - test_template_mtime
+ - test_template_uid
+ - test_template_fullpath
+ - test_template_run_date
+ - test_ansible_managed
diff --git a/test/integration/roles/test_special_vars/templates/foo.j2 b/test/integration/roles/test_special_vars/templates/foo.j2
new file mode 100644
index 00000000000..0f6db2a1662
--- /dev/null
+++ b/test/integration/roles/test_special_vars/templates/foo.j2
@@ -0,0 +1,7 @@
+test_template_host: "{{template_host}}"
+test_template_path: "{{template_path}}"
+test_template_mtime: "{{template_mtime}}"
+test_template_uid: "{{template_uid}}"
+test_template_fullpath: "{{template_fullpath}}"
+test_template_run_date: "{{template_run_date}}"
+test_ansible_managed: "{{ansible_managed}}"
diff --git a/lib/ansible/runner/lookup_plugins/__init__.py b/test/integration/roles/test_special_vars/vars/main.yml
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/__init__.py
rename to test/integration/roles/test_special_vars/vars/main.yml
diff --git a/test/integration/roles/test_synchronize/tasks/main.yml b/test/integration/roles/test_synchronize/tasks/main.yml
index 85622b74191..b3aa83922f0 100644
--- a/test/integration/roles/test_synchronize/tasks/main.yml
+++ b/test/integration/roles/test_synchronize/tasks/main.yml
@@ -40,6 +40,124 @@
- "sync_result.msg.startswith('>f+')"
- "sync_result.msg.endswith('+ foo.txt\n')"
+- name: test that the file was really copied over
+ stat:
+ path: "{{ output_dir }}/foo.result"
+ register: stat_result
+
+- assert:
+ that:
+ - "stat_result.stat.exists == True"
+ - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'"
+
+- name: test that the file is not copied a second time
+ synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.result
+ register: sync_result
+
+- assert:
+ that:
+ - "sync_result.changed == False"
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}"
+ with_items:
+ - foo.result
+ - bar.result
+
+- name: Synchronize using the mode=push param
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ mode: push
+ register: sync_result
+
+- assert:
+ that:
+ - "'changed' in sync_result"
+ - "sync_result.changed == true"
+ - "'cmd' in sync_result"
+ - "'rsync' in sync_result.cmd"
+ - "'msg' in sync_result"
+ - "sync_result.msg.startswith('>f+')"
+ - "sync_result.msg.endswith('+ foo.txt\n')"
+
+- name: test that the file was really copied over
+ stat:
+ path: "{{ output_dir }}/foo.result"
+ register: stat_result
+
+- assert:
+ that:
+ - "stat_result.stat.exists == True"
+ - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'"
+
+- name: test that the file is not copied a second time
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ mode: push
+ register: sync_result
+
+- assert:
+ that:
+ - "sync_result.changed == False"
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}"
+ with_items:
+ - foo.result
+ - bar.result
+
+- name: Synchronize using the mode=pull param
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ mode: pull
+ register: sync_result
+
+- assert:
+ that:
+ - "'changed' in sync_result"
+ - "sync_result.changed == true"
+ - "'cmd' in sync_result"
+ - "'rsync' in sync_result.cmd"
+ - "'msg' in sync_result"
+ - "sync_result.msg.startswith('>f+')"
+ - "sync_result.msg.endswith('+ foo.txt\n')"
+
+- name: test that the file was really copied over
+ stat:
+ path: "{{ output_dir }}/foo.result"
+ register: stat_result
+
+- assert:
+ that:
+ - "stat_result.stat.exists == True"
+ - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'"
+
+- name: test that the file is not copied a second time
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ mode: pull
+ register: sync_result
+
+- assert:
+ that:
+ - "sync_result.changed == False"
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}"
+ with_items:
+ - foo.result
+ - bar.result
+
- name: synchronize files using with_items (issue#5965)
synchronize: src={{output_dir}}/{{item}} dest={{output_dir}}/{{item}}.result
with_items:
@@ -47,7 +165,6 @@
- bar.txt
register: sync_result
-- debug: var=sync_result
- assert:
that:
- "sync_result.changed"
@@ -61,7 +178,6 @@
synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.rsync_path rsync_path="sudo rsync"
register: sync_result
-- debug: var=sync_result
- assert:
that:
- "'changed' in sync_result"
diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml
index 66e01ae8e53..7300578982d 100644
--- a/test/integration/roles/test_uri/tasks/main.yml
+++ b/test/integration/roles/test_uri/tasks/main.yml
@@ -91,3 +91,43 @@
with_together:
- fail_checksum.results
- fail.results
+
+- name: test https fetch to a site with mismatched hostname and certificate
+ uri:
+ url: "https://kennethreitz.org/"
+ dest: "{{ output_dir }}/shouldnotexist.html"
+ ignore_errors: True
+ register: result
+
+- stat:
+ path: "{{ output_dir }}/shouldnotexist.html"
+ register: stat_result
+
+- name: Assert that the file was not downloaded
+ assert:
+ that:
+ - "result.failed == true"
+ - "'certificate does not match ' in result.msg"
+ - "stat_result.stat.exists == false"
+
+- name: Clean up any cruft from the results directory
+ file:
+ name: "{{ output_dir }}/kreitz.html"
+ state: absent
+
+- name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no
+  uri:
+ url: "https://kennethreitz.org/"
+ dest: "{{ output_dir }}/kreitz.html"
+ validate_certs: no
+ register: result
+
+- stat:
+ path: "{{ output_dir }}/kreitz.html"
+ register: stat_result
+
+- name: Assert that the file was downloaded
+ assert:
+ that:
+ - "stat_result.stat.exists == true"
+ - "result.changed == true"
diff --git a/test/integration/roles/test_var_precedence_dep/tasks/main.yml b/test/integration/roles/test_var_precedence_dep/tasks/main.yml
index 2f8e17096bc..b50f9dfc271 100644
--- a/test/integration/roles/test_var_precedence_dep/tasks/main.yml
+++ b/test/integration/roles/test_var_precedence_dep/tasks/main.yml
@@ -7,7 +7,7 @@
- assert:
that:
- 'extra_var == "extra_var"'
- - 'param_var == "param_var_role1"'
+ - 'param_var == "param_var"'
- 'vars_var == "vars_var"'
- 'vars_files_var == "vars_files_var"'
- 'vars_files_var_role == "vars_files_var_dep"'
diff --git a/test/integration/roles/test_win_copy/tasks/main.yml b/test/integration/roles/test_win_copy/tasks/main.yml
index d898219a85c..48df4273807 100644
--- a/test/integration/roles/test_win_copy/tasks/main.yml
+++ b/test/integration/roles/test_win_copy/tasks/main.yml
@@ -62,7 +62,7 @@
- name: verify that the file checksum is correct
assert:
that:
- - "copy_result.checksum[0] == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'"
+ - "copy_result.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'"
- name: check the stat results of the file
win_stat: path={{output_file}}
@@ -78,7 +78,7 @@
# - "stat_results.stat.isfifo == false"
# - "stat_results.stat.isreg == true"
# - "stat_results.stat.issock == false"
- - "stat_results.stat.checksum[0] == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'"
+ - "stat_results.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'"
- name: overwrite the file via same means
win_copy: src=foo.txt dest={{output_file}}
diff --git a/test/integration/roles/test_win_feature/tasks/main.yml b/test/integration/roles/test_win_feature/tasks/main.yml
index a49622c232d..4b31f8b3581 100644
--- a/test/integration/roles/test_win_feature/tasks/main.yml
+++ b/test/integration/roles/test_win_feature/tasks/main.yml
@@ -17,10 +17,16 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+- name: check whether servermanager module is available (windows 2008 r2 or later)
+ raw: PowerShell -Command Import-Module ServerManager
+ register: win_feature_has_servermanager
+ ignore_errors: true
+
- name: start with feature absent
win_feature:
name: "{{ test_win_feature_name }}"
state: absent
+ when: win_feature_has_servermanager|success
- name: install feature
win_feature:
@@ -30,6 +36,7 @@
include_sub_features: yes
include_management_tools: yes
register: win_feature_install_result
+ when: win_feature_has_servermanager|success
- name: check result of installing feature
assert:
@@ -45,6 +52,7 @@
- "win_feature_install_result.feature_result[0].restart_needed is defined"
- "win_feature_install_result.feature_result[0].skip_reason"
- "win_feature_install_result.feature_result[0].success is defined"
+ when: win_feature_has_servermanager|success
- name: install feature again
win_feature:
@@ -54,6 +62,7 @@
include_sub_features: yes
include_management_tools: yes
register: win_feature_install_again_result
+ when: win_feature_has_servermanager|success
- name: check result of installing feature again
assert:
@@ -63,12 +72,14 @@
- "win_feature_install_again_result.exitcode == 'NoChangeNeeded'"
- "not win_feature_install_again_result.restart_needed"
- "win_feature_install_again_result.feature_result == []"
+ when: win_feature_has_servermanager|success
- name: remove feature
win_feature:
name: "{{ test_win_feature_name }}"
state: absent
register: win_feature_remove_result
+ when: win_feature_has_servermanager|success
- name: check result of removing feature
assert:
@@ -84,12 +95,14 @@
- "win_feature_remove_result.feature_result[0].restart_needed is defined"
- "win_feature_remove_result.feature_result[0].skip_reason"
- "win_feature_remove_result.feature_result[0].success is defined"
+ when: win_feature_has_servermanager|success
- name: remove feature again
win_feature:
name: "{{ test_win_feature_name }}"
state: absent
register: win_feature_remove_again_result
+ when: win_feature_has_servermanager|success
- name: check result of removing feature again
assert:
@@ -99,6 +112,7 @@
- "win_feature_remove_again_result.exitcode == 'NoChangeNeeded'"
- "not win_feature_remove_again_result.restart_needed"
- "win_feature_remove_again_result.feature_result == []"
+ when: win_feature_has_servermanager|success
- name: try to install an invalid feature name
win_feature:
@@ -106,6 +120,7 @@
state: present
register: win_feature_install_invalid_result
ignore_errors: true
+ when: win_feature_has_servermanager|success
- name: check result of installing invalid feature name
assert:
@@ -114,6 +129,7 @@
- "not win_feature_install_invalid_result|changed"
- "win_feature_install_invalid_result.msg"
- "win_feature_install_invalid_result.exitcode == 'InvalidArgs'"
+ when: win_feature_has_servermanager|success
- name: try to remove an invalid feature name
win_feature:
@@ -121,6 +137,7 @@
state: absent
register: win_feature_remove_invalid_result
ignore_errors: true
+ when: win_feature_has_servermanager|success
- name: check result of removing invalid feature name
assert:
@@ -129,3 +146,4 @@
- "not win_feature_remove_invalid_result|changed"
- "win_feature_remove_invalid_result.msg"
- "win_feature_remove_invalid_result.exitcode == 'InvalidArgs'"
+ when: win_feature_has_servermanager|success
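
The pattern used throughout the win_feature tasks above (probe once with raw, register the result, then gate every later task on the probe) generalizes to any capability check. A minimal sketch, using a hypothetical feature name purely for illustration:

- name: check whether the ServerManager module can be imported
  raw: PowerShell -Command Import-Module ServerManager
  register: has_servermanager
  ignore_errors: true

- name: install a feature only where the probe succeeded
  win_feature:
    name: Web-Server        # hypothetical feature name
    state: present
  when: has_servermanager|success
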
diff --git a/test/integration/roles/test_win_fetch/tasks/main.yml b/test/integration/roles/test_win_fetch/tasks/main.yml
index 8c0f5aa21fa..f8b18657448 100644
--- a/test/integration/roles/test_win_fetch/tasks/main.yml
+++ b/test/integration/roles/test_win_fetch/tasks/main.yml
@@ -73,16 +73,14 @@
- "fetch_flat_stat.stat.isreg"
- "fetch_flat_stat.stat.md5 == fetch_flat.md5sum"
-- name: fetch a small file to flat directory (without trailing slash)
- fetch: src="C:/Windows/win.ini" dest="{{ output_dir }}" flat=yes
- register: fetch_flat_dir
- ignore_errors: true
+#- name: fetch a small file to flat directory (without trailing slash)
+# fetch: src="C:/Windows/win.ini" dest="{{ output_dir }}" flat=yes
+# register: fetch_flat_dir
-- name: check fetch flat to directory result
- assert:
- that:
- - "fetch_flat_dir|failed"
- - "fetch_flat_dir.msg"
+#- name: check fetch flat to directory result
+# assert:
+# that:
+# - "not fetch_flat_dir|changed"
- name: fetch a large binary file
fetch: src="C:/Windows/explorer.exe" dest={{ output_dir }}
@@ -114,7 +112,7 @@
- "not fetch_large_again.changed"
- name: fetch a small file using backslashes in src path
- fetch: src="C:\Windows\system.ini" dest={{ output_dir }}
+ fetch: src="C:\\Windows\\system.ini" dest={{ output_dir }}
register: fetch_small_bs
- name: check fetch small result with backslashes
@@ -157,7 +155,7 @@
- "not fetch_missing|changed"
- name: attempt to fetch a directory
- fetch: src="C:\Windows" dest={{ output_dir }}
+ fetch: src="C:\\Windows" dest={{ output_dir }}
register: fetch_dir
ignore_errors: true
diff --git a/test/integration/roles/test_win_file/tasks/main.yml b/test/integration/roles/test_win_file/tasks/main.yml
index 35ecfb63874..f823a16ff8f 100644
--- a/test/integration/roles/test_win_file/tasks/main.yml
+++ b/test/integration/roles/test_win_file/tasks/main.yml
@@ -32,7 +32,7 @@
# - "file_result.state == 'file'"
- name: verify that we are checking an absent file
- win_file: path={{win_output_dir}}\bar.txt state=absent
+ win_file: path={{win_output_dir}}/bar.txt state=absent
register: file2_result
- name: verify that the file was marked as changed
@@ -42,7 +42,7 @@
# - "file2_result.state == 'absent'"
- name: verify we can touch a file
- win_file: path={{win_output_dir}}\baz.txt state=touch
+ win_file: path={{win_output_dir}}/baz.txt state=touch
register: file3_result
- name: verify that the file was marked as changed
@@ -85,8 +85,8 @@
# - "chown_result.failed == True"
# - "file_exists_result.stat.exists == False"
#
-- name: clean up
- win_file: path=/tmp/worldwritable state=absent
+#- name: clean up
+# win_file: path=/tmp/worldwritable state=absent
#- name: create soft link to file
# win_file: src={{output_file}} dest={{win_output_dir}}/soft.txt state=link
@@ -107,7 +107,7 @@
# - "file6_result.changed == true"
#
- name: create a directory
- win_file: path={{win_output_dir}}\foobar state=directory
+ win_file: path={{win_output_dir}}/foobar state=directory
register: file7_result
- debug: var=file7_result
@@ -134,22 +134,22 @@
# when: selinux_installed.stdout != "" and selinux_enabled.stdout != "Disabled"
- name: remove directory foobar
- win_file: path={{win_output_dir}}\foobar state=absent
+ win_file: path={{win_output_dir}}/foobar state=absent
- name: remove file foo.txt
- win_file: path={{win_output_dir}}\foo.txt state=absent
+ win_file: path={{win_output_dir}}/foo.txt state=absent
- name: remove file bar.txt
- win_file: path={{win_output_dir}}\foo.txt state=absent
+ win_file: path={{win_output_dir}}/bar.txt state=absent
- name: remove file baz.txt
- win_file: path={{win_output_dir}}\foo.txt state=absent
+ win_file: path={{win_output_dir}}/baz.txt state=absent
- name: win copy directory structure over
win_copy: src=foobar dest={{win_output_dir}}
- name: remove directory foobar
- win_file: path={{win_output_dir}}\foobar state=absent
+ win_file: path={{win_output_dir}}/foobar state=absent
register: file14_result
- debug: var=file14_result
diff --git a/test/integration/roles/test_win_get_url/defaults/main.yml b/test/integration/roles/test_win_get_url/defaults/main.yml
new file mode 100644
index 00000000000..6e507ecf31c
--- /dev/null
+++ b/test/integration/roles/test_win_get_url/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+
+test_win_get_url_link: http://docs.ansible.com
+test_win_get_url_path: "C:\\Users\\{{ansible_ssh_user}}\\docs_index.html"
+test_win_get_url_invalid_link: http://docs.ansible.com/skynet_module.html
+test_win_get_url_invalid_path: "Q:\\Filez\\Cyberdyne.html"
+test_win_get_url_dir_path: "C:\\Users\\{{ansible_ssh_user}}"
diff --git a/test/integration/roles/test_win_get_url/tasks/main.yml b/test/integration/roles/test_win_get_url/tasks/main.yml
index 26fb334c95a..b0705eabd56 100644
--- a/test/integration/roles/test_win_get_url/tasks/main.yml
+++ b/test/integration/roles/test_win_get_url/tasks/main.yml
@@ -17,19 +17,81 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
- name: remove test file if it exists
- raw: PowerShell -Command {Remove-Item "C:\Users\Administrator\win_get_url.jpg" -Force}
+ raw: >
+ PowerShell -Command Remove-Item "{{test_win_get_url_path}}" -Force
+ ignore_errors: true
- name: test win_get_url module
- win_get_url: url=http://placehold.it/10x10.jpg dest='C:\Users\Administrator\win_get_url.jpg'
+ win_get_url:
+ url: "{{test_win_get_url_link}}"
+ dest: "{{test_win_get_url_path}}"
register: win_get_url_result
-- name: check win_get_url result
+- name: check that url was downloaded
assert:
that:
- "not win_get_url_result|failed"
- "win_get_url_result|changed"
+ - "win_get_url_result.win_get_url.url"
+ - "win_get_url_result.win_get_url.dest"
-# FIXME:
-# - Test invalid url
-# - Test invalid dest, when dest is directory
-# - Test idempotence when downloading same url/dest (not yet implemented)
+- name: test win_get_url module again (force should be yes by default)
+ win_get_url:
+ url: "{{test_win_get_url_link}}"
+ dest: "{{test_win_get_url_path}}"
+ register: win_get_url_result_again
+
+- name: check that url was downloaded again
+ assert:
+ that:
+ - "not win_get_url_result_again|failed"
+ - "win_get_url_result_again|changed"
+
+- name: test win_get_url module again with force=no
+ win_get_url:
+ url: "{{test_win_get_url_link}}"
+ dest: "{{test_win_get_url_path}}"
+ force: no
+ register: win_get_url_result_noforce
+
+- name: check that url was not downloaded again
+ assert:
+ that:
+ - "not win_get_url_result_noforce|failed"
+ - "not win_get_url_result_noforce|changed"
+
+- name: test win_get_url module with url that returns a 404
+ win_get_url:
+ url: "{{test_win_get_url_invalid_link}}"
+ dest: "{{test_win_get_url_path}}"
+ register: win_get_url_result_invalid_link
+ ignore_errors: true
+
+- name: check that the download failed for an invalid url
+ assert:
+ that:
+ - "win_get_url_result_invalid_link|failed"
+
+- name: test win_get_url module with an invalid path
+ win_get_url:
+ url: "{{test_win_get_url_link}}"
+ dest: "{{test_win_get_url_invalid_path}}"
+ register: win_get_url_result_invalid_path
+ ignore_errors: true
+
+- name: check that the download failed for an invalid path
+ assert:
+ that:
+ - "win_get_url_result_invalid_path|failed"
+
+- name: test win_get_url module with a valid path that is a directory
+ win_get_url:
+ url: "{{test_win_get_url_link}}"
+ dest: "{{test_win_get_url_dir_path}}"
+ register: win_get_url_result_dir_path
+ ignore_errors: true
+
+- name: check that the download failed if dest is a directory
+ assert:
+ that:
+ - "win_get_url_result_dir_path|failed"
diff --git a/test/integration/roles/test_win_msi/tasks/main.yml b/test/integration/roles/test_win_msi/tasks/main.yml
index d0d7034d782..85c9957a1d7 100644
--- a/test/integration/roles/test_win_msi/tasks/main.yml
+++ b/test/integration/roles/test_win_msi/tasks/main.yml
@@ -17,7 +17,7 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
- name: use win_get_url module to download msi
- win_get_url: url=http://downloads.sourceforge.net/project/sevenzip/7-Zip/9.22/7z922-x64.msi dest='C:\7z922-x64.msi'
+ win_get_url: url=http://downloads.sourceforge.net/project/sevenzip/7-Zip/9.22/7z922-x64.msi dest='C:\\7z922-x64.msi'
register: win_get_url_result
- name: install 7zip msi
diff --git a/test/integration/roles/test_win_ping/tasks/main.yml b/test/integration/roles/test_win_ping/tasks/main.yml
index 8bcbe910c4e..f17a4a92278 100644
--- a/test/integration/roles/test_win_ping/tasks/main.yml
+++ b/test/integration/roles/test_win_ping/tasks/main.yml
@@ -28,7 +28,7 @@
- "win_ping_result.ping == 'pong'"
- name: test win_ping with data
- win_ping: data=blah
+ win_ping: data=☠
register: win_ping_with_data_result
- name: check win_ping result with data
@@ -36,21 +36,11 @@
that:
- "not win_ping_with_data_result|failed"
- "not win_ping_with_data_result|changed"
- - "win_ping_with_data_result.ping == 'blah'"
+ - "win_ping_with_data_result.ping == '☠'"
-#- name: test local ping (should use default ping)
-# local_action: ping
-# register: local_ping_result
-
-#- name: check local ping result
-# assert:
-# that:
-# - "not local_ping_result|failed"
-# - "not local_ping_result|changed"
-# - "local_ping_result.ping == 'pong'"
-
-- name: test win_ping.ps1 with data
- win_ping.ps1: data=bleep
+- name: test win_ping.ps1 with data as complex args
+ win_ping.ps1:
+ data: bleep
register: win_ping_ps1_result
- name: check win_ping.ps1 result with data
@@ -60,13 +50,32 @@
- "not win_ping_ps1_result|changed"
- "win_ping_ps1_result.ping == 'bleep'"
-#- name: test win_ping with invalid args
-# win_ping: arg=invalid
-# register: win_ping_ps1_invalid_args_result
-
-#- name: check that win_ping.ps1 with invalid args fails
-# assert:
-# that:
-# - "win_ping_ps1_invalid_args_result|failed"
-# - "win_ping_ps1_invalid_args_result.msg"
+- name: test win_ping with extra args to verify that v2 module replacer escaping works as expected
+ win_ping:
+ data: bloop
+ a_null: null
+ a_boolean: true
+ another_boolean: false
+ a_number: 299792458
+ another_number: 22.7
+ yet_another_number: 6.022e23
+ a_string: |
+ it's magic
+ "@'
+ '@"
+ an_array:
+ - first
+ - 2
+ - 3.0
+ an_object:
+ - the_thing: the_value
+ - the_other_thing: 0
+ - the_list_of_things: [1, 2, 3, 5]
+ register: win_ping_extra_args_result
+- name: check that win_ping with extra args succeeds and ignores everything except data
+ assert:
+ that:
+ - "not win_ping_extra_args_result|failed"
+ - "not win_ping_extra_args_result|changed"
+ - "win_ping_extra_args_result.ping == 'bloop'"
diff --git a/test/integration/roles/test_win_raw/tasks/main.yml b/test/integration/roles/test_win_raw/tasks/main.yml
index c51ba4b2cc8..8a5412c3812 100644
--- a/test/integration/roles/test_win_raw/tasks/main.yml
+++ b/test/integration/roles/test_win_raw/tasks/main.yml
@@ -72,7 +72,7 @@
- "not unknown_result|changed"
- name: run a command that takes longer than 60 seconds
- raw: PowerShell -Command Start-Sleep -s 75
+ raw: Start-Sleep -s 75
register: sleep_command
- name: assert that the sleep command ran
@@ -83,3 +83,12 @@
- "not sleep_command.stderr"
- "not sleep_command|failed"
- "not sleep_command|changed"
+
+- name: run a raw command with key=value arguments
+ raw: echo wwe=raw
+ register: raw_result
+
+- name: make sure raw is really raw and not removing key=value arguments
+ assert:
+ that:
+ - "raw_result.stdout_lines[0] == 'wwe=raw'"
diff --git a/test/integration/roles/test_win_script/defaults/main.yml b/test/integration/roles/test_win_script/defaults/main.yml
index a2c6475e751..90b756af0ab 100644
--- a/test/integration/roles/test_win_script/defaults/main.yml
+++ b/test/integration/roles/test_win_script/defaults/main.yml
@@ -3,3 +3,4 @@
# Parameters to pass to test scripts.
test_win_script_value: VaLuE
test_win_script_splat: "@{This='THIS'; That='THAT'; Other='OTHER'}"
+test_win_script_filename: "C:/Users/{{ansible_ssh_user}}/testing_win_script.txt"
diff --git a/test/integration/roles/test_win_script/files/test_script.cmd b/test/integration/roles/test_win_script/files/test_script.cmd
new file mode 100644
index 00000000000..0e36312d0f6
--- /dev/null
+++ b/test/integration/roles/test_win_script/files/test_script.cmd
@@ -0,0 +1,2 @@
+@ECHO OFF
+ECHO We can even run a batch file with cmd extension!
diff --git a/test/integration/roles/test_win_script/files/test_script_creates_file.ps1 b/test/integration/roles/test_win_script/files/test_script_creates_file.ps1
new file mode 100644
index 00000000000..47f85a2d495
--- /dev/null
+++ b/test/integration/roles/test_win_script/files/test_script_creates_file.ps1
@@ -0,0 +1,3 @@
+# Test script to create a file.
+
+echo $null > $args[0]
diff --git a/test/integration/roles/test_win_script/files/test_script_removes_file.ps1 b/test/integration/roles/test_win_script/files/test_script_removes_file.ps1
new file mode 100644
index 00000000000..f0549a5b3bc
--- /dev/null
+++ b/test/integration/roles/test_win_script/files/test_script_removes_file.ps1
@@ -0,0 +1,3 @@
+# Test script to remove a file.
+
+Remove-Item $args[0] -Force
diff --git a/test/integration/roles/test_win_script/tasks/main.yml b/test/integration/roles/test_win_script/tasks/main.yml
index e1e5f25611d..313569facea 100644
--- a/test/integration/roles/test_win_script/tasks/main.yml
+++ b/test/integration/roles/test_win_script/tasks/main.yml
@@ -30,24 +30,24 @@
- "not test_script_result|failed"
- "test_script_result|changed"
-- name: run test script that takes arguments
- script: test_script_with_args.ps1 /this /that /other
+- name: run test script that takes arguments including a unicode char
+ script: test_script_with_args.ps1 /this /that /Ӧther
register: test_script_with_args_result
-- name: check that script ran and received arguments
+- name: check that script ran and received arguments and returned unicode
assert:
that:
- "test_script_with_args_result.rc == 0"
- "test_script_with_args_result.stdout"
- "test_script_with_args_result.stdout_lines[0] == '/this'"
- "test_script_with_args_result.stdout_lines[1] == '/that'"
- - "test_script_with_args_result.stdout_lines[2] == '/other'"
+ - "test_script_with_args_result.stdout_lines[2] == '/Ӧther'"
- "not test_script_with_args_result.stderr"
- "not test_script_with_args_result|failed"
- "test_script_with_args_result|changed"
- name: run test script that takes parameters passed via splatting
- script: test_script_with_splatting.ps1 "@{ This = 'this'; That = '{{ test_win_script_value }}'; Other = 'other'}"
+ script: test_script_with_splatting.ps1 @{ This = 'this'; That = '{{ test_win_script_value }}'; Other = 'other'}
register: test_script_with_splatting_result
- name: check that script ran and received parameters via splatting
@@ -63,7 +63,7 @@
- "test_script_with_splatting_result|changed"
- name: run test script that takes splatted parameters from a variable
- script: test_script_with_splatting.ps1 {{ test_win_script_splat|quote }}
+ script: test_script_with_splatting.ps1 {{ test_win_script_splat }}
register: test_script_with_splatting2_result
- name: check that script ran and received parameters via splatting from a variable
@@ -92,6 +92,58 @@
- "test_script_with_errors_result|failed"
- "test_script_with_errors_result|changed"
+- name: cleanup test file if it exists
+ raw: Remove-Item "{{test_win_script_filename}}" -Force
+ ignore_errors: true
+
+- name: run test script that creates a file
+ script: test_script_creates_file.ps1 "{{test_win_script_filename}}" creates="{{test_win_script_filename}}"
+ register: test_script_creates_file_result
+
+- name: check that script ran and indicated a change
+ assert:
+ that:
+ - "test_script_creates_file_result.rc == 0"
+ - "not test_script_creates_file_result.stdout"
+ - "not test_script_creates_file_result.stderr"
+ - "not test_script_creates_file_result|failed"
+ - "test_script_creates_file_result|changed"
+
+- name: run test script that creates a file again
+ script: test_script_creates_file.ps1 "{{test_win_script_filename}}" creates="{{test_win_script_filename}}"
+ register: test_script_creates_file_again_result
+
+- name: check that the script did not run since the remote file exists
+ assert:
+ that:
+ - "not test_script_creates_file_again_result|failed"
+ - "not test_script_creates_file_again_result|changed"
+ - "test_script_creates_file_again_result|skipped"
+
+- name: run test script that removes a file
+ script: test_script_removes_file.ps1 "{{test_win_script_filename}}" removes="{{test_win_script_filename}}"
+ register: test_script_removes_file_result
+
+- name: check that the script ran since the remote file exists
+ assert:
+ that:
+ - "test_script_removes_file_result.rc == 0"
+ - "not test_script_removes_file_result.stdout"
+ - "not test_script_removes_file_result.stderr"
+ - "not test_script_removes_file_result|failed"
+ - "test_script_removes_file_result|changed"
+
+- name: run test script that removes a file again
+ script: test_script_removes_file.ps1 "{{test_win_script_filename}}" removes="{{test_win_script_filename}}"
+ register: test_script_removes_file_again_result
+
+- name: check that the script did not run since the remote file does not exist
+ assert:
+ that:
+ - "not test_script_removes_file_again_result|failed"
+ - "not test_script_removes_file_again_result|changed"
+ - "test_script_removes_file_again_result|skipped"
+
- name: run simple batch file
script: test_script.bat
register: test_batch_result
@@ -105,3 +157,17 @@
- "not test_batch_result.stderr"
- "not test_batch_result|failed"
- "test_batch_result|changed"
+
+- name: run simple batch file with .cmd extension
+ script: test_script.cmd
+ register: test_cmd_result
+
+- name: check that batch file with .cmd extension ran
+ assert:
+ that:
+ - "test_cmd_result.rc == 0"
+ - "test_cmd_result.stdout"
+ - "'cmd extension' in test_cmd_result.stdout"
+ - "not test_cmd_result.stderr"
+ - "not test_cmd_result|failed"
+ - "test_cmd_result|changed"
diff --git a/test/integration/roles/test_win_setup/tasks/main.yml b/test/integration/roles/test_win_setup/tasks/main.yml
index c2f4728b215..fb13da15424 100644
--- a/test/integration/roles/test_win_setup/tasks/main.yml
+++ b/test/integration/roles/test_win_setup/tasks/main.yml
@@ -20,7 +20,7 @@
action: setup
register: setup_result
-- name: check setup result
+- name: check windows setup result
assert:
that:
- "not setup_result|failed"
@@ -38,6 +38,8 @@
- "setup_result.ansible_facts.ansible_interfaces[0]"
- "setup_result.ansible_facts.ansible_interfaces[0].interface_name"
- "setup_result.ansible_facts.ansible_interfaces[0].interface_index"
+ - "setup_result.ansible_facts.ansible_architecture"
+ - "setup_result.ansible_facts.ansible_os_name"
- "setup_result.ansible_facts.ansible_powershell_version"
- name: check setup result only when using https
diff --git a/test/integration/roles/test_win_stat/tasks/main.yml b/test/integration/roles/test_win_stat/tasks/main.yml
index 5069f51a801..5197c27fef4 100644
--- a/test/integration/roles/test_win_stat/tasks/main.yml
+++ b/test/integration/roles/test_win_stat/tasks/main.yml
@@ -27,6 +27,12 @@
- "not win_stat_file.stat.isdir"
- "win_stat_file.stat.size > 0"
- "win_stat_file.stat.md5"
+ - "win_stat_file.stat.extension"
+ - "win_stat_file.stat.attributes"
+ - "win_stat_file.stat.owner"
+ - "win_stat_file.stat.creationtime"
+ - "win_stat_file.stat.lastaccesstime"
+ - "win_stat_file.stat.lastwritetime"
- "not win_stat_file|failed"
- "not win_stat_file|changed"
@@ -34,13 +40,19 @@
win_stat: path="C:\Windows\win.ini" get_md5=no
register: win_stat_file_no_md5
-- name: check win_stat file result without md
+- name: check win_stat file result without md5
assert:
that:
- "win_stat_file_no_md5.stat.exists"
- "not win_stat_file_no_md5.stat.isdir"
- "win_stat_file_no_md5.stat.size > 0"
- "not win_stat_file_no_md5.stat.md5|default('')"
+ - "win_stat_file_no_md5.stat.extension"
+ - "win_stat_file_no_md5.stat.attributes"
+ - "win_stat_file_no_md5.stat.owner"
+ - "win_stat_file_no_md5.stat.creationtime"
+ - "win_stat_file_no_md5.stat.lastaccesstime"
+ - "win_stat_file_no_md5.stat.lastwritetime"
- "not win_stat_file_no_md5|failed"
- "not win_stat_file_no_md5|changed"
@@ -53,6 +65,12 @@
that:
- "win_stat_dir.stat.exists"
- "win_stat_dir.stat.isdir"
+ - "win_stat_dir.stat.extension == ''"
+ - "win_stat_dir.stat.attributes"
+ - "win_stat_dir.stat.owner"
+ - "win_stat_dir.stat.creationtime"
+ - "win_stat_dir.stat.lastaccesstime"
+ - "win_stat_dir.stat.lastwritetime"
- "not win_stat_dir|failed"
- "not win_stat_dir|changed"
diff --git a/test/integration/roles/test_win_template/tasks/main.yml b/test/integration/roles/test_win_template/tasks/main.yml
index 9c2ea920ffa..c276b8d323d 100644
--- a/test/integration/roles/test_win_template/tasks/main.yml
+++ b/test/integration/roles/test_win_template/tasks/main.yml
@@ -39,13 +39,24 @@
that:
- "template_result.changed == true"
+- name: fill in a basic template again
+ win_template:
+ src: foo.j2
+ dest: "{{win_output_dir}}/foo.templated"
+ register: template_result2
+
+- name: verify that the template was not changed
+ assert:
+ that:
+ - "not template_result2|changed"
+
# VERIFY CONTENTS
- name: copy known good into place
- win_copy: src=foo.txt dest={{win_output_dir}}\foo.txt
+ win_copy: src=foo.txt dest={{win_output_dir}}\\foo.txt
- name: compare templated file to known good
- raw: fc.exe {{win_output_dir}}\foo.templated {{win_output_dir}}\foo.txt
+ raw: fc.exe {{win_output_dir}}\\foo.templated {{win_output_dir}}\\foo.txt
register: diff_result
- debug: var=diff_result
diff --git a/test/integration/roles/test_win_user/tasks/main.yml b/test/integration/roles/test_win_user/tasks/main.yml
index 0e22e332ae9..0316afb61b2 100644
--- a/test/integration/roles/test_win_user/tasks/main.yml
+++ b/test/integration/roles/test_win_user/tasks/main.yml
@@ -51,7 +51,7 @@
- "win_user_missing_query_result.state == 'absent'"
- name: test create user
- win_user: name="{{ test_win_user_name }}" password="{{ test_win_user_password }}" groups="Guests"
+ win_user: name="{{ test_win_user_name }}" password="{{ test_win_user_password }}" fullname="Test User" description="Test user account" groups="Guests"
register: win_user_create_result
- name: check user creation result
@@ -59,7 +59,8 @@
that:
- "win_user_create_result|changed"
- "win_user_create_result.name == '{{ test_win_user_name }}'"
- - "win_user_create_result.fullname == '{{ test_win_user_name }}'"
+ - "win_user_create_result.fullname == 'Test User'"
+ - "win_user_create_result.description == 'Test user account'"
- "win_user_create_result.path"
- "win_user_create_result.state == 'present'"
diff --git a/test/integration/test_filters.yml b/test/integration/test_filters.yml
new file mode 100644
index 00000000000..050a303f604
--- /dev/null
+++ b/test/integration/test_filters.yml
@@ -0,0 +1,5 @@
+- hosts: testhost
+ connection: local
+ gather_facts: yes
+ roles:
+ - { role: test_filters }
diff --git a/test/integration/test_force_handlers.yml b/test/integration/test_force_handlers.yml
index a700da08f0b..f7cadbd86d8 100644
--- a/test/integration/test_force_handlers.yml
+++ b/test/integration/test_force_handlers.yml
@@ -7,6 +7,8 @@
connection: local
roles:
- { role: test_force_handlers }
+ tasks:
+ - debug: msg="you should see this with --tags=normal"
- name: test force handlers (set to true)
tags: force_true_in_play
@@ -15,7 +17,7 @@
connection: local
force_handlers: True
roles:
- - { role: test_force_handlers }
+ - { role: test_force_handlers, tags: force_true_in_play }
- name: test force handlers (set to false)
@@ -25,4 +27,4 @@
connection: local
force_handlers: False
roles:
- - { role: test_force_handlers }
+ - { role: test_force_handlers, tags: force_false_in_play }
diff --git a/test/integration/test_group_by.yml b/test/integration/test_group_by.yml
index 0f4ff413879..87d1809e8da 100644
--- a/test/integration/test_group_by.yml
+++ b/test/integration/test_group_by.yml
@@ -16,19 +16,25 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-- hosts: lamini
+- name: Create overall groups
+ hosts: lamini
gather_facts: false
tasks:
+ - debug: var=genus
- name: group by genus
group_by: key={{ genus }}
+
- name: group by first three letters of genus with key in quotes
group_by: key="{{ genus | truncate(3, true, '') }}"
+
- name: group by first two letters of genus with key not in quotes
group_by: key={{ genus | truncate(2, true, '') }}
+
- name: group by genus in uppercase using complex args
group_by: { key: "{{ genus | upper() }}" }
-- hosts: vicugna
+- name: Vicugna group validation
+ hosts: vicugna
gather_facts: false
tasks:
- name: verify that only the alpaca is in this group
@@ -36,7 +42,8 @@
- name: set a fact to check that we ran this play
set_fact: genus_vicugna=true
-- hosts: lama
+- name: Lama group validation
+ hosts: lama
gather_facts: false
tasks:
- name: verify that only the llama is in this group
@@ -44,7 +51,8 @@
- name: set a fact to check that we ran this play
set_fact: genus_lama=true
-- hosts: vic
+- name: Vic group validation
+ hosts: vic
gather_facts: false
tasks:
- name: verify that only the alpaca is in this group
@@ -52,7 +60,8 @@
- name: set a fact to check that we ran this play
set_fact: genus_vic=true
-- hosts: lam
+- name: Lam group validation
+ hosts: lam
gather_facts: false
tasks:
- name: verify that only the llama is in this group
@@ -60,7 +69,8 @@
- name: set a fact to check that we ran this play
set_fact: genus_lam=true
-- hosts: vi
+- name: Vi group validation
+ hosts: vi
gather_facts: false
tasks:
- name: verify that only the alpaca is in this group
@@ -68,7 +78,8 @@
- name: set a fact to check that we ran this play
set_fact: genus_vi=true
-- hosts: la
+- name: La group validation
+ hosts: la
gather_facts: false
tasks:
- name: verify that only the llama is in this group
@@ -76,7 +87,8 @@
- name: set a fact to check that we ran this play
set_fact: genus_la=true
-- hosts: VICUGNA
+- name: VICUGNA group validation
+ hosts: VICUGNA
gather_facts: false
tasks:
- name: verify that only the alpaca is in this group
@@ -84,7 +96,8 @@
- name: set a fact to check that we ran this play
set_fact: genus_VICUGNA=true
-- hosts: LAMA
+- name: LAMA group validation
+ hosts: LAMA
gather_facts: false
tasks:
- name: verify that only the llama is in this group
@@ -92,19 +105,22 @@
- name: set a fact to check that we ran this play
set_fact: genus_LAMA=true
-- hosts: 'genus'
+- name: genus group validation (expect skipped)
+ hosts: 'genus'
gather_facts: false
tasks:
- name: no hosts should match this group
fail: msg="should never get here"
-- hosts: alpaca
+- name: alpaca validation of groups
+ hosts: alpaca
gather_facts: false
tasks:
- name: check that alpaca matched all four groups
assert: { that: ["genus_vicugna", "genus_vic", "genus_vi", "genus_VICUGNA"] }
-- hosts: llama
+- name: llama validation of groups
+ hosts: llama
gather_facts: false
tasks:
- name: check that llama matched all four groups
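
For reference, group_by builds host groups at runtime from a templated key, and later plays can then target those generated groups by name, which is exactly what the renamed validation plays above verify. A minimal sketch, assuming a per-host genus inventory variable:

- hosts: all
  gather_facts: false
  tasks:
    - name: bucket each host by its genus variable
      group_by: key={{ genus }}

- hosts: lama          # group created at runtime by the play above
  gather_facts: false
  tasks:
    - debug: msg="only hosts whose genus is lama end up here"
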
diff --git a/test/integration/test_handlers.yml b/test/integration/test_handlers.yml
index 6a5366408c7..dafa9ceebe6 100644
--- a/test/integration/test_handlers.yml
+++ b/test/integration/test_handlers.yml
@@ -4,7 +4,7 @@
gather_facts: False
connection: local
roles:
- - { role: test_handlers_meta }
+ - { role: test_handlers_meta, tags: ['scenario1'] }
- name: verify final handler was run
hosts: A
@@ -16,6 +16,7 @@
that:
- "not hostvars[inventory_hostname]['handler1_called']"
- "'handler2_called' in hostvars[inventory_hostname]"
+ tags: ['scenario1']
- name: test handlers
hosts: testgroup
diff --git a/test/integration/test_includes2.yml b/test/integration/test_includes2.yml
index 9e8331ee180..1b15682d70f 100644
--- a/test/integration/test_includes2.yml
+++ b/test/integration/test_includes2.yml
@@ -14,9 +14,9 @@
- { role: test_includes, tags: test_includes }
tasks:
- include: roles/test_includes/tasks/not_a_role_task.yml
+ - include: roles/test_includes/tasks/empty.yml
- assert:
that:
- "ca == 33000"
- "cb == 33001"
- "cc == 33002"
-
diff --git a/test/integration/test_var_precedence.yml b/test/integration/test_var_precedence.yml
index 8bddfff4473..ae4b4cfea16 100644
--- a/test/integration/test_var_precedence.yml
+++ b/test/integration/test_var_precedence.yml
@@ -36,7 +36,7 @@
- hosts: inven_overridehosts
vars_files:
- - "{{ var_dir }}/test_var_precedence.yml"
+ - "test_var_precedence.yml"
roles:
- role: test_var_precedence_inven_override
foo: bar
diff --git a/test/integration/test_winrm.yml b/test/integration/test_winrm.yml
index 69d3b652a6f..b249224cb8a 100644
--- a/test/integration/test_winrm.yml
+++ b/test/integration/test_winrm.yml
@@ -18,6 +18,7 @@
- hosts: windows
gather_facts: false
+ max_fail_percentage: 1
roles:
- { role: test_win_raw, tags: test_win_raw }
- { role: test_win_script, tags: test_win_script }
diff --git a/v2/test/__init__.py b/test/units/__init__.py
similarity index 100%
rename from v2/test/__init__.py
rename to test/units/__init__.py
diff --git a/v2/test/errors/__init__.py b/test/units/errors/__init__.py
similarity index 100%
rename from v2/test/errors/__init__.py
rename to test/units/errors/__init__.py
diff --git a/v2/test/errors/test_errors.py b/test/units/errors/test_errors.py
similarity index 100%
rename from v2/test/errors/test_errors.py
rename to test/units/errors/test_errors.py
diff --git a/v2/test/executor/__init__.py b/test/units/executor/__init__.py
similarity index 100%
rename from v2/test/executor/__init__.py
rename to test/units/executor/__init__.py
diff --git a/v2/test/executor/test_play_iterator.py b/test/units/executor/test_play_iterator.py
similarity index 59%
rename from v2/test/executor/test_play_iterator.py
rename to test/units/executor/test_play_iterator.py
index 47c0352b25a..3b30f9c11ca 100644
--- a/v2/test/executor/test_play_iterator.py
+++ b/test/units/executor/test_play_iterator.py
@@ -25,8 +25,9 @@ from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook import Playbook
+from ansible.playbook.play_context import PlayContext
-from test.mock.loader import DictDataLoader
+from units.mock.loader import DictDataLoader
class TestPlayIterator(unittest.TestCase):
@@ -67,19 +68,46 @@ class TestPlayIterator(unittest.TestCase):
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
- itr = PlayIterator(inventory, p._entries[0])
- task = itr.get_next_task_for_host(hosts[0])
- print(task)
+ play_context = PlayContext(play=p._entries[0])
+
+ itr = PlayIterator(
+ inventory=inventory,
+ play=p._entries[0],
+ play_context=play_context,
+ all_vars=dict(),
+ )
+
+ # pre task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ # implicit meta: flush_handlers
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
- task = itr.get_next_task_for_host(hosts[0])
- print(task)
+ self.assertEqual(task.action, 'meta')
+ # role task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
- task = itr.get_next_task_for_host(hosts[0])
- print(task)
+ self.assertEqual(task.action, 'debug')
+ self.assertIsNotNone(task._role)
+ # regular play task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
- task = itr.get_next_task_for_host(hosts[0])
- print(task)
+ self.assertEqual(task.action, 'debug')
+ self.assertIsNone(task._role)
+ # implicit meta: flush_handlers
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
- task = itr.get_next_task_for_host(hosts[0])
- print(task)
+ self.assertEqual(task.action, 'meta')
+ # post task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ # implicit meta: flush_handlers
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'meta')
+ # end of iteration
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNone(task)
+
diff --git a/test/units/executor/test_task_executor.py b/test/units/executor/test_task_executor.py
new file mode 100644
index 00000000000..0300b7ad07e
--- /dev/null
+++ b/test/units/executor/test_task_executor.py
@@ -0,0 +1,325 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.compat.tests import unittest
+from ansible.compat.tests.mock import patch, MagicMock
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.executor.task_executor import TaskExecutor
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins import action_loader, lookup_loader
+
+from units.mock.loader import DictDataLoader
+
+class TestTaskExecutor(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_task_executor_init(self):
+ fake_loader = DictDataLoader({})
+ mock_host = MagicMock()
+ mock_task = MagicMock()
+ mock_play_context = MagicMock()
+ mock_shared_loader = MagicMock()
+ new_stdin = None
+ job_vars = dict()
+ te = TaskExecutor(
+ host = mock_host,
+ task = mock_task,
+ job_vars = job_vars,
+ play_context = mock_play_context,
+ new_stdin = new_stdin,
+ loader = fake_loader,
+ shared_loader_obj = mock_shared_loader,
+ )
+
+ def test_task_executor_run(self):
+ fake_loader = DictDataLoader({})
+
+ mock_host = MagicMock()
+
+ mock_task = MagicMock()
+ mock_task._role._role_path = '/path/to/role/foo'
+
+ mock_play_context = MagicMock()
+
+ mock_shared_loader = MagicMock()
+
+ new_stdin = None
+ job_vars = dict()
+
+ te = TaskExecutor(
+ host = mock_host,
+ task = mock_task,
+ job_vars = job_vars,
+ play_context = mock_play_context,
+ new_stdin = new_stdin,
+ loader = fake_loader,
+ shared_loader_obj = mock_shared_loader,
+ )
+
+ te._get_loop_items = MagicMock(return_value=None)
+ te._execute = MagicMock(return_value=dict())
+ res = te.run()
+
+ te._get_loop_items = MagicMock(return_value=[])
+ res = te.run()
+
+ te._get_loop_items = MagicMock(return_value=['a','b','c'])
+ te._run_loop = MagicMock(return_value=[dict(item='a', changed=True), dict(item='b', failed=True), dict(item='c')])
+ res = te.run()
+
+ te._get_loop_items = MagicMock(side_effect=AnsibleError(""))
+ res = te.run()
+ self.assertIn("failed", res)
+
+ def test_task_executor_get_loop_items(self):
+ fake_loader = DictDataLoader({})
+
+ mock_host = MagicMock()
+
+ mock_task = MagicMock()
+ mock_task.loop = 'items'
+ mock_task.loop_args = ['a', 'b', 'c']
+
+ mock_play_context = MagicMock()
+
+ mock_shared_loader = MagicMock()
+ mock_shared_loader.lookup_loader = lookup_loader
+
+ new_stdin = None
+ job_vars = dict()
+
+ te = TaskExecutor(
+ host = mock_host,
+ task = mock_task,
+ job_vars = job_vars,
+ play_context = mock_play_context,
+ new_stdin = new_stdin,
+ loader = fake_loader,
+ shared_loader_obj = mock_shared_loader,
+ )
+
+ items = te._get_loop_items()
+ self.assertEqual(items, ['a', 'b', 'c'])
+
+ def test_task_executor_run_loop(self):
+ items = ['a', 'b', 'c']
+
+ fake_loader = DictDataLoader({})
+
+ mock_host = MagicMock()
+
+ def _copy():
+ new_item = MagicMock()
+ return new_item
+
+ mock_task = MagicMock()
+ mock_task.copy.side_effect = _copy
+
+ mock_play_context = MagicMock()
+
+ mock_shared_loader = MagicMock()
+
+ new_stdin = None
+ job_vars = dict()
+
+ te = TaskExecutor(
+ host = mock_host,
+ task = mock_task,
+ job_vars = job_vars,
+ play_context = mock_play_context,
+ new_stdin = new_stdin,
+ loader = fake_loader,
+ shared_loader_obj = mock_shared_loader,
+ )
+
+ def _execute(variables):
+ return dict(item=variables.get('item'))
+
+ te._squash_items = MagicMock(return_value=items)
+ te._execute = MagicMock(side_effect=_execute)
+
+ res = te._run_loop(items)
+ self.assertEqual(len(res), 3)
+
+ def test_task_executor_squash_items(self):
+ items = ['a', 'b', 'c']
+
+ fake_loader = DictDataLoader({})
+
+ mock_host = MagicMock()
+
+ def _evaluate_conditional(templar, variables):
+ item = variables.get('item')
+ if item == 'b':
+ return False
+ return True
+
+ mock_task = MagicMock()
+ mock_task.evaluate_conditional.side_effect = _evaluate_conditional
+
+ mock_play_context = MagicMock()
+
+ mock_shared_loader = None
+
+ new_stdin = None
+ job_vars = dict()
+
+ te = TaskExecutor(
+ host = mock_host,
+ task = mock_task,
+ job_vars = job_vars,
+ play_context = mock_play_context,
+ new_stdin = new_stdin,
+ loader = fake_loader,
+ shared_loader_obj = mock_shared_loader,
+ )
+
+ mock_task.action = 'foo'
+ new_items = te._squash_items(items=items, variables=job_vars)
+ self.assertEqual(new_items, ['a', 'b', 'c'])
+
+ mock_task.action = 'yum'
+ new_items = te._squash_items(items=items, variables=job_vars)
+ self.assertEqual(new_items, ['a,c'])
+
+ def test_task_executor_execute(self):
+ fake_loader = DictDataLoader({})
+
+ mock_host = MagicMock()
+
+ mock_task = MagicMock()
+ mock_task.args = dict()
+ mock_task.retries = 0
+ mock_task.delay = -1
+ mock_task.register = 'foo'
+ mock_task.until = None
+ mock_task.changed_when = None
+ mock_task.failed_when = None
+ mock_task.post_validate.return_value = None
+
+ mock_play_context = MagicMock()
+ mock_play_context.post_validate.return_value = None
+ mock_play_context.update_vars.return_value = None
+
+ mock_connection = MagicMock()
+ mock_connection.set_host_overrides.return_value = None
+ mock_connection._connect.return_value = None
+
+ mock_action = MagicMock()
+
+ shared_loader = None
+ new_stdin = None
+ job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
+
+ te = TaskExecutor(
+ host = mock_host,
+ task = mock_task,
+ job_vars = job_vars,
+ play_context = mock_play_context,
+ new_stdin = new_stdin,
+ loader = fake_loader,
+ shared_loader_obj = shared_loader,
+ )
+
+ te._get_connection = MagicMock(return_value=mock_connection)
+ te._get_action_handler = MagicMock(return_value=mock_action)
+
+ mock_action.run.return_value = dict(ansible_facts=dict())
+ res = te._execute()
+
+ mock_task.changed_when = "1 == 1"
+ res = te._execute()
+
+ mock_task.changed_when = None
+ mock_task.failed_when = "1 == 1"
+ res = te._execute()
+
+ mock_task.failed_when = None
+ mock_task.evaluate_conditional.return_value = False
+ res = te._execute()
+
+ mock_task.evaluate_conditional.return_value = True
+ mock_task.args = dict(_raw_params='foo.yml', a='foo', b='bar')
+ mock_task.action = 'include'
+ res = te._execute()
+
+ def test_task_executor_poll_async_result(self):
+ fake_loader = DictDataLoader({})
+
+ mock_host = MagicMock()
+
+ mock_task = MagicMock()
+ mock_task.async = 3
+ mock_task.poll = 1
+
+ mock_play_context = MagicMock()
+
+ mock_connection = MagicMock()
+
+ mock_action = MagicMock()
+
+ shared_loader = None
+ new_stdin = None
+ job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
+
+ te = TaskExecutor(
+ host = mock_host,
+ task = mock_task,
+ job_vars = job_vars,
+ play_context = mock_play_context,
+ new_stdin = new_stdin,
+ loader = fake_loader,
+ shared_loader_obj = shared_loader,
+ )
+
+ te._connection = MagicMock()
+
+ def _get(*args, **kwargs):
+ mock_action = MagicMock()
+ mock_action.run.return_value = dict(stdout='')
+ return mock_action
+
+ # testing with some bad values in the result passed to poll async,
+ # and with a bad value returned from the mock action
+ with patch.object(action_loader, 'get', _get):
+ mock_templar = MagicMock()
+ res = te._poll_async_result(result=dict(), templar=mock_templar)
+ self.assertIn('failed', res)
+ res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
+ self.assertIn('failed', res)
+
+ def _get(*args, **kwargs):
+ mock_action = MagicMock()
+ mock_action.run.return_value = dict(finished=1)
+ return mock_action
+
+ # now testing with good values
+ with patch.object(action_loader, 'get', _get):
+ mock_templar = MagicMock()
+ res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
+ self.assertEqual(res, dict(finished=1))
+
diff --git a/v2/ansible/modules/__init__.py b/test/units/mock/__init__.py
similarity index 100%
rename from v2/ansible/modules/__init__.py
rename to test/units/mock/__init__.py
diff --git a/v2/test/mock/loader.py b/test/units/mock/loader.py
similarity index 90%
rename from v2/test/mock/loader.py
rename to test/units/mock/loader.py
index cf9d7ea72d0..f44df2efdbc 100644
--- a/v2/test/mock/loader.py
+++ b/test/units/mock/loader.py
@@ -21,6 +21,7 @@ __metaclass__ = type
import os
+from ansible.errors import AnsibleParserError
from ansible.parsing import DataLoader
class DictDataLoader(DataLoader):
@@ -38,6 +39,12 @@ class DictDataLoader(DataLoader):
return self.load(self._file_mapping[path], path)
return None
+ def _get_file_contents(self, path):
+ if path in self._file_mapping:
+ return (self._file_mapping[path], False)
+ else:
+ raise AnsibleParserError("file not found: %s" % path)
+
def path_exists(self, path):
return path in self._file_mapping or path in self._known_directories
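
DictDataLoader keeps the loader's file contents in an in-memory dict keyed by path, so unit tests can hand playbook text to the code under test without touching the filesystem; the new _get_file_contents mirrors that by returning the mapped content or raising AnsibleParserError. A minimal usage sketch, with a hypothetical path and playbook body:

from units.mock.loader import DictDataLoader

fake_loader = DictDataLoader({
    "/tmp/test_play.yml": "- hosts: all\n  tasks: []\n",  # hypothetical playbook text
})

assert fake_loader.path_exists("/tmp/test_play.yml")        # known path is reported as existing
assert not fake_loader.path_exists("/tmp/missing.yml")      # unknown paths are not
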
diff --git a/v2/test/parsing/__init__.py b/test/units/module_utils/__init__.py
similarity index 100%
rename from v2/test/parsing/__init__.py
rename to test/units/module_utils/__init__.py
diff --git a/test/units/module_utils/test_basic.py b/test/units/module_utils/test_basic.py
new file mode 100644
index 00000000000..e1e3399b930
--- /dev/null
+++ b/test/units/module_utils/test_basic.py
@@ -0,0 +1,756 @@
+# -*- coding: utf-8 -*-
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division)
+__metaclass__ = type
+
+import __builtin__
+import errno
+
+from nose.tools import timed
+
+from ansible.compat.tests import unittest
+from ansible.compat.tests.mock import patch, MagicMock, mock_open, Mock
+
+class TestModuleUtilsBasic(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_module_utils_basic_imports(self):
+ realimport = __builtin__.__import__
+
+ def _mock_import(name, *args, **kwargs):
+ if name == 'json':
+ raise ImportError()
+ realimport(name, *args, **kwargs)
+
+ with patch.object(__builtin__, '__import__', _mock_import, create=True) as m:
+ m('ansible.module_utils.basic')
+ __builtin__.__import__('ansible.module_utils.basic')
+
+ def test_module_utils_basic_get_platform(self):
+ with patch('platform.system', return_value='foo'):
+ from ansible.module_utils.basic import get_platform
+ self.assertEqual(get_platform(), 'foo')
+
+ def test_module_utils_basic_get_distribution(self):
+ from ansible.module_utils.basic import get_distribution
+
+ with patch('platform.system', return_value='Foo'):
+ self.assertEqual(get_distribution(), None)
+
+ with patch('platform.system', return_value='Linux'):
+ with patch('platform.linux_distribution', return_value=("foo", "1", "One")):
+ self.assertEqual(get_distribution(), "Foo")
+
+ with patch('os.path.isfile', return_value=True):
+ def _dist(distname='', version='', id='', supported_dists=(), full_distribution_name=1):
+ if supported_dists != ():
+ return ("AmazonFooBar", "", "")
+ else:
+ return ("", "", "")
+
+ with patch('platform.linux_distribution', side_effect=_dist):
+ self.assertEqual(get_distribution(), "Amazonfoobar")
+
+ def _dist(distname='', version='', id='', supported_dists=(), full_distribution_name=1):
+ if supported_dists != ():
+ return ("Bar", "2", "Two")
+ else:
+ return ("", "", "")
+
+ with patch('platform.linux_distribution', side_effect=_dist):
+ self.assertEqual(get_distribution(), "Bar")
+
+ with patch('platform.linux_distribution', side_effect=Exception("boo")):
+ with patch('platform.dist', return_value=("bar", "2", "Two")):
+ self.assertEqual(get_distribution(), "Bar")
+
+ def test_module_utils_basic_get_distribution_version(self):
+ from ansible.module_utils.basic import get_distribution_version
+
+ with patch('platform.system', return_value='Foo'):
+ self.assertEqual(get_distribution_version(), None)
+
+ with patch('platform.system', return_value='Linux'):
+ with patch('platform.linux_distribution', return_value=("foo", "1", "One")):
+ self.assertEqual(get_distribution_version(), "1")
+
+ with patch('os.path.isfile', return_value=True):
+ def _dist(distname='', version='', id='', supported_dists=(), full_distribution_name=1):
+ if supported_dists != ():
+ return ("AmazonFooBar", "2", "")
+ else:
+ return ("", "", "")
+
+ with patch('platform.linux_distribution', side_effect=_dist):
+ self.assertEqual(get_distribution_version(), "2")
+
+ with patch('platform.linux_distribution', side_effect=Exception("boo")):
+ with patch('platform.dist', return_value=("bar", "3", "Three")):
+ self.assertEqual(get_distribution_version(), "3")
+
+ def test_module_utils_basic_load_platform_subclass(self):
+ class LinuxTest:
+ pass
+
+ class Foo(LinuxTest):
+ platform = "Linux"
+ distribution = None
+
+ class Bar(LinuxTest):
+ platform = "Linux"
+ distribution = "Bar"
+
+ from ansible.module_utils.basic import load_platform_subclass
+
+ # match just the platform class, not a specific distribution
+ with patch('ansible.module_utils.basic.get_platform', return_value="Linux"):
+ with patch('ansible.module_utils.basic.get_distribution', return_value=None):
+ self.assertIs(type(load_platform_subclass(LinuxTest)), Foo)
+
+ # match both the distribution and platform class
+ with patch('ansible.module_utils.basic.get_platform', return_value="Linux"):
+ with patch('ansible.module_utils.basic.get_distribution', return_value="Bar"):
+ self.assertIs(type(load_platform_subclass(LinuxTest)), Bar)
+
+ # if neither match, the fallback should be the top-level class
+ with patch('ansible.module_utils.basic.get_platform', return_value="Foo"):
+ with patch('ansible.module_utils.basic.get_distribution', return_value=None):
+ self.assertIs(type(load_platform_subclass(LinuxTest)), LinuxTest)
+
+ def test_module_utils_basic_json_dict_converters(self):
+ from ansible.module_utils.basic import json_dict_unicode_to_bytes, json_dict_bytes_to_unicode
+
+ test_data = dict(
+ item1 = u"Fóo",
+ item2 = [u"Bár", u"Bam"],
+ item3 = dict(sub1=u"Súb"),
+ item4 = (u"föo", u"bär", u"©"),
+ item5 = 42,
+ )
+ res = json_dict_unicode_to_bytes(test_data)
+ res2 = json_dict_bytes_to_unicode(res)
+
+ self.assertEqual(test_data, res2)
+
+ def test_module_utils_basic_heuristic_log_sanitize(self):
+ from ansible.module_utils.basic import heuristic_log_sanitize
+
+ URL_SECRET = 'http://username:pas:word@foo.com/data'
+ SSH_SECRET = 'username:pas:word@foo.com/data'
+
+ def _gen_data(records, per_rec, top_level, secret_text):
+ hostvars = {'hostvars': {}}
+ for i in range(1, records, 1):
+ host_facts = {'host%s' % i:
+ {'pstack':
+ {'running': '875.1',
+ 'symlinked': '880.0',
+ 'tars': [],
+ 'versions': ['885.0']},
+ }}
+ if per_rec:
+ host_facts['host%s' % i]['secret'] = secret_text
+ hostvars['hostvars'].update(host_facts)
+ if top_level:
+ hostvars['secret'] = secret_text
+ return hostvars
+
+ url_data = repr(_gen_data(3, True, True, URL_SECRET))
+ ssh_data = repr(_gen_data(3, True, True, SSH_SECRET))
+
+ url_output = heuristic_log_sanitize(url_data)
+ ssh_output = heuristic_log_sanitize(ssh_data)
+
+ # Basic functionality: Successfully hid the password
+ try:
+ self.assertNotIn('pas:word', url_output)
+ self.assertNotIn('pas:word', ssh_output)
+
+ # Slightly more advanced, we hid all of the password despite the ":"
+ self.assertNotIn('pas', url_output)
+ self.assertNotIn('pas', ssh_output)
+ except AttributeError:
+ # python2.6 or less's unittest
+ self.assertFalse('pas:word' in url_output, '%s is present in %s' % ('"pas:word"', url_output))
+ self.assertFalse('pas:word' in ssh_output, '%s is present in %s' % ('"pas:word"', ssh_output))
+
+ self.assertFalse('pas' in url_output, '%s is present in %s' % ('"pas"', url_output))
+ self.assertFalse('pas' in ssh_output, '%s is present in %s' % ('"pas"', ssh_output))
+
+ # In this implementation we replace the password with 8 "*" which is
+ # also the length of our password. The url fields should be able to
+ # accurately detect where the password ends so the length should be
+ # the same:
+ self.assertEqual(len(url_output), len(url_data))
+
+ # ssh checking is harder as the heuristic is overzealous in many
+ # cases. Since the input will have at least one ":" present before
+ # the password we can tell some things about the beginning and end of
+ # the data, though:
+ self.assertTrue(ssh_output.startswith("{'"))
+ self.assertTrue(ssh_output.endswith("}"))
+ try:
+ self.assertIn(":********@foo.com/data'", ssh_output)
+ except AttributeError:
+ # python2.6 or less's unittest
+ self.assertTrue(":********@foo.com/data'" in ssh_output, '%s is not present in %s' % (":********@foo.com/data'", ssh_output))
+
+ def test_module_utils_basic_get_module_path(self):
+ from ansible.module_utils.basic import get_module_path
+ with patch('os.path.realpath', return_value='/path/to/foo/'):
+ self.assertEqual(get_module_path(), '/path/to/foo')
+
+ def test_module_utils_basic_ansible_module_creation(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ arg_spec = dict(
+ foo = dict(required=True),
+ bar = dict(),
+ bam = dict(),
+ baz = dict(),
+ )
+ mut_ex = (('bar', 'bam'),)
+ req_to = (('bam', 'baz'),)
+
+ # should test ok
+ basic.MODULE_COMPLEX_ARGS = '{"foo":"hello"}'
+ am = basic.AnsibleModule(
+ argument_spec = arg_spec,
+ mutually_exclusive = mut_ex,
+ required_together = req_to,
+ no_log=True,
+ check_invalid_arguments=False,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # FIXME: add asserts here to verify the basic config
+
+ # fail, because a required param was not specified
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ self.assertRaises(
+ SystemExit,
+ basic.AnsibleModule,
+ argument_spec = arg_spec,
+ mutually_exclusive = mut_ex,
+ required_together = req_to,
+ no_log=True,
+ check_invalid_arguments=False,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # fail because of mutually exclusive parameters
+ basic.MODULE_COMPLEX_ARGS = '{"foo":"hello", "bar": "bad", "bam": "bad"}'
+ self.assertRaises(
+ SystemExit,
+ basic.AnsibleModule,
+ argument_spec = arg_spec,
+ mutually_exclusive = mut_ex,
+ required_together = req_to,
+ no_log=True,
+ check_invalid_arguments=False,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # fail because a param required due to another param was not specified
+ basic.MODULE_COMPLEX_ARGS = '{"bam":"bad"}'
+ self.assertRaises(
+ SystemExit,
+ basic.AnsibleModule,
+ argument_spec = arg_spec,
+ mutually_exclusive = mut_ex,
+ required_together = req_to,
+ no_log=True,
+ check_invalid_arguments=False,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ def test_module_utils_basic_ansible_module_load_file_common_arguments(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ am.selinux_mls_enabled = MagicMock()
+ am.selinux_mls_enabled.return_value = True
+ am.selinux_default_context = MagicMock()
+ am.selinux_default_context.return_value = 'unconfined_u:object_r:default_t:s0'.split(':', 3)
+
+ # with no params, the result should be an empty dict
+ res = am.load_file_common_arguments(params=dict())
+ self.assertEqual(res, dict())
+
+ base_params = dict(
+ path = '/path/to/file',
+ mode = 0600,
+ owner = 'root',
+ group = 'root',
+ seuser = '_default',
+ serole = '_default',
+ setype = '_default',
+ selevel = '_default',
+ )
+
+ extended_params = base_params.copy()
+ extended_params.update(dict(
+ follow = True,
+ foo = 'bar',
+ ))
+
+ final_params = base_params.copy()
+ final_params.update(dict(
+ path = '/path/to/real_file',
+ secontext=['unconfined_u', 'object_r', 'default_t', 's0'],
+ ))
+
+ # with the proper params specified, the returned dictionary should represent
+ # only those params which have something to do with the file arguments, excluding
+ # other params and updated as required with proper values which may have been
+ # massaged by the method
+ with patch('os.path.islink', return_value=True):
+ with patch('os.path.realpath', return_value='/path/to/real_file'):
+ res = am.load_file_common_arguments(params=extended_params)
+ self.assertEqual(res, final_params)
+
+ def test_module_utils_basic_ansible_module_selinux_mls_enabled(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ basic.HAVE_SELINUX = False
+ self.assertEqual(am.selinux_mls_enabled(), False)
+
+ basic.HAVE_SELINUX = True
+ basic.selinux = Mock()
+ with patch.dict('sys.modules', {'selinux': basic.selinux}):
+ with patch('selinux.is_selinux_mls_enabled', return_value=0):
+ self.assertEqual(am.selinux_mls_enabled(), False)
+ with patch('selinux.is_selinux_mls_enabled', return_value=1):
+ self.assertEqual(am.selinux_mls_enabled(), True)
+ delattr(basic, 'selinux')
+
+ def test_module_utils_basic_ansible_module_selinux_initial_context(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ am.selinux_mls_enabled = MagicMock()
+ am.selinux_mls_enabled.return_value = False
+ self.assertEqual(am.selinux_initial_context(), [None, None, None])
+ am.selinux_mls_enabled.return_value = True
+ self.assertEqual(am.selinux_initial_context(), [None, None, None, None])
+
+ def test_module_utils_basic_ansible_module_selinux_enabled(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ # we first test the cases where the python selinux lib is
+ # not installed, which has two paths: one in which the system
+ # does have selinux installed (and the selinuxenabled command
+ # is present and returns 0 when run), or selinux is not installed
+ basic.HAVE_SELINUX = False
+ am.get_bin_path = MagicMock()
+ am.get_bin_path.return_value = '/path/to/selinuxenabled'
+ am.run_command = MagicMock()
+ am.run_command.return_value=(0, '', '')
+ self.assertRaises(SystemExit, am.selinux_enabled)
+ am.get_bin_path.return_value = None
+ self.assertEqual(am.selinux_enabled(), False)
+
+ # finally we test the case where the python selinux lib is installed,
+ # and both possibilities there (enabled vs. disabled)
+ basic.HAVE_SELINUX = True
+ basic.selinux = Mock()
+ with patch.dict('sys.modules', {'selinux': basic.selinux}):
+ with patch('selinux.is_selinux_enabled', return_value=0):
+ self.assertEqual(am.selinux_enabled(), False)
+ with patch('selinux.is_selinux_enabled', return_value=1):
+ self.assertEqual(am.selinux_enabled(), True)
+ delattr(basic, 'selinux')
+
+ def test_module_utils_basic_ansible_module_selinux_default_context(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ am.selinux_initial_context = MagicMock(return_value=[None, None, None, None])
+ am.selinux_enabled = MagicMock(return_value=True)
+
+ # we first test the cases where the python selinux lib is not installed
+ basic.HAVE_SELINUX = False
+ self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None])
+
+ # all following tests assume the python selinux bindings are installed
+ basic.HAVE_SELINUX = True
+
+ basic.selinux = Mock()
+
+ with patch.dict('sys.modules', {'selinux': basic.selinux}):
+ # next, we test with a mocked implementation of selinux.matchpathcon to simulate
+ # an actual context being found
+ with patch('selinux.matchpathcon', return_value=[0, 'unconfined_u:object_r:default_t:s0']):
+ self.assertEqual(am.selinux_default_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0'])
+
+ # we also test the case where matchpathcon returned a failure
+ with patch('selinux.matchpathcon', return_value=[-1, '']):
+ self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None])
+
+ # finally, we test where an OSError occurred during matchpathcon's call
+ with patch('selinux.matchpathcon', side_effect=OSError):
+ self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None])
+
+ delattr(basic, 'selinux')
+
+ def test_module_utils_basic_ansible_module_selinux_context(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ am.selinux_initial_context = MagicMock(return_value=[None, None, None, None])
+ am.selinux_enabled = MagicMock(return_value=True)
+
+ # we first test the cases where the python selinux lib is not installed
+ basic.HAVE_SELINUX = False
+ self.assertEqual(am.selinux_context(path='/foo/bar'), [None, None, None, None])
+
+ # all following tests assume the python selinux bindings are installed
+ basic.HAVE_SELINUX = True
+
+ basic.selinux = Mock()
+
+ with patch.dict('sys.modules', {'selinux': basic.selinux}):
+ # next, we test with a mocked implementation of selinux.lgetfilecon_raw to simulate
+ # an actual context being found
+ with patch('selinux.lgetfilecon_raw', return_value=[0, 'unconfined_u:object_r:default_t:s0']):
+ self.assertEqual(am.selinux_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0'])
+
+ # we also test the case where lgetfilecon_raw returned a failure
+ with patch('selinux.lgetfilecon_raw', return_value=[-1, '']):
+ self.assertEqual(am.selinux_context(path='/foo/bar'), [None, None, None, None])
+
+ # finally, we test where an OSError occurred during lgetfilecon_raw's call
+ e = OSError()
+ e.errno = errno.ENOENT
+ with patch('selinux.lgetfilecon_raw', side_effect=e):
+ self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar')
+
+ e = OSError()
+ with patch('selinux.lgetfilecon_raw', side_effect=e):
+ self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar')
+
+ delattr(basic, 'selinux')
+
+ def test_module_utils_basic_ansible_module_is_special_selinux_path(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ basic.SELINUX_SPECIAL_FS = 'nfs,nfsd,foos'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ def _mock_find_mount_point(path):
+ if path.startswith('/some/path'):
+ return '/some/path'
+ elif path.startswith('/weird/random/fstype'):
+ return '/weird/random/fstype'
+ return '/'
+
+ am.find_mount_point = MagicMock(side_effect=_mock_find_mount_point)
+ am.selinux_context = MagicMock(return_value=['foo_u', 'foo_r', 'foo_t', 's0'])
+
+ m = mock_open()
+ m.side_effect = OSError
+
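+ # if the mounts file cannot be read, the path is not treated as special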
+ with patch('__builtin__.open', m, create=True):
+ self.assertEqual(am.is_special_selinux_path('/some/path/that/should/be/nfs'), (False, None))
+
+ mount_data = [
+ '/dev/disk1 / ext4 rw,seclabel,relatime,data=ordered 0 0\n',
+ '1.1.1.1:/path/to/nfs /some/path nfs ro 0 0\n',
+ 'whatever /weird/random/fstype foos rw 0 0\n',
+ ]
+
+ # mock_open does not provide a working readlines() implementation
+ # by default, so we set its return value explicitly
+ m = mock_open(read_data=''.join(mount_data))
+ m.return_value.readlines.return_value = mount_data
+
+ with patch('__builtin__.open', m, create=True):
+ self.assertEqual(am.is_special_selinux_path('/some/random/path'), (False, None))
+ self.assertEqual(am.is_special_selinux_path('/some/path/that/should/be/nfs'), (True, ['foo_u', 'foo_r', 'foo_t', 's0']))
+ self.assertEqual(am.is_special_selinux_path('/weird/random/fstype/path'), (True, ['foo_u', 'foo_r', 'foo_t', 's0']))
+
+ def test_module_utils_basic_ansible_module_to_filesystem_str(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ self.assertEqual(am._to_filesystem_str(u'foo'), 'foo')
+ self.assertEqual(am._to_filesystem_str(u'föö'), 'f\xc3\xb6\xc3\xb6')
+
+ def test_module_utils_basic_ansible_module_user_and_group(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ mock_stat = MagicMock()
+ mock_stat.st_uid = 0
+ mock_stat.st_gid = 0
+
+ with patch('os.lstat', return_value=mock_stat):
+ self.assertEqual(am.user_and_group('/path/to/file'), (0, 0))
+
+ def test_module_utils_basic_ansible_module_find_mount_point(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ def _mock_ismount(path):
+ if path == '/':
+ return True
+ return False
+
+ with patch('os.path.ismount', side_effect=_mock_ismount):
+ self.assertEqual(am.find_mount_point('/root/fs/../mounted/path/to/whatever'), '/')
+
+ def _mock_ismount(path):
+ if path == '/subdir/mount':
+ return True
+ return False
+
+ with patch('os.path.ismount', side_effect=_mock_ismount):
+ self.assertEqual(am.find_mount_point('/subdir/mount/path/to/whatever'), '/subdir/mount')
+
+ def test_module_utils_basic_ansible_module_set_context_if_different(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ basic.HAS_SELINUX = False
+
+ am.selinux_enabled = MagicMock(return_value=False)
+ self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True), True)
+ self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), False)
+
+ basic.HAS_SELINUX = True
+
+ am.selinux_enabled = MagicMock(return_value=True)
+ am.selinux_context = MagicMock(return_value=['bar_u', 'bar_r', None, None])
+ am.is_special_selinux_path = MagicMock(return_value=(False, None))
+
+ basic.selinux = Mock()
+ with patch.dict('sys.modules', {'selinux': basic.selinux}):
+ with patch('selinux.lsetfilecon', return_value=0) as m:
+ self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True)
+ m.assert_called_with('/path/to/file', 'foo_u:foo_r:foo_t:s0')
+ m.reset_mock()
+ am.check_mode = True
+ self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True)
+ self.assertEqual(m.called, False)
+ am.check_mode = False
+
+ with patch('selinux.lsetfilecon', return_value=1) as m:
+ self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True)
+
+ with patch('selinux.lsetfilecon', side_effect=OSError) as m:
+ self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True)
+
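+ # when the path lives on a special selinux filesystem, the context
+ # returned by is_special_selinux_path is applied instead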
+ am.is_special_selinux_path = MagicMock(return_value=(True, ['sp_u', 'sp_r', 'sp_t', 's0']))
+
+ with patch('selinux.lsetfilecon', return_value=0) as m:
+ self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True)
+ m.assert_called_with('/path/to/file', 'sp_u:sp_r:sp_t:s0')
+
+ delattr(basic, 'selinux')
+
+ def test_module_utils_basic_ansible_module_set_owner_if_different(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ self.assertEqual(am.set_owner_if_different('/path/to/file', None, True), True)
+ self.assertEqual(am.set_owner_if_different('/path/to/file', None, False), False)
+
+ am.user_and_group = MagicMock(return_value=(500, 500))
+
+ with patch('os.lchown', return_value=None) as m:
+ self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True)
+ m.assert_called_with('/path/to/file', 0, -1)
+
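+ # a non-numeric owner is resolved to a uid via pwd.getpwnam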
+ def _mock_getpwnam(*args, **kwargs):
+ mock_pw = MagicMock()
+ mock_pw.pw_uid = 0
+ return mock_pw
+
+ m.reset_mock()
+ with patch('pwd.getpwnam', side_effect=_mock_getpwnam):
+ self.assertEqual(am.set_owner_if_different('/path/to/file', 'root', False), True)
+ m.assert_called_with('/path/to/file', 0, -1)
+
+ with patch('pwd.getpwnam', side_effect=KeyError):
+ self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False)
+
+ m.reset_mock()
+ am.check_mode = True
+ self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True)
+ self.assertEqual(m.called, False)
+ am.check_mode = False
+
+ with patch('os.lchown', side_effect=OSError) as m:
+ self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False)
+
+ def test_module_utils_basic_ansible_module_set_group_if_different(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ self.assertEqual(am.set_group_if_different('/path/to/file', None, True), True)
+ self.assertEqual(am.set_group_if_different('/path/to/file', None, False), False)
+
+ am.user_and_group = MagicMock(return_value=(500, 500))
+
+ with patch('os.lchown', return_value=None) as m:
+ self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True)
+ m.assert_called_with('/path/to/file', -1, 0)
+
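+ # a non-numeric group is resolved to a gid via grp.getgrnam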
+ def _mock_getgrnam(*args, **kwargs):
+ mock_gr = MagicMock()
+ mock_gr.gr_gid = 0
+ return mock_gr
+
+ m.reset_mock()
+ with patch('grp.getgrnam', side_effect=_mock_getgrnam):
+ self.assertEqual(am.set_group_if_different('/path/to/file', 'root', False), True)
+ m.assert_called_with('/path/to/file', -1, 0)
+
+ with patch('grp.getgrnam', side_effect=KeyError):
+ self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False)
+
+ m.reset_mock()
+ am.check_mode = True
+ self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True)
+ self.assertEqual(m.called, False)
+ am.check_mode = False
+
+ with patch('os.lchown', side_effect=OSError) as m:
+ self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False)
+
+ def test_module_utils_basic_ansible_module_set_mode_if_different(self):
+ from ansible.module_utils import basic
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ am = basic.AnsibleModule(
+ argument_spec = dict(),
+ )
+
+ mock_stat1 = MagicMock()
+ mock_stat1.st_mode = 0444
+ mock_stat2 = MagicMock()
+ mock_stat2.st_mode = 0660
+
+ with patch('os.lstat', side_effect=[mock_stat1]):
+ self.assertEqual(am.set_mode_if_different('/path/to/file', None, True), True)
+ with patch('os.lstat', side_effect=[mock_stat1]):
+ self.assertEqual(am.set_mode_if_different('/path/to/file', None, False), False)
+
+ with patch('os.lstat') as m:
+ with patch('os.lchmod', return_value=None, create=True) as m_os:
+ m.side_effect = [mock_stat1, mock_stat2, mock_stat2]
+ self.assertEqual(am.set_mode_if_different('/path/to/file', 0660, False), True)
+ m_os.assert_called_with('/path/to/file', 0660)
+
+ m.side_effect = [mock_stat1, mock_stat2, mock_stat2]
+ am._symbolic_mode_to_octal = MagicMock(return_value=0660)
+ self.assertEqual(am.set_mode_if_different('/path/to/file', 'o+w,g+w,a-r', False), True)
+ m_os.assert_called_with('/path/to/file', 0660)
+
+ m.side_effect = [mock_stat1, mock_stat2, mock_stat2]
+ am._symbolic_mode_to_octal = MagicMock(side_effect=Exception)
+ self.assertRaises(SystemExit, am.set_mode_if_different, '/path/to/file', 'o+w,g+w,a-r', False)
+
+ m.side_effect = [mock_stat1, mock_stat2, mock_stat2]
+ am.check_mode = True
+ self.assertEqual(am.set_mode_if_different('/path/to/file', 0660, False), True)
+ am.check_mode = False
+
+ # FIXME: this isn't working yet
+ #with patch('os.lstat', side_effect=[mock_stat1, mock_stat2]):
+ # with patch('os.lchmod', return_value=None) as m_os:
+ # del m_os.lchmod
+ # with patch('os.path.islink', return_value=False):
+ # with patch('os.chmod', return_value=None) as m_chmod:
+ # self.assertEqual(am.set_mode_if_different('/path/to/file/no_lchmod', 0660, False), True)
+ # m_chmod.assert_called_with('/path/to/file', 0660)
+ # with patch('os.path.islink', return_value=True):
+ # with patch('os.chmod', return_value=None) as m_chmod:
+ # with patch('os.stat', return_value=mock_stat2):
+ # self.assertEqual(am.set_mode_if_different('/path/to/file', 0660, False), True)
+ # m_chmod.assert_called_with('/path/to/file', 0660)
+
diff --git a/test/units/TestModuleUtilsDatabase.py b/test/units/module_utils/test_database.py
similarity index 100%
rename from test/units/TestModuleUtilsDatabase.py
rename to test/units/module_utils/test_database.py
diff --git a/v2/test/parsing/vault/__init__.py b/test/units/parsing/__init__.py
similarity index 100%
rename from v2/test/parsing/vault/__init__.py
rename to test/units/parsing/__init__.py
diff --git a/v2/test/parsing/test_data_loader.py b/test/units/parsing/test_data_loader.py
similarity index 100%
rename from v2/test/parsing/test_data_loader.py
rename to test/units/parsing/test_data_loader.py
diff --git a/v2/test/parsing/test_mod_args.py b/test/units/parsing/test_mod_args.py
similarity index 100%
rename from v2/test/parsing/test_mod_args.py
rename to test/units/parsing/test_mod_args.py
diff --git a/v2/test/parsing/test_splitter.py b/test/units/parsing/test_splitter.py
similarity index 100%
rename from v2/test/parsing/test_splitter.py
rename to test/units/parsing/test_splitter.py
diff --git a/test/units/parsing/test_unquote.py b/test/units/parsing/test_unquote.py
new file mode 100644
index 00000000000..afb11d4e238
--- /dev/null
+++ b/test/units/parsing/test_unquote.py
@@ -0,0 +1,58 @@
+# coding: utf-8
+# (c) 2015, Toshio Kuratomi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from nose import tools
+from ansible.compat.tests import unittest
+
+from ansible.parsing.splitter import unquote
+
+
+# Tests using nose's test generators cannot use unittest base class.
+# http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators
+class TestUnquote:
+ UNQUOTE_DATA = (
+ (u'1', u'1'),
+ (u'\'1\'', u'1'),
+ (u'"1"', u'1'),
+ (u'"1 \'2\'"', u'1 \'2\''),
+ (u'\'1 "2"\'', u'1 "2"'),
+ (u'\'1 \'2\'\'', u'1 \'2\''),
+ (u'"1\\"', u'"1\\"'),
+ (u'\'1\\\'', u'\'1\\\''),
+ (u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'),
+ (u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'),
+ (u'"', u'"'),
+ (u'\'', u'\''),
+ # Not entirely sure these are good but they match the current
+ # behaviour
+ (u'"1""2"', u'1""2'),
+ (u'\'1\'\'2\'', u'1\'\'2'),
+ (u'"1" 2 "3"', u'1" 2 "3'),
+ (u'"1"\'2\'"3"', u'1"\'2\'"3'),
+ )
+
+ def check_unquote(self, quoted, expected):
+ tools.eq_(unquote(quoted), expected)
+
+ def test_unquote(self):
+ for datapoint in self.UNQUOTE_DATA:
+ yield self.check_unquote, datapoint[0], datapoint[1]
diff --git a/v2/test/playbook/__init__.py b/test/units/parsing/vault/__init__.py
similarity index 100%
rename from v2/test/playbook/__init__.py
rename to test/units/parsing/vault/__init__.py
diff --git a/v2/test/parsing/vault/test_vault.py b/test/units/parsing/vault/test_vault.py
similarity index 100%
rename from v2/test/parsing/vault/test_vault.py
rename to test/units/parsing/vault/test_vault.py
diff --git a/v2/test/parsing/vault/test_vault_editor.py b/test/units/parsing/vault/test_vault_editor.py
similarity index 100%
rename from v2/test/parsing/vault/test_vault_editor.py
rename to test/units/parsing/vault/test_vault_editor.py
diff --git a/lib/ansible/runner/shell_plugins/__init__.py b/test/units/parsing/yaml/__init__.py
similarity index 100%
rename from lib/ansible/runner/shell_plugins/__init__.py
rename to test/units/parsing/yaml/__init__.py
diff --git a/v2/test/parsing/yaml/test_loader.py b/test/units/parsing/yaml/test_loader.py
similarity index 98%
rename from v2/test/parsing/yaml/test_loader.py
rename to test/units/parsing/yaml/test_loader.py
index 37eeabff83b..8fd617eea19 100644
--- a/v2/test/parsing/yaml/test_loader.py
+++ b/test/units/parsing/yaml/test_loader.py
@@ -29,6 +29,11 @@ from ansible.compat.tests.mock import patch
from ansible.parsing.yaml.loader import AnsibleLoader
+try:
+ from _yaml import ParserError
+except ImportError:
+ from yaml.parser import ParserError
+
class TestAnsibleLoaderBasic(unittest.TestCase):
@@ -123,7 +128,7 @@ class TestAnsibleLoaderBasic(unittest.TestCase):
def test_error_conditions(self):
stream = StringIO("""{""")
loader = AnsibleLoader(stream, 'myfile.yml')
- self.assertRaises(loader.get_single_data)
+ self.assertRaises(ParserError, loader.get_single_data)
def test_front_matter(self):
stream = StringIO("""---\nfoo: bar""")
diff --git a/v2/test/plugins/__init__.py b/test/units/playbook/__init__.py
similarity index 100%
rename from v2/test/plugins/__init__.py
rename to test/units/playbook/__init__.py
diff --git a/v2/test/playbook/test_block.py b/test/units/playbook/test_block.py
similarity index 89%
rename from v2/test/playbook/test_block.py
rename to test/units/playbook/test_block.py
index 348681527bb..2c202002267 100644
--- a/v2/test/playbook/test_block.py
+++ b/test/units/playbook/test_block.py
@@ -60,18 +60,18 @@ class TestBlock(unittest.TestCase):
)
b = Block.load(ds)
self.assertEqual(len(b.block), 1)
- assert isinstance(b.block[0], Task)
+ self.assertIsInstance(b.block[0], Task)
self.assertEqual(len(b.rescue), 1)
- assert isinstance(b.rescue[0], Task)
+ self.assertIsInstance(b.rescue[0], Task)
self.assertEqual(len(b.always), 1)
- assert isinstance(b.always[0], Task)
+ self.assertIsInstance(b.always[0], Task)
# not currently used
#self.assertEqual(len(b.otherwise), 1)
- #assert isinstance(b.otherwise[0], Task)
+ #self.assertIsInstance(b.otherwise[0], Task)
def test_load_implicit_block(self):
ds = [dict(action='foo')]
b = Block.load(ds)
self.assertEqual(len(b.block), 1)
- assert isinstance(b.block[0], Task)
+ self.assertIsInstance(b.block[0], Task)
diff --git a/v2/test/playbook/test_play.py b/test/units/playbook/test_play.py
similarity index 91%
rename from v2/test/playbook/test_play.py
rename to test/units/playbook/test_play.py
index 22486f41290..f3fe1e07310 100644
--- a/v2/test/playbook/test_play.py
+++ b/test/units/playbook/test_play.py
@@ -23,11 +23,11 @@ from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.playbook.block import Block
from ansible.playbook.play import Play
from ansible.playbook.role import Role
-from ansible.playbook.task import Task
-from test.mock.loader import DictDataLoader
+from units.mock.loader import DictDataLoader
class TestPlay(unittest.TestCase):
@@ -39,7 +39,7 @@ class TestPlay(unittest.TestCase):
def test_empty_play(self):
p = Play.load(dict())
- self.assertEqual(str(p), "PLAY: ")
+ self.assertEqual(str(p), '')
def test_basic_play(self):
p = Play.load(dict(
@@ -117,7 +117,7 @@ class TestPlay(unittest.TestCase):
roles=['foo'],
), loader=fake_loader)
- tasks = p.compile()
+ blocks = p.compile()
def test_play_compile(self):
p = Play.load(dict(
@@ -127,6 +127,8 @@ class TestPlay(unittest.TestCase):
tasks=[dict(action='shell echo "hello world"')],
))
- tasks = p.compile()
- self.assertEqual(len(tasks), 1)
- self.assertIsInstance(tasks[0], Task)
+ blocks = p.compile()
+
+ # with a single block, there will still be three
+ # implicit meta flush_handler blocks inserted
+ self.assertEqual(len(blocks), 4)
diff --git a/test/units/playbook/test_play_context.py b/test/units/playbook/test_play_context.py
new file mode 100644
index 00000000000..fe475833cac
--- /dev/null
+++ b/test/units/playbook/test_play_context.py
@@ -0,0 +1,154 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pwd
+import os
+
+from ansible.compat.tests import unittest
+from ansible.compat.tests.mock import patch, MagicMock
+
+from ansible import constants as C
+from ansible.cli import CLI
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.playbook.play_context import PlayContext
+
+from units.mock.loader import DictDataLoader
+
+class TestPlayContext(unittest.TestCase):
+
+ def setUp(self):
+ self._parser = CLI.base_parser(
+ runas_opts = True,
+ meta_opts = True,
+ runtask_opts = True,
+ vault_opts = True,
+ async_opts = True,
+ connect_opts = True,
+ subset_opts = True,
+ check_opts = True,
+ diff_opts = True,
+ )
+
+ def tearDown(self):
+ pass
+
+ def test_play_context(self):
+ (options, args) = self._parser.parse_args(['-vv', '--check'])
+ play_context = PlayContext(options=options)
+ self.assertEqual(play_context.connection, 'smart')
+ self.assertEqual(play_context.remote_addr, None)
+ self.assertEqual(play_context.remote_user, pwd.getpwuid(os.geteuid())[0])
+ self.assertEqual(play_context.password, '')
+ self.assertEqual(play_context.port, None)
+ self.assertEqual(play_context.private_key_file, C.DEFAULT_PRIVATE_KEY_FILE)
+ self.assertEqual(play_context.timeout, C.DEFAULT_TIMEOUT)
+ self.assertEqual(play_context.shell, None)
+ self.assertEqual(play_context.verbosity, 2)
+ self.assertEqual(play_context.check_mode, True)
+ self.assertEqual(play_context.no_log, False)
+
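+ # values set on the play override the defaults derived from the CLI options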
+ mock_play = MagicMock()
+ mock_play.connection = 'mock'
+ mock_play.remote_user = 'mock'
+ mock_play.port = 1234
+ mock_play.become = True
+ mock_play.become_method = 'mock'
+ mock_play.become_user = 'mockroot'
+ mock_play.no_log = True
+
+ play_context = PlayContext(play=mock_play, options=options)
+ self.assertEqual(play_context.connection, 'mock')
+ self.assertEqual(play_context.remote_user, 'mock')
+ self.assertEqual(play_context.password, '')
+ self.assertEqual(play_context.port, 1234)
+ self.assertEqual(play_context.no_log, True)
+ self.assertEqual(play_context.become, True)
+ self.assertEqual(play_context.become_method, "mock")
+ self.assertEqual(play_context.become_user, "mockroot")
+
+ mock_task = MagicMock()
+ mock_task.connection = 'mocktask'
+ mock_task.remote_user = 'mocktask'
+ mock_task.become = True
+ mock_task.become_method = 'mocktask'
+ mock_task.become_user = 'mocktaskroot'
+ mock_task.become_pass = 'mocktaskpass'
+ mock_task.no_log = False
+
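+ # task-level settings and inventory variables take precedence over play-level values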
+ all_vars = dict(
+ ansible_connection = 'mock_inventory',
+ ansible_ssh_port = 4321,
+ )
+
+ play_context = PlayContext(play=mock_play, options=options)
+ play_context = play_context.set_task_and_variable_override(task=mock_task, variables=all_vars)
+ self.assertEqual(play_context.connection, 'mock_inventory')
+ self.assertEqual(play_context.remote_user, 'mocktask')
+ self.assertEqual(play_context.port, 4321)
+ self.assertEqual(play_context.no_log, False)
+ self.assertEqual(play_context.become, True)
+ self.assertEqual(play_context.become_method, "mocktask")
+ self.assertEqual(play_context.become_user, "mocktaskroot")
+ self.assertEqual(play_context.become_pass, "mocktaskpass")
+
+ def test_play_context_make_become_cmd(self):
+ (options, args) = self._parser.parse_args([])
+ play_context = PlayContext(options=options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ sudo_exe = C.DEFAULT_SUDO_EXE
+ sudo_flags = C.DEFAULT_SUDO_FLAGS
+ su_exe = C.DEFAULT_SU_EXE
+ su_flags = C.DEFAULT_SU_FLAGS
+ pbrun_exe = 'pbrun'
+ pbrun_flags = ''
+ pfexec_exe = 'pfexec'
+ pfexec_flags = ''
+
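+ # when become is not enabled, make_become_cmd returns the command unchanged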
+ cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
+ self.assertEqual(cmd, default_cmd)
+
+ play_context.become = True
+ play_context.become_user = 'foo'
+
+ play_context.become_method = 'sudo'
+ cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
+ self.assertEqual(cmd, """%s -c '%s -k && %s %s -S -p "%s" -u %s %s -c '"'"'echo %s; %s'"'"''""" % (default_exe, sudo_exe, sudo_exe, sudo_flags, play_context.prompt, play_context.become_user, default_exe, play_context.success_key, default_cmd))
+
+ play_context.become_method = 'su'
+ cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
+ self.assertEqual(cmd, """%s -c '%s %s -c "%s -c '"'"'echo %s; %s'"'"'"'""" % (default_exe, su_exe, play_context.become_user, default_exe, play_context.success_key, default_cmd))
+
+ play_context.become_method = 'pbrun'
+ cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
+ self.assertEqual(cmd, """%s -c '%s -b %s -u %s '"'"'echo %s; %s'"'"''""" % (default_exe, pbrun_exe, pbrun_flags, play_context.become_user, play_context.success_key, default_cmd))
+
+ play_context.become_method = 'pfexec'
+ cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
+ self.assertEqual(cmd, """%s -c '%s %s "'"'"'echo %s; %s'"'"'"'""" % (default_exe, pfexec_exe, pfexec_flags, play_context.success_key, default_cmd))
+
+ play_context.become_method = 'bad'
+ self.assertRaises(AnsibleError, play_context.make_become_cmd, cmd=default_cmd, executable="/bin/bash")
+
diff --git a/v2/test/playbook/test_playbook.py b/test/units/playbook/test_playbook.py
similarity index 97%
rename from v2/test/playbook/test_playbook.py
rename to test/units/playbook/test_playbook.py
index dfb52dc7b12..454aa9a540b 100644
--- a/v2/test/playbook/test_playbook.py
+++ b/test/units/playbook/test_playbook.py
@@ -26,7 +26,7 @@ from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook import Playbook
from ansible.vars import VariableManager
-from test.mock.loader import DictDataLoader
+from units.mock.loader import DictDataLoader
class TestPlaybook(unittest.TestCase):
@@ -66,4 +66,3 @@ class TestPlaybook(unittest.TestCase):
vm = VariableManager()
self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader)
self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader)
-
diff --git a/v2/test/playbook/test_role.py b/test/units/playbook/test_role.py
similarity index 54%
rename from v2/test/playbook/test_role.py
rename to test/units/playbook/test_role.py
index d0f3708898d..e0764a9b5b0 100644
--- a/v2/test/playbook/test_role.py
+++ b/test/units/playbook/test_role.py
@@ -28,7 +28,7 @@ from ansible.playbook.role import Role
from ansible.playbook.role.include import RoleInclude
from ansible.playbook.task import Task
-from test.mock.loader import DictDataLoader
+from units.mock.loader import DictDataLoader
class TestRole(unittest.TestCase):
@@ -41,29 +41,35 @@ class TestRole(unittest.TestCase):
def test_load_role_with_tasks(self):
fake_loader = DictDataLoader({
- "/etc/ansible/roles/foo/tasks/main.yml": """
+ "/etc/ansible/roles/foo_tasks/tasks/main.yml": """
- shell: echo 'hello world'
""",
})
- i = RoleInclude.load('foo', loader=fake_loader)
- r = Role.load(i)
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
- self.assertEqual(str(r), 'foo')
+ i = RoleInclude.load('foo_tasks', play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
+
+ self.assertEqual(str(r), 'foo_tasks')
self.assertEqual(len(r._task_blocks), 1)
assert isinstance(r._task_blocks[0], Block)
def test_load_role_with_handlers(self):
fake_loader = DictDataLoader({
- "/etc/ansible/roles/foo/handlers/main.yml": """
+ "/etc/ansible/roles/foo_handlers/handlers/main.yml": """
- name: test handler
shell: echo 'hello world'
""",
})
- i = RoleInclude.load('foo', loader=fake_loader)
- r = Role.load(i)
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
+
+ i = RoleInclude.load('foo_handlers', play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
self.assertEqual(len(r._handler_blocks), 1)
assert isinstance(r._handler_blocks[0], Block)
@@ -71,16 +77,19 @@ class TestRole(unittest.TestCase):
def test_load_role_with_vars(self):
fake_loader = DictDataLoader({
- "/etc/ansible/roles/foo/defaults/main.yml": """
+ "/etc/ansible/roles/foo_vars/defaults/main.yml": """
foo: bar
""",
- "/etc/ansible/roles/foo/vars/main.yml": """
+ "/etc/ansible/roles/foo_vars/vars/main.yml": """
foo: bam
""",
})
- i = RoleInclude.load('foo', loader=fake_loader)
- r = Role.load(i)
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
+
+ i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
self.assertEqual(r._default_vars, dict(foo='bar'))
self.assertEqual(r._role_vars, dict(foo='bam'))
@@ -88,42 +97,45 @@ class TestRole(unittest.TestCase):
def test_load_role_with_metadata(self):
fake_loader = DictDataLoader({
- '/etc/ansible/roles/foo/meta/main.yml': """
+ '/etc/ansible/roles/foo_metadata/meta/main.yml': """
allow_duplicates: true
dependencies:
- - bar
+ - bar_metadata
galaxy_info:
a: 1
b: 2
c: 3
""",
- '/etc/ansible/roles/bar/meta/main.yml': """
+ '/etc/ansible/roles/bar_metadata/meta/main.yml': """
dependencies:
- - baz
+ - baz_metadata
""",
- '/etc/ansible/roles/baz/meta/main.yml': """
+ '/etc/ansible/roles/baz_metadata/meta/main.yml': """
dependencies:
- - bam
+ - bam_metadata
""",
- '/etc/ansible/roles/bam/meta/main.yml': """
+ '/etc/ansible/roles/bam_metadata/meta/main.yml': """
dependencies: []
""",
- '/etc/ansible/roles/bad1/meta/main.yml': """
+ '/etc/ansible/roles/bad1_metadata/meta/main.yml': """
1
""",
- '/etc/ansible/roles/bad2/meta/main.yml': """
+ '/etc/ansible/roles/bad2_metadata/meta/main.yml': """
foo: bar
""",
- '/etc/ansible/roles/recursive1/meta/main.yml': """
- dependencies: ['recursive2']
+ '/etc/ansible/roles/recursive1_metadata/meta/main.yml': """
+ dependencies: ['recursive2_metadata']
""",
- '/etc/ansible/roles/recursive2/meta/main.yml': """
- dependencies: ['recursive1']
+ '/etc/ansible/roles/recursive2_metadata/meta/main.yml': """
+ dependencies: ['recursive1_metadata']
""",
})
- i = RoleInclude.load('foo', loader=fake_loader)
- r = Role.load(i)
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
+
+ i = RoleInclude.load('foo_metadata', play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
role_deps = r.get_direct_dependencies()
@@ -136,18 +148,18 @@ class TestRole(unittest.TestCase):
all_deps = r.get_all_dependencies()
self.assertEqual(len(all_deps), 3)
- self.assertEqual(all_deps[0].get_name(), 'bar')
- self.assertEqual(all_deps[1].get_name(), 'baz')
- self.assertEqual(all_deps[2].get_name(), 'bam')
+ self.assertEqual(all_deps[0].get_name(), 'bam_metadata')
+ self.assertEqual(all_deps[1].get_name(), 'baz_metadata')
+ self.assertEqual(all_deps[2].get_name(), 'bar_metadata')
- i = RoleInclude.load('bad1', loader=fake_loader)
- self.assertRaises(AnsibleParserError, Role.load, i)
+ i = RoleInclude.load('bad1_metadata', play=mock_play, loader=fake_loader)
+ self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play)
- i = RoleInclude.load('bad2', loader=fake_loader)
- self.assertRaises(AnsibleParserError, Role.load, i)
+ i = RoleInclude.load('bad2_metadata', play=mock_play, loader=fake_loader)
+ self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play)
- i = RoleInclude.load('recursive1', loader=fake_loader)
- self.assertRaises(AnsibleError, Role.load, i)
+ i = RoleInclude.load('recursive1_metadata', play=mock_play, loader=fake_loader)
+ self.assertRaises(AnsibleError, Role.load, i, play=mock_play)
def test_load_role_complex(self):
@@ -155,13 +167,16 @@ class TestRole(unittest.TestCase):
# params and tags/when statements
fake_loader = DictDataLoader({
- "/etc/ansible/roles/foo/tasks/main.yml": """
+ "/etc/ansible/roles/foo_complex/tasks/main.yml": """
- shell: echo 'hello world'
""",
})
- i = RoleInclude.load(dict(role='foo'), loader=fake_loader)
- r = Role.load(i)
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
+
+ i = RoleInclude.load(dict(role='foo_complex'), play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
- self.assertEqual(r.get_name(), "foo")
+ self.assertEqual(r.get_name(), "foo_complex")
diff --git a/test/units/playbook/test_taggable.py b/test/units/playbook/test_taggable.py
new file mode 100644
index 00000000000..501136741a6
--- /dev/null
+++ b/test/units/playbook/test_taggable.py
@@ -0,0 +1,104 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.compat.tests import unittest
+from ansible.playbook.taggable import Taggable
+from units.mock.loader import DictDataLoader
+
+class TaggableTestObj(Taggable):
+
+ def __init__(self):
+ self._loader = DictDataLoader({})
+ self.tags = []
+
+
+class TestTaggable(unittest.TestCase):
+
+ def assert_evaluate_equal(self, test_value, tags, only_tags, skip_tags):
+ taggable_obj = TaggableTestObj()
+ taggable_obj.tags = tags
+
+ evaluate = taggable_obj.evaluate_tags(only_tags, skip_tags, {})
+
+ self.assertEqual(test_value, evaluate)
+
+ def test_evaluate_tags_tag_in_only_tags(self):
+ self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag1'], [])
+
+ def test_evaluate_tags_tag_in_skip_tags(self):
+ self.assert_evaluate_equal(False, ['tag1', 'tag2'], [], ['tag1'])
+
+ def test_evaluate_tags_special_always_in_object_tags(self):
+ self.assert_evaluate_equal(True, ['tag', 'always'], ['random'], [])
+
+ def test_evaluate_tags_tag_in_skip_tags_special_always_in_object_tags(self):
+ self.assert_evaluate_equal(False, ['tag', 'always'], ['random'], ['tag'])
+
+ def test_evaluate_tags_special_always_in_skip_tags_and_always_in_tags(self):
+ self.assert_evaluate_equal(False, ['tag', 'always'], [], ['always'])
+
+ def test_evaluate_tags_special_tagged_in_only_tags_and_object_tagged(self):
+ self.assert_evaluate_equal(True, ['tag'], ['tagged'], [])
+
+ def test_evaluate_tags_special_tagged_in_only_tags_and_object_untagged(self):
+ self.assert_evaluate_equal(False, [], ['tagged'], [])
+
+ def test_evaluate_tags_special_tagged_in_skip_tags_and_object_tagged(self):
+ self.assert_evaluate_equal(False, ['tag'], [], ['tagged'])
+
+ def test_evaluate_tags_special_tagged_in_skip_tags_and_object_untagged(self):
+ self.assert_evaluate_equal(True, [], [], ['tagged'])
+
+ def test_evaluate_tags_special_untagged_in_only_tags_and_object_tagged(self):
+ self.assert_evaluate_equal(False, ['tag'], ['untagged'], [])
+
+ def test_evaluate_tags_special_untagged_in_only_tags_and_object_untagged(self):
+ self.assert_evaluate_equal(True, [], ['untagged'], [])
+
+ def test_evaluate_tags_special_untagged_in_skip_tags_and_object_tagged(self):
+ self.assert_evaluate_equal(True, ['tag'], [], ['untagged'])
+
+ def test_evaluate_tags_special_untagged_in_skip_tags_and_object_untagged(self):
+ self.assert_evaluate_equal(False, [], [], ['untagged'])
+
+ def test_evaluate_tags_special_all_in_only_tags(self):
+ self.assert_evaluate_equal(True, ['tag'], ['all'], ['untagged'])
+
+ def test_evaluate_tags_special_all_in_skip_tags(self):
+ self.assert_evaluate_equal(False, ['tag'], ['tag'], ['all'])
+
+ def test_evaluate_tags_special_all_in_only_tags_and_special_all_in_skip_tags(self):
+ self.assert_evaluate_equal(False, ['tag'], ['all'], ['all'])
+
+ def test_evaluate_tags_special_all_in_skip_tags_and_always_in_object_tags(self):
+ self.assert_evaluate_equal(True, ['tag', 'always'], [], ['all'])
+
+ def test_evaluate_tags_special_all_in_skip_tags_and_special_always_in_skip_tags_and_always_in_object_tags(self):
+ self.assert_evaluate_equal(False, ['tag', 'always'], [], ['all', 'always'])
+
+ def test_evaluate_tags_accepts_lists(self):
+ self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag2'], [])
+
+ def test_evaluate_tags_accepts_strings(self):
+ self.assert_evaluate_equal(True, 'tag1,tag2', ['tag2'], [])
+
+ def test_evaluate_tags_with_repeated_tags(self):
+ self.assert_evaluate_equal(False, ['tag', 'tag'], [], ['tag'])
diff --git a/v2/test/playbook/test_task.py b/test/units/playbook/test_task.py
similarity index 100%
rename from v2/test/playbook/test_task.py
rename to test/units/playbook/test_task.py
diff --git a/v2/test/vars/__init__.py b/test/units/plugins/__init__.py
similarity index 100%
rename from v2/test/vars/__init__.py
rename to test/units/plugins/__init__.py
diff --git a/v2/test/mock/__init__.py b/test/units/plugins/action/__init__.py
similarity index 99%
rename from v2/test/mock/__init__.py
rename to test/units/plugins/action/__init__.py
index ae8ccff5952..785fc459921 100644
--- a/v2/test/mock/__init__.py
+++ b/test/units/plugins/action/__init__.py
@@ -18,3 +18,4 @@
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+
diff --git a/v2/ansible/utils/__init__.py b/test/units/plugins/cache/__init__.py
similarity index 99%
rename from v2/ansible/utils/__init__.py
rename to test/units/plugins/cache/__init__.py
index ae8ccff5952..785fc459921 100644
--- a/v2/ansible/utils/__init__.py
+++ b/test/units/plugins/cache/__init__.py
@@ -18,3 +18,4 @@
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+
diff --git a/v2/test/plugins/test_cache.py b/test/units/plugins/cache/test_cache.py
similarity index 100%
rename from v2/test/plugins/test_cache.py
rename to test/units/plugins/cache/test_cache.py
diff --git a/v2/scripts/ansible b/test/units/plugins/callback/__init__.py
similarity index 99%
rename from v2/scripts/ansible
rename to test/units/plugins/callback/__init__.py
index ae8ccff5952..785fc459921 100644
--- a/v2/scripts/ansible
+++ b/test/units/plugins/callback/__init__.py
@@ -18,3 +18,4 @@
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+
diff --git a/v2/ansible/__init__.py b/test/units/plugins/connections/__init__.py
similarity index 97%
rename from v2/ansible/__init__.py
rename to test/units/plugins/connections/__init__.py
index 8637adb54d6..785fc459921 100644
--- a/v2/ansible/__init__.py
+++ b/test/units/plugins/connections/__init__.py
@@ -19,4 +19,3 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-__version__ = '2.0'
diff --git a/v2/test/plugins/test_connection.py b/test/units/plugins/connections/test_connection.py
similarity index 87%
rename from v2/test/plugins/test_connection.py
rename to test/units/plugins/connections/test_connection.py
index 0ed888ac95d..aba94b5a61b 100644
--- a/v2/test/plugins/test_connection.py
+++ b/test/units/plugins/connections/test_connection.py
@@ -22,7 +22,7 @@ __metaclass__ = type
from six import StringIO
from ansible.compat.tests import unittest
-from ansible.executor.connection_info import ConnectionInformation
+from ansible.playbook.play_context import PlayContext
from ansible.plugins.connections import ConnectionBase
#from ansible.plugins.connections.accelerate import Connection as AccelerateConnection
@@ -38,7 +38,7 @@ from ansible.plugins.connections.ssh import Connection as SSHConnection
class TestConnectionBaseClass(unittest.TestCase):
def setUp(self):
- self.conn_info = ConnectionInformation()
+ self.play_context = PlayContext()
self.in_stream = StringIO()
def tearDown(self):
@@ -72,7 +72,7 @@ class TestConnectionBaseClass(unittest.TestCase):
pass
def close(self):
pass
- self.assertIsInstance(ConnectionModule3(self.conn_info, self.in_stream), ConnectionModule3)
+ self.assertIsInstance(ConnectionModule3(self.play_context, self.in_stream), ConnectionModule3)
# def test_accelerate_connection_module(self):
# self.assertIsInstance(AccelerateConnection(), AccelerateConnection)
@@ -90,13 +90,13 @@ class TestConnectionBaseClass(unittest.TestCase):
# self.assertIsInstance(LibvirtLXCConnection(), LibvirtLXCConnection)
def test_local_connection_module(self):
- self.assertIsInstance(LocalConnection(self.conn_info, self.in_stream), LocalConnection)
+ self.assertIsInstance(LocalConnection(self.play_context, self.in_stream), LocalConnection)
def test_paramiko_connection_module(self):
- self.assertIsInstance(ParamikoConnection(self.conn_info, self.in_stream), ParamikoConnection)
+ self.assertIsInstance(ParamikoConnection(self.play_context, self.in_stream), ParamikoConnection)
def test_ssh_connection_module(self):
- self.assertIsInstance(SSHConnection(self.conn_info, self.in_stream), SSHConnection)
+ self.assertIsInstance(SSHConnection(self.play_context, self.in_stream), SSHConnection)
# def test_winrm_connection_module(self):
# self.assertIsInstance(WinRmConnection(), WinRmConnection)
diff --git a/test/units/plugins/filter/__init__.py b/test/units/plugins/filter/__init__.py
new file mode 100644
index 00000000000..785fc459921
--- /dev/null
+++ b/test/units/plugins/filter/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/test/units/plugins/inventory/__init__.py b/test/units/plugins/inventory/__init__.py
new file mode 100644
index 00000000000..785fc459921
--- /dev/null
+++ b/test/units/plugins/inventory/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/test/units/plugins/lookup/__init__.py b/test/units/plugins/lookup/__init__.py
new file mode 100644
index 00000000000..785fc459921
--- /dev/null
+++ b/test/units/plugins/lookup/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/test/units/plugins/shell/__init__.py b/test/units/plugins/shell/__init__.py
new file mode 100644
index 00000000000..785fc459921
--- /dev/null
+++ b/test/units/plugins/shell/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/test/units/plugins/strategies/__init__.py b/test/units/plugins/strategies/__init__.py
new file mode 100644
index 00000000000..785fc459921
--- /dev/null
+++ b/test/units/plugins/strategies/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py
new file mode 100644
index 00000000000..eb85b9c771d
--- /dev/null
+++ b/test/units/plugins/strategies/test_strategy_base.py
@@ -0,0 +1,365 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.compat.tests import unittest
+from ansible.compat.tests.mock import patch, MagicMock
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.plugins.strategies import StrategyBase
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.executor.task_result import TaskResult
+
+from six.moves import queue as Queue
+from units.mock.loader import DictDataLoader
+
+class TestStrategyBase(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_strategy_base_init(self):
+ mock_tqm = MagicMock(TaskQueueManager)
+ mock_tqm._final_q = MagicMock()
+ mock_tqm._options = MagicMock()
+ strategy_base = StrategyBase(tqm=mock_tqm)
+
+ def test_strategy_base_run(self):
+ mock_tqm = MagicMock(TaskQueueManager)
+ mock_tqm._final_q = MagicMock()
+ mock_tqm._stats = MagicMock()
+ mock_tqm.send_callback.return_value = None
+
+ mock_iterator = MagicMock()
+ mock_iterator._play = MagicMock()
+ mock_iterator._play.handlers = []
+
+ mock_play_context = MagicMock()
+
+ mock_tqm._failed_hosts = dict()
+ mock_tqm._unreachable_hosts = dict()
+ mock_tqm._options = MagicMock()
+ strategy_base = StrategyBase(tqm=mock_tqm)
+
+ self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context), 0)
+ self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), 1)
+ mock_tqm._failed_hosts = dict(host1=True)
+ self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), 2)
+ mock_tqm._unreachable_hosts = dict(host1=True)
+ self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), 3)
+
+ def test_strategy_base_get_hosts(self):
+ mock_hosts = []
+ for i in range(0, 5):
+ mock_host = MagicMock()
+ mock_host.name = "host%02d" % (i+1)
+ mock_hosts.append(mock_host)
+
+ mock_inventory = MagicMock()
+ mock_inventory.get_hosts.return_value = mock_hosts
+
+ mock_tqm = MagicMock()
+ mock_tqm._final_q = MagicMock()
+ mock_tqm.get_inventory.return_value = mock_inventory
+
+ mock_play = MagicMock()
+ mock_play.hosts = ["host%02d" % (i+1) for i in range(0, 5)]
+
+ strategy_base = StrategyBase(tqm=mock_tqm)
+
+ mock_tqm._failed_hosts = []
+ mock_tqm._unreachable_hosts = []
+ self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts)
+
+ mock_tqm._failed_hosts = ["host01"]
+ self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[1:])
+ self.assertEqual(strategy_base.get_failed_hosts(play=mock_play), [mock_hosts[0]])
+
+ mock_tqm._unreachable_hosts = ["host02"]
+ self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[2:])
+
+ def test_strategy_base_queue_task(self):
+ fake_loader = DictDataLoader()
+
+ workers = []
+ for i in range(0, 3):
+ worker_main_q = MagicMock()
+ worker_main_q.put.return_value = None
+ worker_result_q = MagicMock()
+ workers.append([i, worker_main_q, worker_result_q])
+
+ mock_tqm = MagicMock()
+ mock_tqm._final_q = MagicMock()
+ mock_tqm.get_workers.return_value = workers
+ mock_tqm.get_loader.return_value = fake_loader
+
+ strategy_base = StrategyBase(tqm=mock_tqm)
+ strategy_base._cur_worker = 0
+ strategy_base._pending_results = 0
+ strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock())
+ self.assertEqual(strategy_base._cur_worker, 1)
+ self.assertEqual(strategy_base._pending_results, 1)
+ strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock())
+ self.assertEqual(strategy_base._cur_worker, 2)
+ self.assertEqual(strategy_base._pending_results, 2)
+ strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock())
+ self.assertEqual(strategy_base._cur_worker, 0)
+ self.assertEqual(strategy_base._pending_results, 3)
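+ # if a worker's queue raises EOFError, the strategy moves on to the
+ # next worker and the pending count is unchanged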
+ workers[0][1].put.side_effect = EOFError
+ strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock())
+ self.assertEqual(strategy_base._cur_worker, 1)
+ self.assertEqual(strategy_base._pending_results, 3)
+
+ def test_strategy_base_process_pending_results(self):
+ mock_tqm = MagicMock()
+ mock_tqm._terminated = False
+ mock_tqm._failed_hosts = dict()
+ mock_tqm._unreachable_hosts = dict()
+ mock_tqm.send_callback.return_value = None
+
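+ # simulate the TQM final result queue with a plain list and two helpers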
+ queue_items = []
+ def _queue_empty(*args, **kwargs):
+ return len(queue_items) == 0
+ def _queue_get(*args, **kwargs):
+ if len(queue_items) == 0:
+ raise Queue.Empty
+ else:
+ return queue_items.pop()
+
+ mock_queue = MagicMock()
+ mock_queue.empty.side_effect = _queue_empty
+ mock_queue.get.side_effect = _queue_get
+ mock_tqm._final_q = mock_queue
+
+ mock_tqm._stats = MagicMock()
+ mock_tqm._stats.increment.return_value = None
+
+ mock_iterator = MagicMock()
+ mock_iterator.mark_host_failed.return_value = None
+
+ mock_host = MagicMock()
+ mock_host.name = 'test01'
+ mock_host.vars = dict()
+
+ mock_task = MagicMock()
+ mock_task._role = None
+ mock_task.ignore_errors = False
+
+ mock_group = MagicMock()
+ mock_group.add_host.return_value = None
+
+ def _get_host(host_name):
+ if host_name == 'test01':
+ return mock_host
+ return None
+ def _get_group(group_name):
+ if group_name in ('all', 'foo'):
+ return mock_group
+ return None
+
+ mock_inventory = MagicMock()
+ mock_inventory._hosts_cache = dict()
+ mock_inventory.get_host.side_effect = _get_host
+ mock_inventory.get_group.side_effect = _get_group
+ mock_inventory.clear_pattern_cache.return_value = None
+
+ mock_var_mgr = MagicMock()
+ mock_var_mgr.set_host_variable.return_value = None
+ mock_var_mgr.set_host_facts.return_value = None
+
+ strategy_base = StrategyBase(tqm=mock_tqm)
+ strategy_base._inventory = mock_inventory
+ strategy_base._variable_manager = mock_var_mgr
+ strategy_base._blocked_hosts = dict()
+ strategy_base._notified_handlers = dict()
+
+ results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 0)
+
+ task_result = TaskResult(host=mock_host, task=mock_task, return_data=dict(changed=True))
+ queue_items.append(('host_task_ok', task_result))
+ strategy_base._blocked_hosts['test01'] = True
+ strategy_base._pending_results = 1
+ results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 1)
+ self.assertEqual(results[0], task_result)
+ self.assertEqual(strategy_base._pending_results, 0)
+ self.assertNotIn('test01', strategy_base._blocked_hosts)
+
+ task_result = TaskResult(host=mock_host, task=mock_task, return_data='{"failed":true}')
+ queue_items.append(('host_task_failed', task_result))
+ strategy_base._blocked_hosts['test01'] = True
+ strategy_base._pending_results = 1
+ results = strategy_base._process_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 1)
+ self.assertEqual(results[0], task_result)
+ self.assertEqual(strategy_base._pending_results, 0)
+ self.assertNotIn('test01', strategy_base._blocked_hosts)
+ self.assertIn('test01', mock_tqm._failed_hosts)
+ del mock_tqm._failed_hosts['test01']
+
+ task_result = TaskResult(host=mock_host, task=mock_task, return_data='{}')
+ queue_items.append(('host_unreachable', task_result))
+ strategy_base._blocked_hosts['test01'] = True
+ strategy_base._pending_results = 1
+ results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 1)
+ self.assertEqual(results[0], task_result)
+ self.assertEqual(strategy_base._pending_results, 0)
+ self.assertNotIn('test01', strategy_base._blocked_hosts)
+ self.assertIn('test01', mock_tqm._unreachable_hosts)
+ del mock_tqm._unreachable_hosts['test01']
+
+ task_result = TaskResult(host=mock_host, task=mock_task, return_data='{}')
+ queue_items.append(('host_task_skipped', task_result))
+ strategy_base._blocked_hosts['test01'] = True
+ strategy_base._pending_results = 1
+ results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 1)
+ self.assertEqual(results[0], task_result)
+ self.assertEqual(strategy_base._pending_results, 0)
+ self.assertNotIn('test01', strategy_base._blocked_hosts)
+
+ strategy_base._blocked_hosts['test01'] = True
+ strategy_base._pending_results = 1
+
+ queue_items.append(('add_host', dict(add_host=dict(host_name='newhost01', new_groups=['foo']))))
+ results = strategy_base._process_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 0)
+ self.assertEqual(strategy_base._pending_results, 1)
+ self.assertIn('test01', strategy_base._blocked_hosts)
+
+ queue_items.append(('add_group', mock_host, dict(add_group=dict(group_name='foo'))))
+ results = strategy_base._process_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 0)
+ self.assertEqual(strategy_base._pending_results, 1)
+ self.assertIn('test01', strategy_base._blocked_hosts)
+
+ task_result = TaskResult(host=mock_host, task=mock_task, return_data=dict(changed=True))
+ queue_items.append(('notify_handler', task_result, 'test handler'))
+ results = strategy_base._process_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 0)
+ self.assertEqual(strategy_base._pending_results, 1)
+ self.assertIn('test01', strategy_base._blocked_hosts)
+ self.assertIn('test handler', strategy_base._notified_handlers)
+ self.assertIn(mock_host, strategy_base._notified_handlers['test handler'])
+
+ queue_items.append(('set_host_var', mock_host, mock_task, None, 'foo', 'bar'))
+ results = strategy_base._process_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 0)
+ self.assertEqual(strategy_base._pending_results, 1)
+
+ queue_items.append(('set_host_facts', mock_host, mock_task, None, 'foo', dict()))
+ results = strategy_base._process_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 0)
+ self.assertEqual(strategy_base._pending_results, 1)
+
+ queue_items.append(('bad',))
+ self.assertRaises(AnsibleError, strategy_base._process_pending_results, iterator=mock_iterator)
+
+ def test_strategy_base_load_included_file(self):
+ fake_loader = DictDataLoader({
+ "test.yml": """
+ - debug: msg='foo'
+ """,
+ "bad.yml": """
+ """,
+ })
+
+ mock_tqm = MagicMock()
+ mock_tqm._final_q = MagicMock()
+
+ strategy_base = StrategyBase(tqm=mock_tqm)
+ strategy_base._loader = fake_loader
+
+ mock_play = MagicMock()
+
+ mock_block = MagicMock()
+ mock_block._play = mock_play
+ mock_block.vars = dict()
+
+ mock_task = MagicMock()
+ mock_task._block = mock_block
+ mock_task._role = None
+
+ mock_iterator = MagicMock()
+ mock_iterator.mark_host_failed.return_value = None
+
+ mock_inc_file = MagicMock()
+ mock_inc_file._task = mock_task
+
+ mock_inc_file._filename = "test.yml"
+ res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
+
+ mock_inc_file._filename = "bad.yml"
+ res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
+ self.assertEqual(res, [])
+
+ def test_strategy_base_run_handlers(self):
+ workers = []
+ for i in range(0, 3):
+ worker_main_q = MagicMock()
+ worker_main_q.put.return_value = None
+ worker_result_q = MagicMock()
+ workers.append([i, worker_main_q, worker_result_q])
+
+ mock_tqm = MagicMock()
+ mock_tqm._final_q = MagicMock()
+ mock_tqm.get_workers.return_value = workers
+ mock_tqm.send_callback.return_value = None
+
+ mock_play_context = MagicMock()
+
+ mock_handler_task = MagicMock()
+ mock_handler_task.get_name.return_value = "test handler"
+ mock_handler_task.has_triggered.return_value = False
+
+ mock_handler = MagicMock()
+ mock_handler.block = [mock_handler_task]
+ mock_handler.flag_for_host.return_value = False
+
+ mock_play = MagicMock()
+ mock_play.handlers = [mock_handler]
+
+ mock_host = MagicMock()
+ mock_host.name = "test01"
+
+ mock_iterator = MagicMock()
+
+ mock_inventory = MagicMock()
+ mock_inventory.get_hosts.return_value = [mock_host]
+
+ mock_var_mgr = MagicMock()
+ mock_var_mgr.get_vars.return_value = dict()
+
+ mock_iterator = MagicMock()
+ mock_iterator._play = mock_play
+
+ strategy_base = StrategyBase(tqm=mock_tqm)
+ strategy_base._inventory = mock_inventory
+ strategy_base._notified_handlers = {"test handler": [mock_host]}
+
+ result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context)
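The tests above drive the strategy's result processing by appending `(message_type, payload)` tuples to a mocked final queue and asserting on the side effects (pending counts, blocked hosts, failed/unreachable maps, notified handlers). As a rough orientation only, here is a minimal self-contained sketch of that dispatch pattern; `MiniStrategy` and `FakeResult` are invented names for illustration and are not the real `StrategyBase`/`TaskResult` classes:

```python
from collections import deque, namedtuple

# Stand-in for a task result; only the fields the sketch needs.
FakeResult = namedtuple('FakeResult', ['host', 'data'])


class MiniStrategy:
    """Illustrative queue-dispatch loop, not the real StrategyBase."""

    def __init__(self):
        self.final_q = deque()          # stands in for tqm._final_q
        self.pending_results = 0
        self.blocked_hosts = {}
        self.failed_hosts = {}
        self.unreachable_hosts = {}
        self.notified_handlers = {}

    def process_pending_results(self):
        results = []
        while self.final_q:
            item = self.final_q.popleft()
            kind = item[0]
            if kind in ('host_task_ok', 'host_task_failed',
                        'host_unreachable', 'host_task_skipped'):
                task_result = item[1]
                if kind == 'host_task_failed':
                    self.failed_hosts[task_result.host] = True
                elif kind == 'host_unreachable':
                    self.unreachable_hosts[task_result.host] = True
                self.pending_results -= 1
                self.blocked_hosts.pop(task_result.host, None)
                results.append(task_result)
            elif kind == 'notify_handler':
                handler_name = item[2]
                self.notified_handlers.setdefault(handler_name, []).append(item[1].host)
            elif kind in ('add_host', 'add_group', 'set_host_var', 'set_host_facts'):
                # inventory / variable-manager side effects; nothing is returned
                pass
            else:
                raise ValueError("unknown message type: %r" % (item,))
        return results


# Usage mirroring the assertions above:
s = MiniStrategy()
s.pending_results = 1
s.blocked_hosts['test01'] = True
s.final_q.append(('host_task_ok', FakeResult(host='test01', data={'changed': True})))
assert len(s.process_pending_results()) == 1
assert s.pending_results == 0 and 'test01' not in s.blocked_hosts
```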
diff --git a/v2/test/plugins/test_plugins.py b/test/units/plugins/test_plugins.py
similarity index 100%
rename from v2/test/plugins/test_plugins.py
rename to test/units/plugins/test_plugins.py
diff --git a/test/units/plugins/vars/__init__.py b/test/units/plugins/vars/__init__.py
new file mode 100644
index 00000000000..785fc459921
--- /dev/null
+++ b/test/units/plugins/vars/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/test/units/template/__init__.py b/test/units/template/__init__.py
new file mode 100644
index 00000000000..785fc459921
--- /dev/null
+++ b/test/units/template/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/test/units/template/test_safe_eval.py b/test/units/template/test_safe_eval.py
new file mode 100644
index 00000000000..785fc459921
--- /dev/null
+++ b/test/units/template/test_safe_eval.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/test/units/template/test_templar.py b/test/units/template/test_templar.py
new file mode 100644
index 00000000000..6d2301fb9f9
--- /dev/null
+++ b/test/units/template/test_templar.py
@@ -0,0 +1,103 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from jinja2.exceptions import UndefinedError
+
+from ansible.compat.tests import unittest
+from ansible.compat.tests.mock import patch, MagicMock
+
+from ansible import constants as C
+from ansible.errors import *
+from ansible.plugins import filter_loader, lookup_loader, module_loader
+from ansible.plugins.strategies import SharedPluginLoaderObj
+from ansible.template import Templar
+
+from units.mock.loader import DictDataLoader
+
+class TestTemplar(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_templar_simple(self):
+ fake_loader = DictDataLoader({
+ "/path/to/my_file.txt": "foo\n",
+ })
+ shared_loader = SharedPluginLoaderObj()
+ variables = dict(
+ foo="bar",
+ bam="{{foo}}",
+ num=1,
+ var_true=True,
+ var_false=False,
+ var_dict=dict(a="b"),
+ bad_dict="{a='b'",
+ var_list=[1],
+ recursive="{{recursive}}",
+ )
+ templar = Templar(loader=fake_loader, variables=variables)
+
+ # test some basic templating
+ self.assertEqual(templar.template("{{foo}}"), "bar")
+ self.assertEqual(templar.template("{{foo}}\n"), "bar")
+ self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n")
+ self.assertEqual(templar.template("foo", convert_bare=True), "bar")
+ self.assertEqual(templar.template("{{bam}}"), "bar")
+ self.assertEqual(templar.template("{{num}}"), 1)
+ self.assertEqual(templar.template("{{var_true}}"), True)
+ self.assertEqual(templar.template("{{var_false}}"), False)
+ self.assertEqual(templar.template("{{var_dict}}"), dict(a="b"))
+ self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'")
+ self.assertEqual(templar.template("{{var_list}}"), [1])
+ self.assertEqual(templar.template(1, convert_bare=True), 1)
+ #FIXME: lookup ignores fake file and returns error
+ #self.assertEqual(templar.template("{{lookup('file', '/path/to/my_file.txt')}}"), "foo")
+
+ # force errors
+ self.assertRaises(UndefinedError, templar.template, "{{bad_var}}")
+ self.assertRaises(UndefinedError, templar.template, "{{lookup('file', bad_var)}}")
+ self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}")
+ self.assertRaises(AnsibleError, templar.template, "{{recursive}}")
+ self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}")
+
+ # test with fail_on_undefined=False
+ self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}")
+
+ # test set_available_variables()
+ templar.set_available_variables(variables=dict(foo="bam"))
+ self.assertEqual(templar.template("{{foo}}"), "bam")
+ # variables must be a dict() for set_available_variables()
+ self.assertRaises(AssertionError, templar.set_available_variables, "foo=bam")
+
+ def test_template_jinja2_extensions(self):
+ fake_loader = DictDataLoader({})
+ templar = Templar(loader=fake_loader)
+
+ old_exts = C.DEFAULT_JINJA2_EXTENSIONS
+ try:
+ C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar"
+ self.assertEqual(templar._get_extensions(), ['foo', 'bar'])
+ finally:
+ C.DEFAULT_JINJA2_EXTENSIONS = old_exts
+
diff --git a/test/units/template/test_vars.py b/test/units/template/test_vars.py
new file mode 100644
index 00000000000..785fc459921
--- /dev/null
+++ b/test/units/template/test_vars.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/test/units/vars/__init__.py b/test/units/vars/__init__.py
new file mode 100644
index 00000000000..785fc459921
--- /dev/null
+++ b/test/units/vars/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/v2/test/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py
similarity index 76%
rename from v2/test/vars/test_variable_manager.py
rename to test/units/vars/test_variable_manager.py
index f8d815eb6f7..9d500d04d8f 100644
--- a/v2/test/vars/test_variable_manager.py
+++ b/test/units/vars/test_variable_manager.py
@@ -24,7 +24,7 @@ from ansible.compat.tests.mock import patch, MagicMock
from ansible.vars import VariableManager
-from test.mock.loader import DictDataLoader
+from units.mock.loader import DictDataLoader
class TestVariableManager(unittest.TestCase):
@@ -38,7 +38,15 @@ class TestVariableManager(unittest.TestCase):
fake_loader = DictDataLoader({})
v = VariableManager()
- self.assertEqual(v.get_vars(loader=fake_loader), dict())
+ vars = v.get_vars(loader=fake_loader, use_cache=False)
+ if 'omit' in vars:
+ del vars['omit']
+ if 'vars' in vars:
+ del vars['vars']
+ if 'ansible_version' in vars:
+ del vars['ansible_version']
+
+ self.assertEqual(vars, dict(playbook_dir='.'))
self.assertEqual(
v._merge_dicts(
@@ -59,11 +67,14 @@ class TestVariableManager(unittest.TestCase):
extra_vars = dict(a=1, b=2, c=3)
v = VariableManager()
- v.set_extra_vars(extra_vars)
+ v.extra_vars = extra_vars
+
+ vars = v.get_vars(loader=fake_loader, use_cache=False)
for (key, val) in extra_vars.iteritems():
- self.assertEqual(v.get_vars(loader=fake_loader).get(key), val)
- self.assertIsNot(v.extra_vars.get(key), val)
+ self.assertEqual(vars.get(key), val)
+
+ self.assertIsNot(v.extra_vars, extra_vars)
def test_variable_manager_host_vars_file(self):
fake_loader = DictDataLoader({
@@ -82,30 +93,38 @@ class TestVariableManager(unittest.TestCase):
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = ()
- self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host).get("foo"), "bar")
+ self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("foo"), "bar")
def test_variable_manager_group_vars_file(self):
fake_loader = DictDataLoader({
- "group_vars/somegroup.yml": """
+ "group_vars/all.yml": """
foo: bar
+ """,
+ "group_vars/somegroup.yml": """
+ bam: baz
"""
})
v = VariableManager()
+ v.add_group_vars_file("group_vars/all.yml", loader=fake_loader)
v.add_group_vars_file("group_vars/somegroup.yml", loader=fake_loader)
self.assertIn("somegroup", v._group_vars_files)
- self.assertEqual(v._group_vars_files["somegroup"], dict(foo="bar"))
+ self.assertEqual(v._group_vars_files["all"], dict(foo="bar"))
+ self.assertEqual(v._group_vars_files["somegroup"], dict(bam="baz"))
mock_group = MagicMock()
- mock_group.name.return_value = "somegroup"
+ mock_group.name = "somegroup"
mock_group.get_ancestors.return_value = ()
+ mock_group.get_vars.return_value = dict()
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
- mock_host.get_groups.return_value = (mock_group)
+ mock_host.get_groups.return_value = (mock_group,)
- self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host).get("foo"), "bar")
+ vars = v.get_vars(loader=fake_loader, host=mock_host, use_cache=False)
+ self.assertEqual(vars.get("foo"), "bar")
+ self.assertEqual(vars.get("bam"), "baz")
def test_variable_manager_play_vars(self):
fake_loader = DictDataLoader({})
@@ -116,7 +135,7 @@ class TestVariableManager(unittest.TestCase):
mock_play.get_vars_files.return_value = []
v = VariableManager()
- self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play).get("foo"), "bar")
+ self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_play_vars_files(self):
fake_loader = DictDataLoader({
@@ -131,14 +150,15 @@ class TestVariableManager(unittest.TestCase):
mock_play.get_vars_files.return_value = ['/path/to/somefile.yml']
v = VariableManager()
- self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play).get("foo"), "bar")
+ self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_task_vars(self):
fake_loader = DictDataLoader({})
mock_task = MagicMock()
+ mock_task._role = None
mock_task.get_vars.return_value = dict(foo="bar")
v = VariableManager()
- self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task).get("foo"), "bar")
+ self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task, use_cache=False).get("foo"), "bar")
diff --git a/ticket_stubs/needs_template.md b/ticket_stubs/needs_template.md
new file mode 100644
index 00000000000..894532b5e77
--- /dev/null
+++ b/ticket_stubs/needs_template.md
@@ -0,0 +1,36 @@
+Can You Help Us Out?
+====================
+
+Thanks for filing a ticket! I am the friendly GitHub Ansibot.
+
+It looks like you might not have filled out the issue description based on our standard issue template. You might not have known about that, and that's ok; we'll tell you how to do it.
+
+We have a standard template because Ansible is a really busy project, and it helps to have some standard information in each ticket. GitHub doesn't yet provide a standard facility for this the way some other bug trackers do. We hope you understand, as this is really valuable to us!
+
+Solving this is simple: please copy the contents of this [template](https://raw.githubusercontent.com/ansible/ansible/devel/ISSUE_TEMPLATE.md) and **paste it into the description** of your ticket. That's it!
+
+If You Had A Question To Ask Instead
+====================================
+
+If you have a "how do I do this in Ansible" type of question, that's more of a user-list question than a bug report, and it is better asked on the project mailing list instead.
+
+However, if you think you have a bug, the report is the way to go! We definitely want all the bugs filed :) Just trying to help!
+
+About Priority Tags
+===================
+
+Since you're here, we'll also share some useful information at this time.
+
+In general tickets will be assigned a priority between P1 (highest) and P5, and then worked in priority order. We may also have some follow up questions along the way, so keeping up with follow up comments via GitHub notifications is a good idea.
+
+Due to the large amount of interest in Ansible, a human may not comment on your ticket immediately.
+
+Mailing Lists
+=============
+
+If you have concerns or questions, you're welcome to stop by the ansible-project or ansible-development mailing lists, as appropriate. Here are the links:
+
+ * https://groups.google.com/forum/#!forum/ansible-project - for discussion of bugs and how-to type questions
+ * https://groups.google.com/forum/#!forum/ansible-devel - for discussion on how to implement a code change, or feature brainstorming among developers
+
+Thanks again for the interest in Ansible!
diff --git a/tox.ini b/tox.ini
index 5440a5825c9..ad3d37b5219 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,23 +1,32 @@
[tox]
-envlist = {py26,py27}-v{1}
+envlist = {py26,py27}
[testenv]
commands = make tests
deps = -r{toxinidir}/test-requirements.txt
whitelist_externals = make
-[testenv:py26-v1]
-
-[testenv:py27-v1]
-
-[testenv:py26-v2]
-deps = -r{toxinidir}/v2/test-requirements.txt
-commands = make newtests
+[testenv:py26]
+commands =
+ python --version
+ python -m compileall -fq -x 'test|samples|contrib/inventory/vagrant.py' .
+ make tests
+deps = -r{toxinidir}/test-requirements.txt
+whitelist_externals =
+ make
-[testenv:py27-v2]
-deps = -r{toxinidir}/v2/test-requirements.txt
-commands = make newtests
+[testenv:py27]
+commands =
+ python --version
+ python -m compileall -fq -x 'test|samples' .
+ make tests
+deps = -r{toxinidir}/test-requirements.txt
+whitelist_externals = make
-[testenv:py34-v2]
-deps = -r{toxinidir}/v2/test-requirements.txt
-commands = make newtests
+[testenv:py34]
+commands =
+ python --version
+ python -m compileall -fq -x 'lib/ansible/module_utils' lib
+ make tests
+deps = -r{toxinidir}/test-requirements.txt
+whitelist_externals = make
diff --git a/v1/README.md b/v1/README.md
new file mode 100644
index 00000000000..98ae99854d8
--- /dev/null
+++ b/v1/README.md
@@ -0,0 +1,10 @@
+This is dead code. It is kept here for the convenience of those testing current devel, so they can determine whether a bug was introduced in the v2 rewrite or was already present in the 1.x codebase.
+Using this code should be equivalent to checking out the v1_last tag, which was devel at a point between the 1.9.1 and 1.9.2 releases.
+The stable-1.9 branch is the maintenance branch for the 1.9.x code; it may continue to diverge from the v1/ tree as bugs get fixed.
+
+DO NOT:
+ * use this code as reference
+ * make PRs against this code
+ * expect this code to be shipped with the 2.0 version of ansible
+
+
diff --git a/v1/ansible/__init__.py b/v1/ansible/__init__.py
new file mode 100644
index 00000000000..ba5ca83b723
--- /dev/null
+++ b/v1/ansible/__init__.py
@@ -0,0 +1,18 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+__version__ = '2.0.0'
+__author__ = 'Michael DeHaan'
diff --git a/lib/ansible/cache/__init__.py b/v1/ansible/cache/__init__.py
similarity index 100%
rename from lib/ansible/cache/__init__.py
rename to v1/ansible/cache/__init__.py
diff --git a/lib/ansible/cache/base.py b/v1/ansible/cache/base.py
similarity index 100%
rename from lib/ansible/cache/base.py
rename to v1/ansible/cache/base.py
diff --git a/lib/ansible/cache/jsonfile.py b/v1/ansible/cache/jsonfile.py
similarity index 100%
rename from lib/ansible/cache/jsonfile.py
rename to v1/ansible/cache/jsonfile.py
diff --git a/lib/ansible/cache/memcached.py b/v1/ansible/cache/memcached.py
similarity index 100%
rename from lib/ansible/cache/memcached.py
rename to v1/ansible/cache/memcached.py
diff --git a/lib/ansible/cache/memory.py b/v1/ansible/cache/memory.py
similarity index 100%
rename from lib/ansible/cache/memory.py
rename to v1/ansible/cache/memory.py
diff --git a/lib/ansible/cache/redis.py b/v1/ansible/cache/redis.py
similarity index 100%
rename from lib/ansible/cache/redis.py
rename to v1/ansible/cache/redis.py
diff --git a/v2/ansible/inventory/vars_plugins/__init__.py b/v1/ansible/callback_plugins/__init__.py
similarity index 100%
rename from v2/ansible/inventory/vars_plugins/__init__.py
rename to v1/ansible/callback_plugins/__init__.py
diff --git a/lib/ansible/callback_plugins/noop.py b/v1/ansible/callback_plugins/noop.py
similarity index 100%
rename from lib/ansible/callback_plugins/noop.py
rename to v1/ansible/callback_plugins/noop.py
diff --git a/lib/ansible/callbacks.py b/v1/ansible/callbacks.py
similarity index 100%
rename from lib/ansible/callbacks.py
rename to v1/ansible/callbacks.py
diff --git a/lib/ansible/color.py b/v1/ansible/color.py
similarity index 100%
rename from lib/ansible/color.py
rename to v1/ansible/color.py
diff --git a/v2/ansible/constants.py b/v1/ansible/constants.py
similarity index 89%
rename from v2/ansible/constants.py
rename to v1/ansible/constants.py
index 245972b1a56..2cdc08d8ce8 100644
--- a/v2/ansible/constants.py
+++ b/v1/ansible/constants.py
@@ -15,15 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
import os
import pwd
import sys
-
-from six.moves import configparser
+import ConfigParser
from string import ascii_letters, digits
# copied from utils, avoid circular reference fun :)
@@ -40,15 +35,13 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False,
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
- value = mk_boolean(value)
- if value:
- if integer:
- value = int(value)
- elif floating:
- value = float(value)
- elif islist:
- if isinstance(value, basestring):
- value = [x.strip() for x in value.split(',')]
+ return mk_boolean(value)
+ if value and integer:
+ return int(value)
+ if value and floating:
+ return float(value)
+ if value and islist:
+ return [x.strip() for x in value.split(',')]
return value
def _get_config(p, section, key, env_var, default):
@@ -67,7 +60,7 @@ def _get_config(p, section, key, env_var, default):
def load_config_file():
''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
- p = configparser.ConfigParser()
+ p = ConfigParser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
@@ -80,8 +73,8 @@ def load_config_file():
if path is not None and os.path.exists(path):
try:
p.read(path)
- except configparser.Error as e:
- print("Error reading config file: \n{0}".format(e))
+ except ConfigParser.Error as e:
+ print "Error reading config file: \n%s" % e
sys.exit(1)
return p
return None
@@ -105,8 +98,7 @@ YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
DEFAULTS='defaults'
# configurable things
-DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
-DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts')))
+DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts')))
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None)
DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles'))
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
@@ -120,7 +112,6 @@ DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
-DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
@@ -131,6 +122,7 @@ DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None,
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
+DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
@@ -142,14 +134,17 @@ DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAG
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
-DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', ''))
+DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', ''))
+
+# selinux
+DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf', islist=True)
#TODO: get rid of ternary chain mess
BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas']
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''}
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
-DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
+DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',default=None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
# need to rethink implementing these 2
DEFAULT_BECOME_EXE = None
@@ -164,7 +159,6 @@ DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', '
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins')
-DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
@@ -182,6 +176,9 @@ DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings',
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
+DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
+
+
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
@@ -202,16 +199,10 @@ ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_fi
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
-# galaxy related
-DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com')
-# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
-GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', ['git','hg'], islist=True)
-
# characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
# non-configurable things
-MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
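For reference, the casting behaviour of `get_config()` shown in this hunk (boolean, integer, float, and comma-separated list handling) can be restated as a standalone sketch. This is illustrative only; `cast_config_value` and its local `mk_boolean` are stand-ins defined here, not the library helpers:

```python
# Standalone sketch of the casting logic in get_config() above. The raw
# lookup (ini file / environment variable / default) is assumed to have
# already produced `value`.
def cast_config_value(value, boolean=False, integer=False, floating=False, islist=False):
    def mk_boolean(v):
        # local stand-in for the real truthiness helper
        return str(v).lower() in ('1', 'true', 'yes', 'on')

    if boolean:
        return mk_boolean(value)
    if value and integer:
        return int(value)
    if value and floating:
        return float(value)
    if value and islist:
        return [x.strip() for x in value.split(',')]
    return value


print(cast_config_value('yes', boolean=True))                # True
print(cast_config_value('22', integer=True))                 # 22
print(cast_config_value('fuse, nfs, vboxsf', islist=True))   # ['fuse', 'nfs', 'vboxsf']
```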
diff --git a/lib/ansible/errors.py b/v1/ansible/errors.py
similarity index 100%
rename from lib/ansible/errors.py
rename to v1/ansible/errors.py
diff --git a/v2/ansible/inventory/__init__.py b/v1/ansible/inventory/__init__.py
similarity index 88%
rename from v2/ansible/inventory/__init__.py
rename to v1/ansible/inventory/__init__.py
index 063398f17f9..f012246e227 100644
--- a/v2/ansible/inventory/__init__.py
+++ b/v1/ansible/inventory/__init__.py
@@ -16,44 +16,36 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
import fnmatch
import os
import sys
import re
-import stat
import subprocess
-from ansible import constants as C
-from ansible.errors import *
-
+import ansible.constants as C
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
from ansible.inventory.dir import InventoryDirectory
from ansible.inventory.group import Group
from ansible.inventory.host import Host
-from ansible.plugins import vars_loader
-from ansible.utils.path import is_executable
-from ansible.utils.vars import combine_vars
+from ansible import errors
+from ansible import utils
class Inventory(object):
"""
Host inventory for ansible.
"""
- #__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
- # 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
- # '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
+ __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
+ 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
+ '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
- def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):
+ def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None):
# the host file file, or script path, or list of hosts
# if a list, inventory data will NOT be loaded
self.host_list = host_list
- self._loader = loader
- self._variable_manager = variable_manager
+ self._vault_password=vault_password
# caching to avoid repeated calculations, particularly with
# external inventory scripts.
@@ -61,7 +53,7 @@ class Inventory(object):
self._vars_per_host = {}
self._vars_per_group = {}
self._hosts_cache = {}
- self._groups_list = {}
+ self._groups_list = {}
self._pattern_cache = {}
# to be set by calling set_playbook_basedir by playbook code
@@ -105,7 +97,7 @@ class Inventory(object):
if os.path.isdir(host_list):
# Ensure basedir is inside the directory
self.host_list = os.path.join(self.host_list, "")
- self.parser = InventoryDirectory(loader=self._loader, filename=host_list)
+ self.parser = InventoryDirectory(filename=host_list)
self.groups = self.parser.groups.values()
else:
# check to see if the specified file starts with a
@@ -121,9 +113,9 @@ class Inventory(object):
except:
pass
- if is_executable(host_list):
+ if utils.is_executable(host_list):
try:
- self.parser = InventoryScript(loader=self._loader, filename=host_list)
+ self.parser = InventoryScript(filename=host_list)
self.groups = self.parser.groups.values()
except:
if not shebang_present:
@@ -142,23 +134,19 @@ class Inventory(object):
else:
raise
- vars_loader.add_directory(self.basedir(), with_subdir=True)
+ utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True)
else:
raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?")
- self._vars_plugins = [ x for x in vars_loader.all(self) ]
+ self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ]
- # FIXME: shouldn't be required, since the group/host vars file
- # management will be done in VariableManager
# get group vars from group_vars/ files and vars plugins
for group in self.groups:
- # FIXME: combine_vars
- group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
+ group.vars = utils.combine_vars(group.vars, self.get_group_variables(group.name, vault_password=self._vault_password))
# get host vars from host_vars/ files and vars plugins
for host in self.get_hosts():
- # FIXME: combine_vars
- host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
+ host.vars = utils.combine_vars(host.vars, self.get_host_variables(host.name, vault_password=self._vault_password))
def _match(self, str, pattern_str):
@@ -204,9 +192,9 @@ class Inventory(object):
# exclude hosts mentioned in any restriction (ex: failed hosts)
if self._restriction is not None:
- hosts = [ h for h in hosts if h in self._restriction ]
+ hosts = [ h for h in hosts if h.name in self._restriction ]
if self._also_restriction is not None:
- hosts = [ h for h in hosts if h in self._also_restriction ]
+ hosts = [ h for h in hosts if h.name in self._also_restriction ]
return hosts
@@ -332,8 +320,6 @@ class Inventory(object):
new_host = Host(pattern)
new_host.set_variable("ansible_python_interpreter", sys.executable)
new_host.set_variable("ansible_connection", "local")
- new_host.ipv4_address = '127.0.0.1'
-
ungrouped = self.get_group("ungrouped")
if ungrouped is None:
self.add_group(Group('ungrouped'))
@@ -434,7 +420,7 @@ class Inventory(object):
group = self.get_group(groupname)
if group is None:
- raise Exception("group not found: %s" % groupname)
+ raise errors.AnsibleError("group not found: %s" % groupname)
vars = {}
@@ -442,21 +428,19 @@ class Inventory(object):
vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')]
for updated in vars_results:
if updated is not None:
- # FIXME: combine_vars
- vars = combine_vars(vars, updated)
+ vars = utils.combine_vars(vars, updated)
# Read group_vars/ files
- # FIXME: combine_vars
- vars = combine_vars(vars, self.get_group_vars(group))
+ vars = utils.combine_vars(vars, self.get_group_vars(group))
return vars
- def get_vars(self, hostname, update_cached=False, vault_password=None):
+ def get_variables(self, hostname, update_cached=False, vault_password=None):
host = self.get_host(hostname)
if not host:
- raise Exception("host not found: %s" % hostname)
- return host.get_vars()
+ raise errors.AnsibleError("host not found: %s" % hostname)
+ return host.get_variables()
def get_host_variables(self, hostname, update_cached=False, vault_password=None):
@@ -476,26 +460,22 @@ class Inventory(object):
vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
for updated in vars_results:
if updated is not None:
- # FIXME: combine_vars
- vars = combine_vars(vars, updated)
+ vars = utils.combine_vars(vars, updated)
# plugin.get_host_vars retrieves just vars for specific host
vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
for updated in vars_results:
if updated is not None:
- # FIXME: combine_vars
- vars = combine_vars(vars, updated)
+ vars = utils.combine_vars(vars, updated)
# still need to check InventoryParser per host vars
# which actually means InventoryScript per host,
# which is not performant
if self.parser is not None:
- # FIXME: combine_vars
- vars = combine_vars(vars, self.parser.get_host_variables(host))
+ vars = utils.combine_vars(vars, self.parser.get_host_variables(host))
# Read host_vars/ files
- # FIXME: combine_vars
- vars = combine_vars(vars, self.get_host_vars(host))
+ vars = utils.combine_vars(vars, self.get_host_vars(host))
return vars
@@ -510,7 +490,7 @@ class Inventory(object):
""" return a list of hostnames for a pattern """
- result = [ h for h in self.get_hosts(pattern) ]
+ result = [ h.name for h in self.get_hosts(pattern) ]
if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]:
result = [pattern]
return result
@@ -518,7 +498,11 @@ class Inventory(object):
def list_groups(self):
return sorted([ g.name for g in self.groups ], key=lambda x: x)
- def restrict_to_hosts(self, restriction):
+ # TODO: remove this function
+ def get_restriction(self):
+ return self._restriction
+
+ def restrict_to(self, restriction):
"""
Restrict list operations to the hosts given in restriction. This is used
to exclude failed hosts in main playbook code, don't use this for other
@@ -560,7 +544,7 @@ class Inventory(object):
results.append(x)
self._subset = results
- def remove_restriction(self):
+ def lift_restriction(self):
""" Do not restrict list operations """
self._restriction = None
@@ -604,12 +588,10 @@ class Inventory(object):
self._playbook_basedir = dir
# get group vars from group_vars/ files
for group in self.groups:
- # FIXME: combine_vars
- group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
+ group.vars = utils.combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
# get host vars from host_vars/ files
for host in self.get_hosts():
- # FIXME: combine_vars
- host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
+ host.vars = utils.combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
# invalidate cache
self._vars_per_host = {}
self._vars_per_group = {}
@@ -657,15 +639,15 @@ class Inventory(object):
if _basedir == self._playbook_basedir and scan_pass != 1:
continue
- # FIXME: these should go to VariableManager
if group and host is None:
# load vars in dir/group_vars/name_of_group
base_path = os.path.join(basedir, "group_vars/%s" % group.name)
- self._variable_manager.add_group_vars_file(base_path, self._loader)
+ results = utils.load_vars(base_path, results, vault_password=self._vault_password)
+
elif host and group is None:
# same for hostvars in dir/host_vars/name_of_host
base_path = os.path.join(basedir, "host_vars/%s" % host.name)
- self._variable_manager.add_host_vars_file(base_path, self._loader)
+ results = utils.load_vars(base_path, results, vault_password=self._vault_password)
# all done, results is a dictionary of variables for this particular host.
return results
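The hunk above layers group and host variables through `utils.combine_vars()`. As a hedged illustration of the two hash_behaviour modes such a merge is commonly configured with, here is a self-contained sketch; `merge_hash` and the explicit `hash_behaviour` parameter are local simplifications, not the exact library code:

```python
# Minimal sketch of the two merge behaviours behind variable layering.
def merge_hash(a, b):
    """Recursively merge dict b into dict a (b wins on conflicts)."""
    result = dict(a)
    for key, value in b.items():
        if key in result and isinstance(result[key], dict) and isinstance(value, dict):
            result[key] = merge_hash(result[key], value)
        else:
            result[key] = value
    return result


def combine_vars(a, b, hash_behaviour='replace'):
    if hash_behaviour == 'merge':
        return merge_hash(a, b)
    # default 'replace': the later source wins wholesale, no deep merge
    combined = dict(a)
    combined.update(b)
    return combined


group_vars = {'ntp': {'server': 'a.example.com', 'iburst': True}}
host_vars = {'ntp': {'server': 'b.example.com'}}
print(combine_vars(group_vars, host_vars))                           # host dict replaces the whole 'ntp' key
print(combine_vars(group_vars, host_vars, hash_behaviour='merge'))   # nested keys are merged instead
```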
diff --git a/v2/ansible/inventory/dir.py b/v1/ansible/inventory/dir.py
similarity index 91%
rename from v2/ansible/inventory/dir.py
rename to v1/ansible/inventory/dir.py
index 735f32d62c3..9ac23fff899 100644
--- a/v2/ansible/inventory/dir.py
+++ b/v1/ansible/inventory/dir.py
@@ -17,25 +17,20 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
import os
-
-from ansible import constants as C
-from ansible.errors import AnsibleError
-
+import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
-from ansible.utils.path import is_executable
-from ansible.utils.vars import combine_vars
+from ansible import utils
+from ansible import errors
class InventoryDirectory(object):
''' Host inventory parser for ansible using a directory of inventories. '''
- def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
+ def __init__(self, filename=C.DEFAULT_HOST_LIST):
self.names = os.listdir(filename)
self.names.sort()
self.directory = filename
@@ -43,12 +38,10 @@ class InventoryDirectory(object):
self.hosts = {}
self.groups = {}
- self._loader = loader
-
for i in self.names:
# Skip files that end with certain extensions or characters
- if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo")):
+ if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
continue
# Skip hidden files
if i.startswith('.') and not i.startswith('./'):
@@ -58,9 +51,9 @@ class InventoryDirectory(object):
continue
fullpath = os.path.join(self.directory, i)
if os.path.isdir(fullpath):
- parser = InventoryDirectory(loader=loader, filename=fullpath)
- elif is_executable(fullpath):
- parser = InventoryScript(loader=loader, filename=fullpath)
+ parser = InventoryDirectory(filename=fullpath)
+ elif utils.is_executable(fullpath):
+ parser = InventoryScript(filename=fullpath)
else:
parser = InventoryParser(filename=fullpath)
self.parsers.append(parser)
@@ -160,7 +153,7 @@ class InventoryDirectory(object):
# name
if group.name != newgroup.name:
- raise AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
+ raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
# depth
group.depth = max([group.depth, newgroup.depth])
@@ -203,14 +196,14 @@ class InventoryDirectory(object):
self.groups[newparent.name].add_child_group(group)
# variables
- group.vars = combine_vars(group.vars, newgroup.vars)
+ group.vars = utils.combine_vars(group.vars, newgroup.vars)
def _merge_hosts(self,host, newhost):
""" Merge all of instance newhost into host """
# name
if host.name != newhost.name:
- raise AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
+ raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
# group membership relation
for newgroup in newhost.groups:
@@ -225,7 +218,7 @@ class InventoryDirectory(object):
self.groups[newgroup.name].add_host(host)
# variables
- host.vars = combine_vars(host.vars, newhost.vars)
+ host.vars = utils.combine_vars(host.vars, newhost.vars)
def get_host_variables(self, host):
""" Gets additional host variables from all inventories """
diff --git a/v2/ansible/inventory/expand_hosts.py b/v1/ansible/inventory/expand_hosts.py
similarity index 97%
rename from v2/ansible/inventory/expand_hosts.py
rename to v1/ansible/inventory/expand_hosts.py
index b5a957c53fe..f1297409355 100644
--- a/v2/ansible/inventory/expand_hosts.py
+++ b/v1/ansible/inventory/expand_hosts.py
@@ -30,9 +30,6 @@ expanded into 001, 002 ...009, 010.
Note that when beg is specified with left zero padding, then the length of
end must be the same as that of beg, else an exception is raised.
'''
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
import string
from ansible import errors
diff --git a/v2/ansible/inventory/group.py b/v1/ansible/inventory/group.py
similarity index 69%
rename from v2/ansible/inventory/group.py
rename to v1/ansible/inventory/group.py
index 6525e69b466..262558e69c8 100644
--- a/v2/ansible/inventory/group.py
+++ b/v1/ansible/inventory/group.py
@@ -14,15 +14,11 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-from ansible.utils.debug import debug
-
-class Group:
+class Group(object):
''' a group of ansible hosts '''
- #__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
+ __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
def __init__(self, name=None):
@@ -33,49 +29,9 @@ class Group:
self.child_groups = []
self.parent_groups = []
self._hosts_cache = None
-
#self.clear_hosts_cache()
- #if self.name is None:
- # raise Exception("group name is required")
-
- def __repr__(self):
- return self.get_name()
-
- def __getstate__(self):
- return self.serialize()
-
- def __setstate__(self, data):
- return self.deserialize(data)
-
- def serialize(self):
- parent_groups = []
- for parent in self.parent_groups:
- parent_groups.append(parent.serialize())
-
- result = dict(
- name=self.name,
- vars=self.vars.copy(),
- parent_groups=parent_groups,
- depth=self.depth,
- )
-
- debug("serializing group, result is: %s" % result)
- return result
-
- def deserialize(self, data):
- debug("deserializing group, data is: %s" % data)
- self.__init__()
- self.name = data.get('name')
- self.vars = data.get('vars', dict())
-
- parent_groups = data.get('parent_groups', [])
- for parent_data in parent_groups:
- g = Group()
- g.deserialize(parent_data)
- self.parent_groups.append(g)
-
- def get_name(self):
- return self.name
+ if self.name is None:
+ raise Exception("group name is required")
def add_child_group(self, group):
@@ -144,7 +100,7 @@ class Group:
hosts.append(mine)
return hosts
- def get_vars(self):
+ def get_variables(self):
return self.vars.copy()
def _get_ancestors(self):
diff --git a/v1/ansible/inventory/host.py b/v1/ansible/inventory/host.py
new file mode 100644
index 00000000000..d4dc20fa462
--- /dev/null
+++ b/v1/ansible/inventory/host.py
@@ -0,0 +1,67 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import ansible.constants as C
+from ansible import utils
+
+class Host(object):
+ ''' a single ansible host '''
+
+ __slots__ = [ 'name', 'vars', 'groups' ]
+
+ def __init__(self, name=None, port=None):
+
+ self.name = name
+ self.vars = {}
+ self.groups = []
+ if port and port != C.DEFAULT_REMOTE_PORT:
+ self.set_variable('ansible_ssh_port', int(port))
+
+ if self.name is None:
+ raise Exception("host name is required")
+
+ def add_group(self, group):
+
+ self.groups.append(group)
+
+ def set_variable(self, key, value):
+
+ self.vars[key]=value
+
+ def get_groups(self):
+
+ groups = {}
+ for g in self.groups:
+ groups[g.name] = g
+ ancestors = g.get_ancestors()
+ for a in ancestors:
+ groups[a.name] = a
+ return groups.values()
+
+ def get_variables(self):
+
+ results = {}
+ groups = self.get_groups()
+ for group in sorted(groups, key=lambda g: g.depth):
+ results = utils.combine_vars(results, group.get_variables())
+ results = utils.combine_vars(results, self.vars)
+ results['inventory_hostname'] = self.name
+ results['inventory_hostname_short'] = self.name.split('.')[0]
+ results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
+ return results
+
+
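`Host.get_variables()` above flattens a host's variables by walking its groups from shallowest to deepest, so more specific groups override broader ones and host vars override everything, before the inventory_* bookkeeping keys are added. A small illustrative sketch of that precedence; the `G` class and `flatten_host_vars` are invented for this example and use a plain dict update rather than the combine helper:

```python
# Illustrative only: group-depth ordering gives more specific groups
# precedence when a host's variables are flattened.
class G:
    def __init__(self, name, depth, variables):
        self.name, self.depth, self.vars = name, depth, variables


def flatten_host_vars(hostname, groups, host_vars):
    results = {}
    for group in sorted(groups, key=lambda g: g.depth):
        results.update(group.vars)         # deeper (more specific) groups win
    results.update(host_vars)              # host vars win over any group
    results['inventory_hostname'] = hostname
    results['inventory_hostname_short'] = hostname.split('.')[0]
    results['group_names'] = sorted(g.name for g in groups if g.name != 'all')
    return results


groups = [G('all', 1, {'http_port': 80}), G('webservers', 2, {'http_port': 8080})]
print(flatten_host_vars('web01.example.com', groups, {'http_port': 8081}))
```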
diff --git a/v2/ansible/inventory/ini.py b/v1/ansible/inventory/ini.py
similarity index 82%
rename from v2/ansible/inventory/ini.py
rename to v1/ansible/inventory/ini.py
index e004ee8bb75..bd9a98e7f86 100644
--- a/v2/ansible/inventory/ini.py
+++ b/v1/ansible/inventory/ini.py
@@ -16,20 +16,17 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-import ast
-import shlex
-import re
-
-from ansible import constants as C
-from ansible.errors import *
+import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
-from ansible.utils.unicode import to_unicode
+from ansible import errors
+from ansible import utils
+import shlex
+import re
+import ast
class InventoryParser(object):
"""
@@ -37,8 +34,9 @@ class InventoryParser(object):
"""
def __init__(self, filename=C.DEFAULT_HOST_LIST):
- self.filename = filename
+
with open(filename) as fh:
+ self.filename = filename
self.lines = fh.readlines()
self.groups = {}
self.hosts = {}
@@ -56,7 +54,10 @@ class InventoryParser(object):
def _parse_value(v):
if "#" not in v:
try:
- v = ast.literal_eval(v)
+ ret = ast.literal_eval(v)
+ if not isinstance(ret, float):
+ # Do not trim floats. Eg: "1.20" to 1.2
+ return ret
# Using explicit exceptions.
# Likely a string that literal_eval does not like. We will then just set it.
except ValueError:
@@ -65,7 +66,7 @@ class InventoryParser(object):
except SyntaxError:
# Is this a hash with an equals at the end?
pass
- return to_unicode(v, nonstring='passthru', errors='strict')
+ return v
# [webservers]
# alpha
@@ -90,8 +91,8 @@ class InventoryParser(object):
self.groups = dict(all=all, ungrouped=ungrouped)
active_group_name = 'ungrouped'
- for line in self.lines:
- line = self._before_comment(line).strip()
+ for lineno in range(len(self.lines)):
+ line = utils.before_comment(self.lines[lineno]).strip()
if line.startswith("[") and line.endswith("]"):
active_group_name = line.replace("[","").replace("]","")
if ":vars" in line or ":children" in line:
@@ -145,11 +146,8 @@ class InventoryParser(object):
try:
(k,v) = t.split("=", 1)
except ValueError, e:
- raise AnsibleError("Invalid ini entry in %s: %s - %s" % (self.filename, t, str(e)))
- if k == 'ansible_ssh_host':
- host.ipv4_address = self._parse_value(v)
- else:
- host.set_variable(k, self._parse_value(v))
+ raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e)))
+ host.set_variable(k, self._parse_value(v))
self.groups[active_group_name].add_host(host)
# [southeast:children]
@@ -159,8 +157,8 @@ class InventoryParser(object):
def _parse_group_children(self):
group = None
- for line in self.lines:
- line = line.strip()
+ for lineno in range(len(self.lines)):
+ line = self.lines[lineno].strip()
if line is None or line == '':
continue
if line.startswith("[") and ":children]" in line:
@@ -175,7 +173,7 @@ class InventoryParser(object):
elif group:
kid_group = self.groups.get(line, None)
if kid_group is None:
- raise AnsibleError("child group is not defined: (%s)" % line)
+ raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line))
else:
group.add_child_group(kid_group)
@@ -186,13 +184,13 @@ class InventoryParser(object):
def _parse_group_variables(self):
group = None
- for line in self.lines:
- line = line.strip()
+ for lineno in range(len(self.lines)):
+ line = self.lines[lineno].strip()
if line.startswith("[") and ":vars]" in line:
line = line.replace("[","").replace(":vars]","")
group = self.groups.get(line, None)
if group is None:
- raise AnsibleError("can't add vars to undefined group: %s" % line)
+ raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line))
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
@@ -201,18 +199,10 @@ class InventoryParser(object):
pass
elif group:
if "=" not in line:
- raise AnsibleError("variables assigned to group must be in key=value form")
+ raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1))
else:
(k, v) = [e.strip() for e in line.split("=", 1)]
group.set_variable(k, self._parse_value(v))
def get_host_variables(self, host):
return {}
-
- def _before_comment(self, msg):
- ''' what's the part of a string before a comment? '''
- msg = msg.replace("\#","**NOT_A_COMMENT**")
- msg = msg.split("#")[0]
- msg = msg.replace("**NOT_A_COMMENT**","#")
- return msg
-
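The `_parse_value()` logic shown in this hunk converts inventory values with `ast.literal_eval` but keeps floats as strings, so a value like "1.20" is not silently trimmed to 1.2. A condensed, self-contained restatement of that parsing behaviour:

```python
import ast


def parse_value(v):
    # Sketch of the value parsing above: literals become typed values,
    # floats and anything literal_eval rejects stay as strings.
    if "#" not in v:
        try:
            ret = ast.literal_eval(v)
            if not isinstance(ret, float):
                return ret
        except (ValueError, SyntaxError):
            # plain strings (or "a hash with an equals at the end") land here
            pass
    return v


print(parse_value("22"))      # 22 (int)
print(parse_value("True"))    # True (bool)
print(parse_value("1.20"))    # '1.20' (kept as a string)
print(parse_value("hello"))   # 'hello'
```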
diff --git a/v2/ansible/inventory/script.py b/v1/ansible/inventory/script.py
similarity index 82%
rename from v2/ansible/inventory/script.py
rename to v1/ansible/inventory/script.py
index 9675d70f690..b83cb9bcc7a 100644
--- a/v2/ansible/inventory/script.py
+++ b/v1/ansible/inventory/script.py
@@ -16,26 +16,22 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
import os
import subprocess
-import sys
-
-from ansible import constants as C
-from ansible.errors import *
+import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_bytes_to_unicode
+from ansible import utils
+from ansible import errors
+import sys
-class InventoryScript:
+class InventoryScript(object):
''' Host inventory parser for ansible using external inventory scripts. '''
- def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
-
- self._loader = loader
+ def __init__(self, filename=C.DEFAULT_HOST_LIST):
# Support inventory scripts that are not prefixed with some
# path information but happen to be in the current working
@@ -45,11 +41,11 @@ class InventoryScript:
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, e:
- raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
+ raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(stdout, stderr) = sp.communicate()
if sp.returncode != 0:
- raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
+ raise errors.AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
self.data = stdout
# see comment about _meta below
@@ -62,7 +58,7 @@ class InventoryScript:
all_hosts = {}
# not passing from_remote because data from CMDB is trusted
- self.raw = self._loader.load(self.data)
+ self.raw = utils.parse_json(self.data)
self.raw = json_dict_bytes_to_unicode(self.raw)
all = Group('all')
@@ -72,7 +68,7 @@ class InventoryScript:
if 'failed' in self.raw:
sys.stderr.write(err + "\n")
- raise AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
+ raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
for (group_name, data) in self.raw.items():
@@ -96,12 +92,12 @@ class InventoryScript:
if not isinstance(data, dict):
data = {'hosts': data}
# is not those subkeys, then simplified syntax, host with vars
- elif not any(k in data for k in ('hosts','vars')):
+ elif not any(k in data for k in ('hosts','vars','children')):
data = {'hosts': [group_name], 'vars': data}
if 'hosts' in data:
if not isinstance(data['hosts'], list):
- raise AnsibleError("You defined a group \"%s\" with bad "
+ raise errors.AnsibleError("You defined a group \"%s\" with bad "
"data for the host list:\n %s" % (group_name, data))
for hostname in data['hosts']:
@@ -112,7 +108,7 @@ class InventoryScript:
if 'vars' in data:
if not isinstance(data['vars'], dict):
- raise AnsibleError("You defined a group \"%s\" with bad "
+ raise errors.AnsibleError("You defined a group \"%s\" with bad "
"data for variables:\n %s" % (group_name, data))
for k, v in data['vars'].iteritems():
@@ -147,12 +143,12 @@ class InventoryScript:
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, e:
- raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
+ raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(out, err) = sp.communicate()
if out.strip() == '':
return dict()
try:
- return json_dict_bytes_to_unicode(self._loader.load(out))
+ return json_dict_bytes_to_unicode(utils.parse_json(out))
except ValueError:
- raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
+ raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
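
For context, the parser above consumes JSON produced by an external inventory script. A sketch of what such a script might print for --list, with made-up group and host names; a group may be a bare host list or a dict carrying any of 'hosts', 'vars' and 'children' (the hunk above adds 'children' to the keys that suppress the simplified host-with-vars reading):

    #!/usr/bin/env python
    # Hypothetical --list output of an external inventory script.
    import json

    inventory = {
        # bare list: simplified syntax, just hosts
        "webservers": ["web1.example.com", "web2.example.com"],
        # full syntax: hosts, group vars and child groups
        "dbservers": {
            "hosts": ["db1.example.com"],
            "vars": {"max_connections": 200},
            "children": ["replicas"],
        },
    }

    print(json.dumps(inventory))
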
diff --git a/v2/test/parsing/yaml/__init__.py b/v1/ansible/inventory/vars_plugins/__init__.py
similarity index 100%
rename from v2/test/parsing/yaml/__init__.py
rename to v1/ansible/inventory/vars_plugins/__init__.py
diff --git a/v2/ansible/inventory/vars_plugins/noop.py b/v1/ansible/inventory/vars_plugins/noop.py
similarity index 94%
rename from v2/ansible/inventory/vars_plugins/noop.py
rename to v1/ansible/inventory/vars_plugins/noop.py
index 8f0c98cad56..5d4b4b6658c 100644
--- a/v2/ansible/inventory/vars_plugins/noop.py
+++ b/v1/ansible/inventory/vars_plugins/noop.py
@@ -15,8 +15,6 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
class VarsModule(object):
diff --git a/lib/ansible/module_common.py b/v1/ansible/module_common.py
similarity index 100%
rename from lib/ansible/module_common.py
rename to v1/ansible/module_common.py
diff --git a/v2/ansible/module_utils/__init__.py b/v1/ansible/module_utils/__init__.py
similarity index 100%
rename from v2/ansible/module_utils/__init__.py
rename to v1/ansible/module_utils/__init__.py
diff --git a/v2/ansible/module_utils/a10.py b/v1/ansible/module_utils/a10.py
similarity index 100%
rename from v2/ansible/module_utils/a10.py
rename to v1/ansible/module_utils/a10.py
diff --git a/v2/ansible/module_utils/basic.py b/v1/ansible/module_utils/basic.py
similarity index 90%
rename from v2/ansible/module_utils/basic.py
rename to v1/ansible/module_utils/basic.py
index 8f9b03f882d..e772a12efce 100644
--- a/v2/ansible/module_utils/basic.py
+++ b/v1/ansible/module_utils/basic.py
@@ -38,12 +38,14 @@ BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1]
BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0]
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
+SELINUX_SPECIAL_FS="<<SELINUX_SPECIAL_FILESYSTEMS>>"
+
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here
# can be inserted in any module source automatically by including
# #<<INCLUDE_ANSIBLE_MODULE_COMMON>> on a blank line by itself inside
# of an ansible module. The source of this common code lives
-# in ansible/executor/module_common.py
+# in lib/ansible/module_common.py
import locale
import os
@@ -65,7 +67,6 @@ import pwd
import platform
import errno
import tempfile
-from itertools import imap, repeat
try:
import json
@@ -182,7 +183,8 @@ def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
- distribution = platform.linux_distribution()[0].capitalize()
+ supported_dists = platform._supported_dists + ('arch',)
+ distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
@@ -235,7 +237,7 @@ def load_platform_subclass(cls, *args, **kwargs):
return super(cls, subclass).__new__(subclass)
-def json_dict_unicode_to_bytes(d, encoding='utf-8'):
+def json_dict_unicode_to_bytes(d):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples,
@@ -243,17 +245,17 @@ def json_dict_unicode_to_bytes(d, encoding='utf-8'):
'''
if isinstance(d, unicode):
- return d.encode(encoding)
+ return d.encode('utf-8')
elif isinstance(d, dict):
- return dict(imap(json_dict_unicode_to_bytes, d.iteritems(), repeat(encoding)))
+ return dict(map(json_dict_unicode_to_bytes, d.iteritems()))
elif isinstance(d, list):
- return list(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
+ return list(map(json_dict_unicode_to_bytes, d))
elif isinstance(d, tuple):
- return tuple(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
+ return tuple(map(json_dict_unicode_to_bytes, d))
else:
return d
-def json_dict_bytes_to_unicode(d, encoding='utf-8'):
+def json_dict_bytes_to_unicode(d):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples,
@@ -261,13 +263,13 @@ def json_dict_bytes_to_unicode(d, encoding='utf-8'):
'''
if isinstance(d, str):
- return unicode(d, encoding)
+ return unicode(d, 'utf-8')
elif isinstance(d, dict):
- return dict(imap(json_dict_bytes_to_unicode, d.iteritems(), repeat(encoding)))
+ return dict(map(json_dict_bytes_to_unicode, d.iteritems()))
elif isinstance(d, list):
- return list(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
+ return list(map(json_dict_bytes_to_unicode, d))
elif isinstance(d, tuple):
- return tuple(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
+ return tuple(map(json_dict_bytes_to_unicode, d))
else:
return d
@@ -335,7 +337,8 @@ class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
- required_one_of=None, add_file_common_args=False, supports_check_mode=False):
+ required_one_of=None, add_file_common_args=False, supports_check_mode=False,
+ required_if=None):
'''
common code for quickly building an ansible module in Python
@@ -360,9 +363,9 @@ class AnsibleModule(object):
# reset to LANG=C if it's an invalid/unavailable locale
self._check_locale()
- self.params = self._load_params()
+ (self.params, self.args) = self._load_params()
- self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log']
+ self._legal_inputs = ['CHECKMODE', 'NO_LOG']
self.aliases = self._handle_aliases()
@@ -383,6 +386,7 @@ class AnsibleModule(object):
self._check_argument_types()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
+ self._check_required_if(required_if)
self._set_defaults(pre=False)
if not self.no_log:
@@ -529,10 +533,10 @@ class AnsibleModule(object):
path = os.path.dirname(path)
return path
- def is_nfs_path(self, path):
+ def is_special_selinux_path(self, path):
"""
- Returns a tuple containing (True, selinux_context) if the given path
- is on a NFS mount point, otherwise the return will be (False, None).
+ Returns a tuple containing (True, selinux_context) if the given path is on a
+ NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
@@ -543,9 +547,13 @@ class AnsibleModule(object):
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
- if path_mount_point == mount_point and 'nfs' in fstype:
- nfs_context = self.selinux_context(path_mount_point)
- return (True, nfs_context)
+
+ if path_mount_point == mount_point:
+ for fs in SELINUX_SPECIAL_FS.split(','):
+ if fs in fstype:
+ special_context = self.selinux_context(path_mount_point)
+ return (True, special_context)
+
return (False, None)
def set_default_selinux_context(self, path, changed):
@@ -563,9 +571,9 @@ class AnsibleModule(object):
# Iterate over the current context instead of the
# argument context, which may have selevel.
- (is_nfs, nfs_context) = self.is_nfs_path(path)
- if is_nfs:
- new_context = nfs_context
+ (is_special_se, sp_context) = self.is_special_selinux_path(path)
+ if is_special_se:
+ new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
@@ -862,6 +870,7 @@ class AnsibleModule(object):
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_CTYPE'] = 'C'
+ os.environ['LC_MESSAGES'] = 'C'
except Exception, e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
@@ -889,7 +898,7 @@ class AnsibleModule(object):
def _check_for_check_mode(self):
for (k,v) in self.params.iteritems():
- if k == '_ansible_check_mode':
+ if k == 'CHECKMODE':
if not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module does not support check mode")
if self.supports_check_mode:
@@ -897,13 +906,13 @@ class AnsibleModule(object):
def _check_for_no_log(self):
for (k,v) in self.params.iteritems():
- if k == '_ansible_no_log':
+ if k == 'NO_LOG':
self.no_log = self.boolean(v)
def _check_invalid_arguments(self):
for (k,v) in self.params.iteritems():
# these should be in legal inputs already
- #if k in ('_ansible_check_mode', '_ansible_no_log'):
+ #if k in ('CHECKMODE', 'NO_LOG'):
# continue
if k not in self._legal_inputs:
self.fail_json(msg="unsupported parameter for module: %s" % k)
@@ -951,6 +960,20 @@ class AnsibleModule(object):
if len(missing) > 0:
self.fail_json(msg="missing required arguments: %s" % ",".join(missing))
+ def _check_required_if(self, spec):
+ ''' ensure that parameters which conditionally required are present '''
+ if spec is None:
+ return
+ for (key, val, requirements) in spec:
+ missing = []
+ if key in self.params and self.params[key] == val:
+ for check in requirements:
+ count = self._count_terms(check)
+ if count == 0:
+ missing.append(check)
+ if len(missing) > 0:
+ self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)))
+
def _check_argument_values(self):
''' ensure all arguments have the requested values, and there are no stray arguments '''
for (k,v) in self.argument_spec.iteritems():
@@ -1010,57 +1033,60 @@ class AnsibleModule(object):
value = self.params[k]
is_invalid = False
- if wanted == 'str':
- if not isinstance(value, basestring):
- self.params[k] = str(value)
- elif wanted == 'list':
- if not isinstance(value, list):
- if isinstance(value, basestring):
- self.params[k] = value.split(",")
- elif isinstance(value, int) or isinstance(value, float):
- self.params[k] = [ str(value) ]
- else:
- is_invalid = True
- elif wanted == 'dict':
- if not isinstance(value, dict):
- if isinstance(value, basestring):
- if value.startswith("{"):
- try:
- self.params[k] = json.loads(value)
- except:
- (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
- if exc is not None:
- self.fail_json(msg="unable to evaluate dictionary for %s" % k)
- self.params[k] = result
- elif '=' in value:
- self.params[k] = dict([x.strip().split("=", 1) for x in value.split(",")])
+ try:
+ if wanted == 'str':
+ if not isinstance(value, basestring):
+ self.params[k] = str(value)
+ elif wanted == 'list':
+ if not isinstance(value, list):
+ if isinstance(value, basestring):
+ self.params[k] = value.split(",")
+ elif isinstance(value, int) or isinstance(value, float):
+ self.params[k] = [ str(value) ]
else:
- self.fail_json(msg="dictionary requested, could not parse JSON or key=value")
- else:
- is_invalid = True
- elif wanted == 'bool':
- if not isinstance(value, bool):
- if isinstance(value, basestring):
- self.params[k] = self.boolean(value)
- else:
- is_invalid = True
- elif wanted == 'int':
- if not isinstance(value, int):
- if isinstance(value, basestring):
- self.params[k] = int(value)
- else:
- is_invalid = True
- elif wanted == 'float':
- if not isinstance(value, float):
- if isinstance(value, basestring):
- self.params[k] = float(value)
- else:
- is_invalid = True
- else:
- self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
+ is_invalid = True
+ elif wanted == 'dict':
+ if not isinstance(value, dict):
+ if isinstance(value, basestring):
+ if value.startswith("{"):
+ try:
+ self.params[k] = json.loads(value)
+ except:
+ (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
+ if exc is not None:
+ self.fail_json(msg="unable to evaluate dictionary for %s" % k)
+ self.params[k] = result
+ elif '=' in value:
+ self.params[k] = dict([x.strip().split("=", 1) for x in value.split(",")])
+ else:
+ self.fail_json(msg="dictionary requested, could not parse JSON or key=value")
+ else:
+ is_invalid = True
+ elif wanted == 'bool':
+ if not isinstance(value, bool):
+ if isinstance(value, basestring):
+ self.params[k] = self.boolean(value)
+ else:
+ is_invalid = True
+ elif wanted == 'int':
+ if not isinstance(value, int):
+ if isinstance(value, basestring):
+ self.params[k] = int(value)
+ else:
+ is_invalid = True
+ elif wanted == 'float':
+ if not isinstance(value, float):
+ if isinstance(value, basestring):
+ self.params[k] = float(value)
+ else:
+ is_invalid = True
+ else:
+ self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
- if is_invalid:
- self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted))
+ if is_invalid:
+ self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted))
+ except ValueError, e:
+ self.fail_json(msg="value of argument %s is not of type %s and we were unable to automatically convert" % (k, wanted))
def _set_defaults(self, pre=True):
for (k,v) in self.argument_spec.iteritems():
@@ -1076,11 +1102,20 @@ class AnsibleModule(object):
def _load_params(self):
''' read the input and return a dictionary and the arguments string '''
- params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
- if params is None:
- params = dict()
- return params
-
+ args = MODULE_ARGS
+ items = shlex.split(args)
+ params = {}
+ for x in items:
+ try:
+ (k, v) = x.split("=",1)
+ except Exception, e:
+ self.fail_json(msg="this module requires key=value arguments (%s)" % (items))
+ if k in params:
+ self.fail_json(msg="duplicate parameter: %s (value=%s)" % (k, v))
+ params[k] = v
+ params2 = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
+ params2.update(params)
+ return (params2, args)
def _log_invocation(self):
''' log that ansible ran the module '''
@@ -1201,17 +1236,13 @@ class AnsibleModule(object):
self.fail_json(msg='Boolean %s not in either boolean list' % arg)
def jsonify(self, data):
- for encoding in ("utf-8", "latin-1"):
+ for encoding in ("utf-8", "latin-1", "unicode_escape"):
try:
return json.dumps(data, encoding=encoding)
- # Old systems using old simplejson module does not support encoding keyword.
- except TypeError:
- try:
- new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
- except UnicodeDecodeError:
- continue
- return json.dumps(new_data)
- except UnicodeDecodeError:
+ # Old systems using the simplejson module do not support the encoding keyword.
+ except TypeError, e:
+ return json.dumps(data)
+ except UnicodeDecodeError, e:
continue
self.fail_json(msg='Invalid unicode encoding encountered')
@@ -1448,7 +1479,7 @@ class AnsibleModule(object):
msg = None
st_in = None
- # Set a temporary env path if a prefix is passed
+ # Set a temporart env path if a prefix is passed
env=os.environ
if path_prefix:
env['PATH']="%s:%s" % (path_prefix, env['PATH'])
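
The required_if option added above expects a list of (parameter, value, [required parameters]) tuples. A standalone sketch of the same check, kept outside AnsibleModule so it runs on its own (it skips the alias handling that _count_terms() provides):

    def check_required_if(spec, params):
        ''' same semantics as _check_required_if() above, outside the module class '''
        problems = []
        for (key, val, requirements) in spec or []:
            if key in params and params[key] == val:
                missing = [r for r in requirements if r not in params]
                if missing:
                    problems.append("%s is %s but the following are missing: %s"
                                    % (key, val, ','.join(missing)))
        return problems

    params = {'state': 'present', 'name': 'demo'}
    print(check_required_if([('state', 'present', ['src'])], params))
    # ['state is present but the following are missing: src']
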
diff --git a/v1/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py
new file mode 100644
index 00000000000..752defec2b6
--- /dev/null
+++ b/v1/ansible/module_utils/cloudstack.py
@@ -0,0 +1,368 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+try:
+ from cs import CloudStack, CloudStackException, read_config
+ has_lib_cs = True
+except ImportError:
+ has_lib_cs = False
+
+
+class AnsibleCloudStack:
+
+ def __init__(self, module):
+ if not has_lib_cs:
+ module.fail_json(msg="python library cs required: pip install cs")
+
+ self.result = {
+ 'changed': False,
+ }
+
+ self.module = module
+ self._connect()
+
+ self.domain = None
+ self.account = None
+ self.project = None
+ self.ip_address = None
+ self.zone = None
+ self.vm = None
+ self.os_type = None
+ self.hypervisor = None
+ self.capabilities = None
+
+
+ def _connect(self):
+ api_key = self.module.params.get('api_key')
+ api_secret = self.module.params.get('secret_key')
+ api_url = self.module.params.get('api_url')
+ api_http_method = self.module.params.get('api_http_method')
+ api_timeout = self.module.params.get('api_timeout')
+
+ if api_key and api_secret and api_url:
+ self.cs = CloudStack(
+ endpoint=api_url,
+ key=api_key,
+ secret=api_secret,
+ timeout=api_timeout,
+ method=api_http_method
+ )
+ else:
+ self.cs = CloudStack(**read_config())
+
+
+ def get_or_fallback(self, key=None, fallback_key=None):
+ value = self.module.params.get(key)
+ if not value:
+ value = self.module.params.get(fallback_key)
+ return value
+
+
+ # TODO: for backward compatibility only, remove if not used anymore
+ def _has_changed(self, want_dict, current_dict, only_keys=None):
+ return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys)
+
+
+ def has_changed(self, want_dict, current_dict, only_keys=None):
+ for key, value in want_dict.iteritems():
+
+ # Optionally limit by a list of keys
+ if only_keys and key not in only_keys:
+ continue;
+
+ # Skip None values
+ if value is None:
+ continue;
+
+ if key in current_dict:
+
+ # API returns string for int in some cases, just to make sure
+ if isinstance(value, int):
+ current_dict[key] = int(current_dict[key])
+ elif isinstance(value, str):
+ current_dict[key] = str(current_dict[key])
+
+ # Only need to detect a single change, not every item
+ if value != current_dict[key]:
+ return True
+ return False
+
+
+ def _get_by_key(self, key=None, my_dict={}):
+ if key:
+ if key in my_dict:
+ return my_dict[key]
+ self.module.fail_json(msg="Something went wrong: %s not found" % key)
+ return my_dict
+
+
+ def get_project(self, key=None):
+ if self.project:
+ return self._get_by_key(key, self.project)
+
+ project = self.module.params.get('project')
+ if not project:
+ return None
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ projects = self.cs.listProjects(**args)
+ if projects:
+ for p in projects['project']:
+ if project.lower() in [ p['name'].lower(), p['id'] ]:
+ self.project = p
+ return self._get_by_key(key, self.project)
+ self.module.fail_json(msg="project '%s' not found" % project)
+
+
+ def get_ip_address(self, key=None):
+ if self.ip_address:
+ return self._get_by_key(key, self.ip_address)
+
+ ip_address = self.module.params.get('ip_address')
+ if not ip_address:
+ self.module.fail_json(msg="IP address param 'ip_address' is required")
+
+ args = {}
+ args['ipaddress'] = ip_address
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ ip_addresses = self.cs.listPublicIpAddresses(**args)
+
+ if not ip_addresses:
+ self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
+
+ self.ip_address = ip_addresses['publicipaddress'][0]
+ return self._get_by_key(key, self.ip_address)
+
+
+ def get_vm(self, key=None):
+ if self.vm:
+ return self._get_by_key(key, self.vm)
+
+ vm = self.module.params.get('vm')
+ if not vm:
+ self.module.fail_json(msg="Virtual machine param 'vm' is required")
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['zoneid'] = self.get_zone(key='id')
+ vms = self.cs.listVirtualMachines(**args)
+ if vms:
+ for v in vms['virtualmachine']:
+ if vm in [ v['name'], v['displayname'], v['id'] ]:
+ self.vm = v
+ return self._get_by_key(key, self.vm)
+ self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
+
+
+ def get_zone(self, key=None):
+ if self.zone:
+ return self._get_by_key(key, self.zone)
+
+ zone = self.module.params.get('zone')
+ zones = self.cs.listZones()
+
+ # use the first zone if no zone param given
+ if not zone:
+ self.zone = zones['zone'][0]
+ return self._get_by_key(key, self.zone)
+
+ if zones:
+ for z in zones['zone']:
+ if zone in [ z['name'], z['id'] ]:
+ self.zone = z
+ return self._get_by_key(key, self.zone)
+ self.module.fail_json(msg="zone '%s' not found" % zone)
+
+
+ def get_os_type(self, key=None):
+ if self.os_type:
+ return self._get_by_key(key, self.zone)
+
+ os_type = self.module.params.get('os_type')
+ if not os_type:
+ return None
+
+ os_types = self.cs.listOsTypes()
+ if os_types:
+ for o in os_types['ostype']:
+ if os_type in [ o['description'], o['id'] ]:
+ self.os_type = o
+ return self._get_by_key(key, self.os_type)
+ self.module.fail_json(msg="OS type '%s' not found" % os_type)
+
+
+ def get_hypervisor(self):
+ if self.hypervisor:
+ return self.hypervisor
+
+ hypervisor = self.module.params.get('hypervisor')
+ hypervisors = self.cs.listHypervisors()
+
+ # use the first hypervisor if no hypervisor param given
+ if not hypervisor:
+ self.hypervisor = hypervisors['hypervisor'][0]['name']
+ return self.hypervisor
+
+ for h in hypervisors['hypervisor']:
+ if hypervisor.lower() == h['name'].lower():
+ self.hypervisor = h['name']
+ return self.hypervisor
+ self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
+
+
+ def get_account(self, key=None):
+ if self.account:
+ return self._get_by_key(key, self.account)
+
+ account = self.module.params.get('account')
+ if not account:
+ return None
+
+ domain = self.module.params.get('domain')
+ if not domain:
+ self.module.fail_json(msg="Account must be specified with Domain")
+
+ args = {}
+ args['name'] = account
+ args['domainid'] = self.get_domain(key='id')
+ args['listall'] = True
+ accounts = self.cs.listAccounts(**args)
+ if accounts:
+ self.account = accounts['account'][0]
+ return self._get_by_key(key, self.account)
+ self.module.fail_json(msg="Account '%s' not found" % account)
+
+
+ def get_domain(self, key=None):
+ if self.domain:
+ return self._get_by_key(key, self.domain)
+
+ domain = self.module.params.get('domain')
+ if not domain:
+ return None
+
+ args = {}
+ args['listall'] = True
+ domains = self.cs.listDomains(**args)
+ if domains:
+ for d in domains['domain']:
+ if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]:
+ self.domain = d
+ return self._get_by_key(key, self.domain)
+ self.module.fail_json(msg="Domain '%s' not found" % domain)
+
+
+ def get_tags(self, resource=None):
+ existing_tags = self.cs.listTags(resourceid=resource['id'])
+ if existing_tags:
+ return existing_tags['tag']
+ return []
+
+
+ def _delete_tags(self, resource, resource_type, tags):
+ existing_tags = resource['tags']
+ tags_to_delete = []
+ for existing_tag in existing_tags:
+ if existing_tag['key'] in tags:
+ if existing_tag['value'] != tags[key]:
+ tags_to_delete.append(existing_tag)
+ else:
+ tags_to_delete.append(existing_tag)
+ if tags_to_delete:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ args = {}
+ args['resourceids'] = resource['id']
+ args['resourcetype'] = resource_type
+ args['tags'] = tags_to_delete
+ self.cs.deleteTags(**args)
+
+
+ def _create_tags(self, resource, resource_type, tags):
+ tags_to_create = []
+ for i, tag_entry in enumerate(tags):
+ tag = {
+ 'key': tag_entry['key'],
+ 'value': tag_entry['value'],
+ }
+ tags_to_create.append(tag)
+ if tags_to_create:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ args = {}
+ args['resourceids'] = resource['id']
+ args['resourcetype'] = resource_type
+ args['tags'] = tags_to_create
+ self.cs.createTags(**args)
+
+
+ def ensure_tags(self, resource, resource_type=None):
+ if not resource_type or not resource:
+ self.module.fail_json(msg="Error: Missing resource or resource_type for tags.")
+
+ if 'tags' in resource:
+ tags = self.module.params.get('tags')
+ if tags is not None:
+ self._delete_tags(resource, resource_type, tags)
+ self._create_tags(resource, resource_type, tags)
+ resource['tags'] = self.get_tags(resource)
+ return resource
+
+
+ def get_capabilities(self, key=None):
+ if self.capabilities:
+ return self._get_by_key(key, self.capabilities)
+ capabilities = self.cs.listCapabilities()
+ self.capabilities = capabilities['capability']
+ return self._get_by_key(key, self.capabilities)
+
+
+ # TODO: for backward compatibility only, remove if not used anymore
+ def _poll_job(self, job=None, key=None):
+ return self.poll_job(job=job, key=key)
+
+
+ def poll_job(self, job=None, key=None):
+ if 'jobid' in job:
+ while True:
+ res = self.cs.queryAsyncJobResult(jobid=job['jobid'])
+ if res['jobstatus'] != 0 and 'jobresult' in res:
+ if 'errortext' in res['jobresult']:
+ self.module.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
+ if key and key in res['jobresult']:
+ job = res['jobresult'][key]
+ break
+ time.sleep(2)
+ return job
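
To illustrate the has_changed() helper above, here is its comparison logic repeated inline so the snippet runs without the cs library or a CloudStack connection; API values that come back as strings are normalised before comparing, and None entries in the wanted dict are ignored:

    def has_changed(want_dict, current_dict, only_keys=None):
        for key, value in want_dict.items():
            if only_keys and key not in only_keys:
                continue
            if value is None:
                continue
            if key in current_dict:
                # the API may return ints as strings, normalise before comparing
                if isinstance(value, int):
                    current_dict[key] = int(current_dict[key])
                elif isinstance(value, str):
                    current_dict[key] = str(current_dict[key])
                if value != current_dict[key]:
                    return True
        return False

    want = {'displaytext': 'web tier', 'size': 20, 'account': None}
    current = {'displaytext': 'web tier', 'size': '10', 'domain': 'ROOT'}
    print(has_changed(want, current))                              # True (size differs)
    print(has_changed(want, current, only_keys=['displaytext']))   # False
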
diff --git a/v2/ansible/module_utils/database.py b/v1/ansible/module_utils/database.py
similarity index 100%
rename from v2/ansible/module_utils/database.py
rename to v1/ansible/module_utils/database.py
diff --git a/v2/ansible/module_utils/ec2.py b/v1/ansible/module_utils/ec2.py
similarity index 100%
rename from v2/ansible/module_utils/ec2.py
rename to v1/ansible/module_utils/ec2.py
diff --git a/v2/ansible/module_utils/facts.py b/v1/ansible/module_utils/facts.py
similarity index 98%
rename from v2/ansible/module_utils/facts.py
rename to v1/ansible/module_utils/facts.py
index b223c5f5f7d..1162e05b9cf 100644
--- a/v2/ansible/module_utils/facts.py
+++ b/v1/ansible/module_utils/facts.py
@@ -99,8 +99,9 @@ class Facts(object):
('/etc/os-release', 'SuSE'),
('/etc/gentoo-release', 'Gentoo'),
('/etc/os-release', 'Debian'),
+ ('/etc/lsb-release', 'Mandriva'),
('/etc/os-release', 'NA'),
- ('/etc/lsb-release', 'Mandriva'))
+ )
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
@@ -416,11 +417,13 @@ class Facts(object):
self.facts['distribution_version'] = self.facts['distribution_version'] + '.' + release.group(1)
elif name == 'Debian':
data = get_file_content(path)
- if 'Debian' in data or 'Raspbian' in data:
+ if 'Ubuntu' in data:
+ break # Ubuntu gets correct info from python functions
+ elif 'Debian' in data or 'Raspbian' in data:
release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
self.facts['distribution_release'] = release.groups()[0]
- break
+ break
elif name == 'Mandriva':
data = get_file_content(path)
if 'Mandriva' in data:
@@ -2160,7 +2163,7 @@ class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
current_if['media'] = 'Unknown' # Mac does not give us this
current_if['media_select'] = words[1]
if len(words) > 2:
- current_if['media_type'] = words[2][1:]
+ current_if['media_type'] = words[2][1:-1]
if len(words) > 3:
current_if['media_options'] = self.get_options(words[3])
@@ -2545,6 +2548,43 @@ class LinuxVirtual(Virtual):
self.facts['virtualization_role'] = 'NA'
return
+class FreeBSDVirtual(Virtual):
+ """
+ This is a FreeBSD-specific subclass of Virtual. It defines
+ - virtualization_type
+ - virtualization_role
+ """
+ platform = 'FreeBSD'
+
+ def __init__(self):
+ Virtual.__init__(self)
+
+ def populate(self):
+ self.get_virtual_facts()
+ return self.facts
+
+ def get_virtual_facts(self):
+ self.facts['virtualization_type'] = ''
+ self.facts['virtualization_role'] = ''
+
+class OpenBSDVirtual(Virtual):
+ """
+ This is an OpenBSD-specific subclass of Virtual. It defines
+ - virtualization_type
+ - virtualization_role
+ """
+ platform = 'OpenBSD'
+
+ def __init__(self):
+ Virtual.__init__(self)
+
+ def populate(self):
+ self.get_virtual_facts()
+ return self.facts
+
+ def get_virtual_facts(self):
+ self.facts['virtualization_type'] = ''
+ self.facts['virtualization_role'] = ''
class HPUXVirtual(Virtual):
"""
diff --git a/v2/ansible/module_utils/gce.py b/v1/ansible/module_utils/gce.py
similarity index 100%
rename from v2/ansible/module_utils/gce.py
rename to v1/ansible/module_utils/gce.py
diff --git a/v2/ansible/module_utils/known_hosts.py b/v1/ansible/module_utils/known_hosts.py
similarity index 100%
rename from v2/ansible/module_utils/known_hosts.py
rename to v1/ansible/module_utils/known_hosts.py
diff --git a/v2/ansible/module_utils/openstack.py b/v1/ansible/module_utils/openstack.py
similarity index 98%
rename from v2/ansible/module_utils/openstack.py
rename to v1/ansible/module_utils/openstack.py
index b58cc534287..40694491443 100644
--- a/v2/ansible/module_utils/openstack.py
+++ b/v1/ansible/module_utils/openstack.py
@@ -93,11 +93,7 @@ def openstack_full_argument_spec(**kwargs):
def openstack_module_kwargs(**kwargs):
- ret = dict(
- required_one_of=[
- ['cloud', 'auth'],
- ],
- )
+ ret = {}
for key in ('mutually_exclusive', 'required_together', 'required_one_of'):
if key in kwargs:
if key in ret:
diff --git a/v2/ansible/module_utils/powershell.ps1 b/v1/ansible/module_utils/powershell.ps1
similarity index 94%
rename from v2/ansible/module_utils/powershell.ps1
rename to v1/ansible/module_utils/powershell.ps1
index 57d2c1b101c..a11e316989c 100644
--- a/v2/ansible/module_utils/powershell.ps1
+++ b/v1/ansible/module_utils/powershell.ps1
@@ -65,7 +65,7 @@ Function Exit-Json($obj)
$obj = New-Object psobject
}
- echo $obj | ConvertTo-Json -Depth 99
+ echo $obj | ConvertTo-Json -Compress -Depth 99
Exit
}
@@ -89,7 +89,7 @@ Function Fail-Json($obj, $message = $null)
Set-Attr $obj "msg" $message
Set-Attr $obj "failed" $true
- echo $obj | ConvertTo-Json -Depth 99
+ echo $obj | ConvertTo-Json -Compress -Depth 99
Exit 1
}
@@ -142,16 +142,16 @@ Function ConvertTo-Bool
return
}
-# Helper function to calculate md5 of a file in a way which powershell 3
+# Helper function to calculate a hash of a file in a way which powershell 3
# and above can handle:
-Function Get-FileMd5($path)
+Function Get-FileChecksum($path)
{
$hash = ""
If (Test-Path -PathType Leaf $path)
{
- $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider;
+ $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
$fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
- [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
+ $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
}
ElseIf (Test-Path -PathType Container $path)
diff --git a/v2/ansible/module_utils/rax.py b/v1/ansible/module_utils/rax.py
similarity index 100%
rename from v2/ansible/module_utils/rax.py
rename to v1/ansible/module_utils/rax.py
diff --git a/v2/ansible/module_utils/redhat.py b/v1/ansible/module_utils/redhat.py
similarity index 100%
rename from v2/ansible/module_utils/redhat.py
rename to v1/ansible/module_utils/redhat.py
diff --git a/v2/ansible/module_utils/splitter.py b/v1/ansible/module_utils/splitter.py
similarity index 100%
rename from v2/ansible/module_utils/splitter.py
rename to v1/ansible/module_utils/splitter.py
diff --git a/v2/ansible/module_utils/urls.py b/v1/ansible/module_utils/urls.py
similarity index 92%
rename from v2/ansible/module_utils/urls.py
rename to v1/ansible/module_utils/urls.py
index d56cc89395e..18317e86aeb 100644
--- a/v2/ansible/module_utils/urls.py
+++ b/v1/ansible/module_utils/urls.py
@@ -50,6 +50,15 @@ try:
except:
HAS_SSL=False
+HAS_MATCH_HOSTNAME = True
+try:
+ from ssl import match_hostname, CertificateError
+except ImportError:
+ try:
+ from backports.ssl_match_hostname import match_hostname, CertificateError
+ except ImportError:
+ HAS_MATCH_HOSTNAME = False
+
import httplib
import os
import re
@@ -293,11 +302,13 @@ class SSLValidationHandler(urllib2.BaseHandler):
connect_result = s.recv(4096)
self.validate_proxy_response(connect_result)
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
+ match_hostname(ssl_s.getpeercert(), self.hostname)
else:
self.module.fail_json(msg='Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
else:
s.connect((self.hostname, self.port))
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
+ match_hostname(ssl_s.getpeercert(), self.hostname)
# close the ssl connection
#ssl_s.unwrap()
s.close()
@@ -311,6 +322,9 @@ class SSLValidationHandler(urllib2.BaseHandler):
'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \
'Paths checked for this platform: %s' % ", ".join(paths_checked)
)
+ except CertificateError:
+ self.module.fail_json(msg="SSL Certificate does not belong to %s. Make sure the url has a certificate that belongs to it or use validate_certs=no (insecure)" % self.hostname)
+
try:
# cleanup the temp file created, don't worry
# if it fails for some reason
@@ -363,28 +377,29 @@ def fetch_url(module, url, data=None, headers=None, method=None,
# FIXME: change the following to use the generic_urlparse function
# to remove the indexed references for 'parsed'
parsed = urlparse.urlparse(url)
- if parsed[0] == 'https':
- if not HAS_SSL and validate_certs:
+ if parsed[0] == 'https' and validate_certs:
+ if not HAS_SSL:
if distribution == 'Redhat':
module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended. You can also install python-ssl from EPEL')
else:
module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended')
-
- elif validate_certs:
- # do the cert validation
- netloc = parsed[1]
- if '@' in netloc:
- netloc = netloc.split('@', 1)[1]
- if ':' in netloc:
- hostname, port = netloc.split(':', 1)
- port = int(port)
- else:
- hostname = netloc
- port = 443
- # create the SSL validation handler and
- # add it to the list of handlers
- ssl_handler = SSLValidationHandler(module, hostname, port)
- handlers.append(ssl_handler)
+ if not HAS_MATCH_HOSTNAME:
+ module.fail_json(msg='Available SSL validation does not check that the certificate matches the hostname. You can install backports.ssl_match_hostname or update your managed machine to python-2.7.9 or newer. You could also use validate_certs=no, however this is unsafe and not recommended')
+
+ # do the cert validation
+ netloc = parsed[1]
+ if '@' in netloc:
+ netloc = netloc.split('@', 1)[1]
+ if ':' in netloc:
+ hostname, port = netloc.split(':', 1)
+ port = int(port)
+ else:
+ hostname = netloc
+ port = 443
+ # create the SSL validation handler and
+ # add it to the list of handlers
+ ssl_handler = SSLValidationHandler(module, hostname, port)
+ handlers.append(ssl_handler)
if parsed[0] != 'ftp':
username = module.params.get('url_username', '')
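
The certificate-validation branch above derives the hostname and port from the parsed URL. A standalone sketch of that netloc handling (drop any user:pass@ prefix, honour an explicit port, default to 443):

    try:
        from urllib.parse import urlparse    # Python 3
    except ImportError:
        from urlparse import urlparse         # Python 2

    def ssl_endpoint(url):
        netloc = urlparse(url)[1]
        if '@' in netloc:
            netloc = netloc.split('@', 1)[1]
        if ':' in netloc:
            hostname, port = netloc.split(':', 1)
            port = int(port)
        else:
            hostname, port = netloc, 443
        return (hostname, port)

    print(ssl_endpoint('https://user:secret@example.com:8443/api'))   # ('example.com', 8443)
    print(ssl_endpoint('https://example.com/'))                       # ('example.com', 443)
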
diff --git a/v2/ansible/executor/task_queue_manager.py b/v1/ansible/modules/__init__.py
similarity index 100%
rename from v2/ansible/executor/task_queue_manager.py
rename to v1/ansible/modules/__init__.py
diff --git a/v1/ansible/modules/core b/v1/ansible/modules/core
new file mode 160000
index 00000000000..f8d8af17cdc
--- /dev/null
+++ b/v1/ansible/modules/core
@@ -0,0 +1 @@
+Subproject commit f8d8af17cdc72500af8319c96004b86ac702a0a4
diff --git a/v1/ansible/modules/extras b/v1/ansible/modules/extras
new file mode 160000
index 00000000000..495ad450e53
--- /dev/null
+++ b/v1/ansible/modules/extras
@@ -0,0 +1 @@
+Subproject commit 495ad450e53feb1cd26218dc68056cc34d1ea9ff
diff --git a/v1/ansible/playbook/__init__.py b/v1/ansible/playbook/__init__.py
new file mode 100644
index 00000000000..24ba2d3c6e0
--- /dev/null
+++ b/v1/ansible/playbook/__init__.py
@@ -0,0 +1,874 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import ansible.inventory
+import ansible.constants as C
+import ansible.runner
+from ansible.utils.template import template
+from ansible import utils
+from ansible import errors
+from ansible.module_utils.splitter import split_args, unquote
+import ansible.callbacks
+import ansible.cache
+import os
+import shlex
+import collections
+from play import Play
+import StringIO
+import pipes
+
+# the setup cache stores all variables about a host
+# gathered during the setup step, while the vars cache
+# holds all other variables about a host
+SETUP_CACHE = ansible.cache.FactCache()
+VARS_CACHE = collections.defaultdict(dict)
+RESERVED_TAGS = ['all','tagged','untagged','always']
+
+
+class PlayBook(object):
+ '''
+ runs an ansible playbook, given as a datastructure or YAML filename.
+ A playbook is a deployment, config management, or automation based
+ set of commands to run in series.
+
+ multiple plays/tasks do not execute simultaneously, but tasks in each
+ pattern do execute in parallel (according to the number of forks
+ requested) among the hosts they address
+ '''
+
+ # *****************************************************
+
+ def __init__(self,
+ playbook = None,
+ host_list = C.DEFAULT_HOST_LIST,
+ module_path = None,
+ forks = C.DEFAULT_FORKS,
+ timeout = C.DEFAULT_TIMEOUT,
+ remote_user = C.DEFAULT_REMOTE_USER,
+ remote_pass = C.DEFAULT_REMOTE_PASS,
+ remote_port = None,
+ transport = C.DEFAULT_TRANSPORT,
+ private_key_file = C.DEFAULT_PRIVATE_KEY_FILE,
+ callbacks = None,
+ runner_callbacks = None,
+ stats = None,
+ extra_vars = None,
+ only_tags = None,
+ skip_tags = None,
+ subset = C.DEFAULT_SUBSET,
+ inventory = None,
+ check = False,
+ diff = False,
+ any_errors_fatal = False,
+ vault_password = False,
+ force_handlers = False,
+ # privilege escalation
+ become = C.DEFAULT_BECOME,
+ become_method = C.DEFAULT_BECOME_METHOD,
+ become_user = C.DEFAULT_BECOME_USER,
+ become_pass = None,
+ ):
+
+ """
+ playbook: path to a playbook file
+ host_list: path to a file like /etc/ansible/hosts
+ module_path: path to ansible modules, like /usr/share/ansible/
+ forks: desired level of parallelism
+ timeout: connection timeout
+ remote_user: run as this user if not specified in a particular play
+ remote_pass: use this remote password (for all plays) vs using SSH keys
+ remote_port: default remote port to use if not specified with the host or play
+ transport: how to connect to hosts that don't specify a transport (local, paramiko, etc)
+ callbacks: output callbacks for the playbook
+ runner_callbacks: more callbacks, this time for the runner API
+ stats: holds aggregate data about events occurring to each host
+ inventory: can be specified instead of host_list to use a pre-existing inventory object
+ check: don't change anything, just try to detect some potential changes
+ any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed
+ force_handlers: continue to notify and run handlers even if a task fails
+ """
+
+ self.SETUP_CACHE = SETUP_CACHE
+ self.VARS_CACHE = VARS_CACHE
+
+ arguments = []
+ if playbook is None:
+ arguments.append('playbook')
+ if callbacks is None:
+ arguments.append('callbacks')
+ if runner_callbacks is None:
+ arguments.append('runner_callbacks')
+ if stats is None:
+ arguments.append('stats')
+ if arguments:
+ raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments))
+
+ if extra_vars is None:
+ extra_vars = {}
+ if only_tags is None:
+ only_tags = [ 'all' ]
+ if skip_tags is None:
+ skip_tags = []
+
+ self.check = check
+ self.diff = diff
+ self.module_path = module_path
+ self.forks = forks
+ self.timeout = timeout
+ self.remote_user = remote_user
+ self.remote_pass = remote_pass
+ self.remote_port = remote_port
+ self.transport = transport
+ self.callbacks = callbacks
+ self.runner_callbacks = runner_callbacks
+ self.stats = stats
+ self.extra_vars = extra_vars
+ self.global_vars = {}
+ self.private_key_file = private_key_file
+ self.only_tags = only_tags
+ self.skip_tags = skip_tags
+ self.any_errors_fatal = any_errors_fatal
+ self.vault_password = vault_password
+ self.force_handlers = force_handlers
+
+ self.become = become
+ self.become_method = become_method
+ self.become_user = become_user
+ self.become_pass = become_pass
+
+ self.callbacks.playbook = self
+ self.runner_callbacks.playbook = self
+
+ if inventory is None:
+ self.inventory = ansible.inventory.Inventory(host_list)
+ self.inventory.subset(subset)
+ else:
+ self.inventory = inventory
+
+ if self.module_path is not None:
+ utils.plugins.module_finder.add_directory(self.module_path)
+
+ self.basedir = os.path.dirname(playbook) or '.'
+ utils.plugins.push_basedir(self.basedir)
+
+ # let inventory know the playbook basedir so it can load more vars
+ self.inventory.set_playbook_basedir(self.basedir)
+
+ vars = extra_vars.copy()
+ vars['playbook_dir'] = os.path.abspath(self.basedir)
+ if self.inventory.basedir() is not None:
+ vars['inventory_dir'] = self.inventory.basedir()
+
+ if self.inventory.src() is not None:
+ vars['inventory_file'] = self.inventory.src()
+
+ self.filename = playbook
+ (self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars)
+ ansible.callbacks.load_callback_plugins()
+ ansible.callbacks.set_playbook(self.callbacks, self)
+
+ self._ansible_version = utils.version_info(gitinfo=True)
+
+ # *****************************************************
+
+ def _get_playbook_vars(self, play_ds, existing_vars):
+ '''
+ Gets the vars specified with the play and blends them
+ with any existing vars that have already been read in
+ '''
+ new_vars = existing_vars.copy()
+ if 'vars' in play_ds:
+ if isinstance(play_ds['vars'], dict):
+ new_vars.update(play_ds['vars'])
+ elif isinstance(play_ds['vars'], list):
+ for v in play_ds['vars']:
+ new_vars.update(v)
+ return new_vars
+
+ # *****************************************************
+
+ def _get_include_info(self, play_ds, basedir, existing_vars={}):
+ '''
+ Gets any key=value pairs specified with the included file
+ name and returns the merged vars along with the path
+ '''
+ new_vars = existing_vars.copy()
+ tokens = split_args(play_ds.get('include', ''))
+ for t in tokens[1:]:
+ try:
+ (k,v) = unquote(t).split("=", 1)
+ new_vars[k] = template(basedir, v, new_vars)
+ except ValueError, e:
+ raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t)
+
+ return (new_vars, unquote(tokens[0]))
+
+ # *****************************************************
+
+ def _get_playbook_vars_files(self, play_ds, existing_vars_files):
+ new_vars_files = list(existing_vars_files)
+ if 'vars_files' in play_ds:
+ new_vars_files = utils.list_union(new_vars_files, play_ds['vars_files'])
+ return new_vars_files
+
+ # *****************************************************
+
+ def _extend_play_vars(self, play, vars={}):
+ '''
+ Extends the given play's variables with the additional specified vars.
+ '''
+
+ if 'vars' not in play or not play['vars']:
+ # someone left out or put an empty "vars:" entry in their playbook
+ return vars.copy()
+
+ play_vars = None
+ if isinstance(play['vars'], dict):
+ play_vars = play['vars'].copy()
+ play_vars.update(vars)
+ elif isinstance(play['vars'], list):
+ # nobody should really do this, but handle vars: a=1 b=2
+ play_vars = play['vars'][:]
+ play_vars.extend([{k:v} for k,v in vars.iteritems()])
+
+ return play_vars
+
+ # *****************************************************
+
+ def _load_playbook_from_file(self, path, vars={}, vars_files=[]):
+ '''
+ run top level error checking on playbooks and allow them to include other playbooks.
+ '''
+
+ playbook_data = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
+ accumulated_plays = []
+ play_basedirs = []
+
+ if type(playbook_data) != list:
+ raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data))
+
+ basedir = os.path.dirname(path) or '.'
+ utils.plugins.push_basedir(basedir)
+ for play in playbook_data:
+ if type(play) != dict:
+ raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play)
+
+ if 'include' in play:
+ # a playbook (list of plays) decided to include some other list of plays
+ # from another file. The result is a flat list of plays in the end.
+
+ play_vars = self._get_playbook_vars(play, vars)
+ play_vars_files = self._get_playbook_vars_files(play, vars_files)
+ inc_vars, inc_path = self._get_include_info(play, basedir, play_vars)
+ play_vars.update(inc_vars)
+
+ included_path = utils.path_dwim(basedir, template(basedir, inc_path, play_vars))
+ (plays, basedirs) = self._load_playbook_from_file(included_path, vars=play_vars, vars_files=play_vars_files)
+ for p in plays:
+ # support for parameterized play includes works by passing
+ # those variables along to the subservient play
+ p['vars'] = self._extend_play_vars(p, play_vars)
+ # now add in the vars_files
+ p['vars_files'] = utils.list_union(p.get('vars_files', []), play_vars_files)
+
+ accumulated_plays.extend(plays)
+ play_basedirs.extend(basedirs)
+
+ else:
+
+ # this is a normal (non-included play)
+ accumulated_plays.append(play)
+ play_basedirs.append(basedir)
+
+ return (accumulated_plays, play_basedirs)
+
+ # *****************************************************
+
+ def run(self):
+ ''' run all patterns in the playbook '''
+ plays = []
+ matched_tags_all = set()
+ unmatched_tags_all = set()
+
+ # loop through all patterns and run them
+ self.callbacks.on_start()
+ for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
+ play = Play(self, play_ds, play_basedir, vault_password=self.vault_password)
+ assert play is not None
+
+ matched_tags, unmatched_tags = play.compare_tags(self.only_tags)
+
+ matched_tags_all = matched_tags_all | matched_tags
+ unmatched_tags_all = unmatched_tags_all | unmatched_tags
+
+ # Remove tasks we wish to skip
+ matched_tags = matched_tags - set(self.skip_tags)
+
+ # if we have matched_tags, the play must be run.
+ # if the play contains no tasks, assume we just want to gather facts
+ # in this case there are actually 3 meta tasks (handler flushes) not 0
+ # tasks, so that's why there's a check against 3
+ if (len(matched_tags) > 0 or len(play.tasks()) == 3):
+ plays.append(play)
+
+ # if the playbook is invoked with --tags or --skip-tags that don't
+ # exist at all in the playbooks then we need to raise an error so that
+ # the user can correct the arguments.
+ unknown_tags = ((set(self.only_tags) | set(self.skip_tags)) -
+ (matched_tags_all | unmatched_tags_all))
+
+ for t in RESERVED_TAGS:
+ unknown_tags.discard(t)
+
+ if len(unknown_tags) > 0:
+ for t in RESERVED_TAGS:
+ unmatched_tags_all.discard(t)
+ msg = 'tag(s) not found in playbook: %s. possible values: %s'
+ unknown = ','.join(sorted(unknown_tags))
+ unmatched = ','.join(sorted(unmatched_tags_all))
+ raise errors.AnsibleError(msg % (unknown, unmatched))
+
+ for play in plays:
+ ansible.callbacks.set_play(self.callbacks, play)
+ ansible.callbacks.set_play(self.runner_callbacks, play)
+ if not self._run_play(play):
+ break
+
+ ansible.callbacks.set_play(self.callbacks, None)
+ ansible.callbacks.set_play(self.runner_callbacks, None)
+
+ # summarize the results
+ results = {}
+ for host in self.stats.processed.keys():
+ results[host] = self.stats.summarize(host)
+ return results
+
+ # *****************************************************
+
+ def _async_poll(self, poller, async_seconds, async_poll_interval):
+ ''' launch an async job, if poll_interval is set, wait for completion '''
+
+ results = poller.wait(async_seconds, async_poll_interval)
+
+ # mark any hosts that are still listed as started as failed
+ # since these likely got killed by async_wrapper
+ for host in poller.hosts_to_poll:
+ reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' }
+ self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id'])
+ results['contacted'][host] = reason
+
+ return results
+
+ # *****************************************************
+
+ def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False):
+ ''' returns a list of hosts that haven't failed and aren't dark '''
+
+ return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)]
+
+ # *****************************************************
+
+ def _run_task_internal(self, task, include_failed=False):
+ ''' run a particular module step in a playbook '''
+
+ hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed)
+ self.inventory.restrict_to(hosts)
+
+ runner = ansible.runner.Runner(
+ pattern=task.play.hosts,
+ inventory=self.inventory,
+ module_name=task.module_name,
+ module_args=task.module_args,
+ forks=self.forks,
+ remote_pass=self.remote_pass,
+ module_path=self.module_path,
+ timeout=self.timeout,
+ remote_user=task.remote_user,
+ remote_port=task.play.remote_port,
+ module_vars=task.module_vars,
+ play_vars=task.play_vars,
+ play_file_vars=task.play_file_vars,
+ role_vars=task.role_vars,
+ role_params=task.role_params,
+ default_vars=task.default_vars,
+ extra_vars=self.extra_vars,
+ private_key_file=self.private_key_file,
+ setup_cache=self.SETUP_CACHE,
+ vars_cache=self.VARS_CACHE,
+ basedir=task.play.basedir,
+ conditional=task.when,
+ callbacks=self.runner_callbacks,
+ transport=task.transport,
+ is_playbook=True,
+ check=self.check,
+ diff=self.diff,
+ environment=task.environment,
+ complex_args=task.args,
+ accelerate=task.play.accelerate,
+ accelerate_port=task.play.accelerate_port,
+ accelerate_ipv6=task.play.accelerate_ipv6,
+ error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR,
+ vault_pass = self.vault_password,
+ run_hosts=hosts,
+ no_log=task.no_log,
+ run_once=task.run_once,
+ become=task.become,
+ become_method=task.become_method,
+ become_user=task.become_user,
+ become_pass=task.become_pass,
+ )
+
+ runner.module_vars.update({'play_hosts': hosts})
+ runner.module_vars.update({'ansible_version': self._ansible_version})
+
+ if task.async_seconds == 0:
+ results = runner.run()
+ else:
+ results, poller = runner.run_async(task.async_seconds)
+ self.stats.compute(results)
+ if task.async_poll_interval > 0:
+ # if not polling, playbook requested fire and forget, so don't poll
+ results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
+ else:
+ for (host, res) in results.get('contacted', {}).iteritems():
+ self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id'])
+
+ contacted = results.get('contacted',{})
+ dark = results.get('dark', {})
+
+ self.inventory.lift_restriction()
+
+ if len(contacted.keys()) == 0 and len(dark.keys()) == 0:
+ return None
+
+ return results
+
+ # *****************************************************
+
+ def _run_task(self, play, task, is_handler):
+ ''' run a single task in the playbook and recursively run any subtasks. '''
+
+ ansible.callbacks.set_task(self.callbacks, task)
+ ansible.callbacks.set_task(self.runner_callbacks, task)
+
+ if task.role_name:
+ name = '%s | %s' % (task.role_name, task.name)
+ else:
+ name = task.name
+
+ try:
+ # v1 HACK: we don't have enough information to template many names
+ # at this point. Rather than making this work for all cases in
+ # v1, just make this degrade gracefully. Will fix in v2
+ name = template(play.basedir, name, task.module_vars, lookup_fatal=False, filter_fatal=False)
+ except:
+ pass
+
+ self.callbacks.on_task_start(name, is_handler)
+ if hasattr(self.callbacks, 'skip_task') and self.callbacks.skip_task:
+ ansible.callbacks.set_task(self.callbacks, None)
+ ansible.callbacks.set_task(self.runner_callbacks, None)
+ return True
+
+ # template ignore_errors
+ # TODO: Is this needed here? cond is templated again in
+ # check_conditional after some more manipulations.
+ # TODO: we don't have enough information here to template cond either
+ # (see note on templating name above)
+ cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False)
+ task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
+
+ # load up an appropriate ansible runner to run the task in parallel
+ include_failed = is_handler and play.force_handlers
+ results = self._run_task_internal(task, include_failed=include_failed)
+
+ # if no hosts are matched, carry on
+ hosts_remaining = True
+ if results is None:
+ hosts_remaining = False
+ results = {}
+
+ contacted = results.get('contacted', {})
+ self.stats.compute(results, ignore_errors=task.ignore_errors)
+
+ def _register_play_vars(host, result):
+ # when 'register' is used, persist the result in the vars cache
+ # rather than the setup cache - vars should be transient between
+ # playbook executions
+ if 'stdout' in result and 'stdout_lines' not in result:
+ result['stdout_lines'] = result['stdout'].splitlines()
+ utils.update_hash(self.VARS_CACHE, host, {task.register: result})
+
+ def _save_play_facts(host, facts):
+ # saves play facts in SETUP_CACHE, unless the module executed was
+ # set_fact, in which case we add them to the VARS_CACHE
+ if task.module_name in ('set_fact', 'include_vars'):
+ utils.update_hash(self.VARS_CACHE, host, facts)
+ else:
+ utils.update_hash(self.SETUP_CACHE, host, facts)
+
+ # add facts to the global setup cache
+ for host, result in contacted.iteritems():
+ if 'results' in result:
+ # task ran with_ lookup plugin, so facts are encapsulated in
+ # multiple list items in the results key
+ for res in result['results']:
+ if type(res) == dict:
+ facts = res.get('ansible_facts', {})
+ _save_play_facts(host, facts)
+ else:
+ # when facts are returned, persist them in the setup cache
+ facts = result.get('ansible_facts', {})
+ _save_play_facts(host, facts)
+
+ # if requested, save the result into the registered variable name
+ if task.register:
+ _register_play_vars(host, result)
+
+ # also have to register some failed, but ignored, tasks
+ if task.ignore_errors and task.register:
+ failed = results.get('failed', {})
+ for host, result in failed.iteritems():
+ _register_play_vars(host, result)
+
+ # flag which notify handlers need to be run
+ if len(task.notify) > 0:
+ for host, results in results.get('contacted',{}).iteritems():
+ if results.get('changed', False):
+ for handler_name in task.notify:
+ self._flag_handler(play, template(play.basedir, handler_name, task.module_vars), host)
+
+ ansible.callbacks.set_task(self.callbacks, None)
+ ansible.callbacks.set_task(self.runner_callbacks, None)
+ return hosts_remaining
+
+ # *****************************************************
+
+ def _flag_handler(self, play, handler_name, host):
+ '''
+ if a task has any notify elements, flag handlers for run
+ at end of execution cycle for hosts that have indicated
+ changes have been made
+ '''
+
+ found = False
+ for x in play.handlers():
+ if handler_name == template(play.basedir, x.name, x.module_vars):
+ found = True
+ self.callbacks.on_notify(host, x.name)
+ x.notified_by.append(host)
+ if not found:
+ raise errors.AnsibleError("change handler (%s) is not defined" % handler_name)
+
+ # *****************************************************
+
+ def _do_setup_step(self, play):
+ ''' get facts from the remote system '''
+
+ host_list = self._trim_unavailable_hosts(play._play_hosts)
+
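+        # with gathering = 'smart' (and no explicit gather_facts on the play), hosts that
+        # already have facts in the SETUP_CACHE (flagged via 'module_setup') are skipped,
+        # so setup only runs once per host for the lifetime of this PlayBook object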
+ if play.gather_facts is None and C.DEFAULT_GATHERING == 'smart':
+ host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]]
+ if len(host_list) == 0:
+ return {}
+ elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'explicit'):
+ return {}
+
+ self.callbacks.on_setup()
+ self.inventory.restrict_to(host_list)
+
+ ansible.callbacks.set_task(self.callbacks, None)
+ ansible.callbacks.set_task(self.runner_callbacks, None)
+
+ # push any variables down to the system
+ setup_results = ansible.runner.Runner(
+ basedir=self.basedir,
+ pattern=play.hosts,
+ module_name='setup',
+ module_args={},
+ inventory=self.inventory,
+ forks=self.forks,
+ module_path=self.module_path,
+ timeout=self.timeout,
+ remote_user=play.remote_user,
+ remote_pass=self.remote_pass,
+ remote_port=play.remote_port,
+ private_key_file=self.private_key_file,
+ setup_cache=self.SETUP_CACHE,
+ vars_cache=self.VARS_CACHE,
+ callbacks=self.runner_callbacks,
+ become=play.become,
+ become_method=play.become_method,
+ become_user=play.become_user,
+ become_pass=self.become_pass,
+ vault_pass=self.vault_password,
+ transport=play.transport,
+ is_playbook=True,
+ module_vars=play.vars,
+ play_vars=play.vars,
+ play_file_vars=play.vars_file_vars,
+ role_vars=play.role_vars,
+ default_vars=play.default_vars,
+ check=self.check,
+ diff=self.diff,
+ accelerate=play.accelerate,
+ accelerate_port=play.accelerate_port,
+ ).run()
+ self.stats.compute(setup_results, setup=True)
+
+ self.inventory.lift_restriction()
+
+ # now for each result, load into the setup cache so we can
+ # let runner template out future commands
+ setup_ok = setup_results.get('contacted', {})
+ for (host, result) in setup_ok.iteritems():
+ utils.update_hash(self.SETUP_CACHE, host, {'module_setup': True})
+ utils.update_hash(self.SETUP_CACHE, host, result.get('ansible_facts', {}))
+ return setup_results
+
+ # *****************************************************
+
+
+ def generate_retry_inventory(self, replay_hosts):
+ '''
+        called by /usr/bin/ansible-playbook when a playbook run fails. It generates an inventory
+ that allows re-running on ONLY the failed hosts. This may duplicate some
+ variable information in group_vars/host_vars but that is ok, and expected.
+ '''
+
+ buf = StringIO.StringIO()
+ for x in replay_hosts:
+ buf.write("%s\n" % x)
+ basedir = C.shell_expand_path(C.RETRY_FILES_SAVE_PATH)
+ filename = "%s.retry" % os.path.basename(self.filename)
+ filename = filename.replace(".yml","")
+ filename = os.path.join(basedir, filename)
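+        # illustrative example: a failed run of "site.yml" writes a file named
+        # "site.retry" (one failed or unreachable host per line) under the configured
+        # retry_files_save_path directory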
+
+ try:
+ if not os.path.exists(basedir):
+ os.makedirs(basedir)
+
+ fd = open(filename, 'w')
+ fd.write(buf.getvalue())
+ fd.close()
+ except:
+ ansible.callbacks.display(
+ "\nERROR: could not create retry file. Check the value of \n"
+ + "the configuration variable 'retry_files_save_path' or set \n"
+ + "'retry_files_enabled' to False to avoid this message.\n",
+ color='red'
+ )
+ return None
+
+ return filename
+
+ # *****************************************************
+ def tasks_to_run_in_play(self, play):
+
+ tasks = []
+
+ for task in play.tasks():
+            # only run the task if the requested tags match or it has the 'always' tag
+ u = set(['untagged'])
+ task_set = set(task.tags)
+
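+            # worked example: with --tags=web and --skip-tags=debug, a task tagged
+            # ['web', 'debug'] passes the only_tags intersection check below but is
+            # then filtered out again by the skip_tags intersection check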
+ if 'always' in task.tags:
+ should_run = True
+ else:
+ if 'all' in self.only_tags:
+ should_run = True
+ else:
+ should_run = False
+ if 'tagged' in self.only_tags:
+ if task_set != u:
+ should_run = True
+ elif 'untagged' in self.only_tags:
+ if task_set == u:
+ should_run = True
+ else:
+ if task_set.intersection(self.only_tags):
+ should_run = True
+
+ # Check for tags that we need to skip
+ if 'all' in self.skip_tags:
+ should_run = False
+ else:
+ if 'tagged' in self.skip_tags:
+ if task_set != u:
+ should_run = False
+ elif 'untagged' in self.skip_tags:
+ if task_set == u:
+ should_run = False
+ else:
+ if should_run:
+ if task_set.intersection(self.skip_tags):
+ should_run = False
+
+ if should_run:
+ tasks.append(task)
+
+ return tasks
+
+ # *****************************************************
+ def _run_play(self, play):
+ ''' run a list of tasks for a given pattern, in order '''
+
+ self.callbacks.on_play_start(play.name)
+ # Get the hosts for this play
+ play._play_hosts = self.inventory.list_hosts(play.hosts)
+ # if no hosts matches this play, drop out
+ if not play._play_hosts:
+ self.callbacks.on_no_hosts_matched()
+ return True
+
+ # get facts from system
+ self._do_setup_step(play)
+
+        # now with that data, handle conditional variable file imports!
+ all_hosts = self._trim_unavailable_hosts(play._play_hosts)
+ play.update_vars_files(all_hosts, vault_password=self.vault_password)
+ hosts_count = len(all_hosts)
+
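+        # worked example: serial: "30%" with 10 matched hosts gives int(0.3 * 10) = 3,
+        # so the play runs over batches of 3, 3, 3 and 1 hosts; the max() below keeps
+        # very small percentages from rounding the batch size down to zero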
+ if play.serial.endswith("%"):
+
+ # This is a percentage, so calculate it based on the
+ # number of hosts
+ serial_pct = int(play.serial.replace("%",""))
+ serial = int((serial_pct/100.0) * len(all_hosts))
+
+ # Ensure that no matter how small the percentage, serial
+ # can never fall below 1, so that things actually happen
+ serial = max(serial, 1)
+ else:
+ serial = int(play.serial)
+
+ serialized_batch = []
+ if serial <= 0:
+ serialized_batch = [all_hosts]
+ else:
+ # do N forks all the way through before moving to next
+ while len(all_hosts) > 0:
+ play_hosts = []
+ for x in range(serial):
+ if len(all_hosts) > 0:
+ play_hosts.append(all_hosts.pop(0))
+ serialized_batch.append(play_hosts)
+
+ task_errors = False
+ for on_hosts in serialized_batch:
+
+ # restrict the play to just the hosts we have in our on_hosts block that are
+ # available.
+ play._play_hosts = self._trim_unavailable_hosts(on_hosts)
+ self.inventory.also_restrict_to(on_hosts)
+
+ for task in self.tasks_to_run_in_play(play):
+
+ if task.meta is not None:
+ # meta tasks can force handlers to run mid-play
+ if task.meta == 'flush_handlers':
+ self.run_handlers(play)
+
+ # skip calling the handler till the play is finished
+ continue
+
+ if not self._run_task(play, task, False):
+                    # whether no hosts matched is fatal or not depends on whether it was the initial step.
+ # if we got exactly no hosts on the first step (setup!) then the host group
+ # just didn't match anything and that's ok
+ return False
+
+ # Get a new list of what hosts are left as available, the ones that
+                # did not fail or go dark (unreachable) during the task
+ host_list = self._trim_unavailable_hosts(play._play_hosts)
+
+                # Set max_fail_pct to 0, so if any host fails, bail out
+ if task.any_errors_fatal and len(host_list) < hosts_count:
+ play.max_fail_pct = 0
+
+ # If threshold for max nodes failed is exceeded, bail out.
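+                # e.g. max_fail_percentage: 30 on 10 hosts gives a threshold of int(3.0) = 3,
+                # so the play only aborts once more than 3 hosts have failed or gone dark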
+ if play.serial > 0:
+ # if serial is set, we need to shorten the size of host_count
+ play_count = len(play._play_hosts)
+ if (play_count - len(host_list)) > int((play.max_fail_pct)/100.0 * play_count):
+ host_list = None
+ else:
+ if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
+ host_list = None
+
+ # if no hosts remain, drop out
+ if not host_list:
+ if play.force_handlers:
+ task_errors = True
+ break
+ else:
+ self.callbacks.on_no_hosts_remaining()
+ return False
+
+ # lift restrictions after each play finishes
+ self.inventory.lift_also_restriction()
+
+ if task_errors and not play.force_handlers:
+ # if there were failed tasks and handler execution
+ # is not forced, quit the play with an error
+ return False
+ else:
+ # no errors, go ahead and execute all handlers
+ if not self.run_handlers(play):
+ return False
+
+ return True
+
+
+ def run_handlers(self, play):
+ on_hosts = play._play_hosts
+ hosts_count = len(on_hosts)
+ for task in play.tasks():
+ if task.meta is not None:
+
+ fired_names = {}
+ for handler in play.handlers():
+ if len(handler.notified_by) > 0:
+ self.inventory.restrict_to(handler.notified_by)
+
+ # Resolve the variables first
+ handler_name = template(play.basedir, handler.name, handler.module_vars)
+ if handler_name not in fired_names:
+ self._run_task(play, handler, True)
+ # prevent duplicate handler includes from running more than once
+ fired_names[handler_name] = 1
+
+ host_list = self._trim_unavailable_hosts(play._play_hosts)
+ if handler.any_errors_fatal and len(host_list) < hosts_count:
+ play.max_fail_pct = 0
+ if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
+ host_list = None
+ if not host_list and not play.force_handlers:
+ self.callbacks.on_no_hosts_remaining()
+ return False
+
+ self.inventory.lift_restriction()
+ new_list = handler.notified_by[:]
+ for host in handler.notified_by:
+ if host in on_hosts:
+ while host in new_list:
+ new_list.remove(host)
+ handler.notified_by = new_list
+
+ continue
+
+ return True
diff --git a/v1/ansible/playbook/play.py b/v1/ansible/playbook/play.py
new file mode 100644
index 00000000000..6ee85e0bf48
--- /dev/null
+++ b/v1/ansible/playbook/play.py
@@ -0,0 +1,949 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+
+from ansible.utils.template import template
+from ansible import utils
+from ansible import errors
+from ansible.playbook.task import Task
+from ansible.module_utils.splitter import split_args, unquote
+import ansible.constants as C
+import pipes
+import shlex
+import os
+import sys
+import uuid
+
+
+class Play(object):
+
+ _pb_common = [
+ 'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become',
+ 'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts',
+ 'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su',
+ 'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt',
+ 'vault_password',
+ ]
+
+ __slots__ = _pb_common + [
+ '_ds', '_handlers', '_play_hosts', '_tasks', 'any_errors_fatal', 'basedir',
+ 'default_vars', 'included_roles', 'max_fail_pct', 'playbook', 'remote_port',
+ 'role_vars', 'transport', 'vars_file_vars',
+ ]
+
+ # to catch typos and so forth -- these are userland names
+ # and don't line up 1:1 with how they are stored
+ VALID_KEYS = frozenset(_pb_common + [
+ 'connection', 'include', 'max_fail_percentage', 'port', 'post_tasks',
+ 'pre_tasks', 'role_names', 'tasks', 'user',
+ ])
+
+ # *************************************************
+
+ def __init__(self, playbook, ds, basedir, vault_password=None):
+ ''' constructor loads from a play datastructure '''
+
+ for x in ds.keys():
+ if not x in Play.VALID_KEYS:
+ raise errors.AnsibleError("%s is not a legal parameter of an Ansible Play" % x)
+
+ # allow all playbook keys to be set by --extra-vars
+ self.vars = ds.get('vars', {})
+ self.vars_prompt = ds.get('vars_prompt', {})
+ self.playbook = playbook
+ self.vars = self._get_vars()
+ self.vars_file_vars = dict() # these are vars read in from vars_files:
+ self.role_vars = dict() # these are vars read in from vars/main.yml files in roles
+ self.basedir = basedir
+ self.roles = ds.get('roles', None)
+ self.tags = ds.get('tags', None)
+ self.vault_password = vault_password
+ self.environment = ds.get('environment', {})
+
+ if self.tags is None:
+ self.tags = []
+ elif type(self.tags) in [ str, unicode ]:
+ self.tags = self.tags.split(",")
+ elif type(self.tags) != list:
+ self.tags = []
+
+ # make sure we have some special internal variables set, which
+ # we use later when loading tasks and handlers
+ load_vars = dict()
+ load_vars['playbook_dir'] = os.path.abspath(self.basedir)
+ if self.playbook.inventory.basedir() is not None:
+ load_vars['inventory_dir'] = self.playbook.inventory.basedir()
+ if self.playbook.inventory.src() is not None:
+ load_vars['inventory_file'] = self.playbook.inventory.src()
+
+ # We first load the vars files from the datastructure
+ # so we have the default variables to pass into the roles
+ self.vars_files = ds.get('vars_files', [])
+ if not isinstance(self.vars_files, list):
+ raise errors.AnsibleError('vars_files must be a list')
+ processed_vars_files = self._update_vars_files_for_host(None)
+
+ # now we load the roles into the datastructure
+ self.included_roles = []
+ ds = self._load_roles(self.roles, ds)
+
+ # and finally re-process the vars files as they may have been updated
+ # by the included roles, but exclude any which have been processed
+ self.vars_files = utils.list_difference(ds.get('vars_files', []), processed_vars_files)
+ if not isinstance(self.vars_files, list):
+ raise errors.AnsibleError('vars_files must be a list')
+
+ self._update_vars_files_for_host(None)
+
+        # template everything to be efficient, but do not prematurely template
+ # tasks/handlers as they may have inventory scope overrides. We also
+ # create a set of temporary variables for templating, so we don't
+ # trample on the existing vars structures
+ _tasks = ds.pop('tasks', [])
+ _handlers = ds.pop('handlers', [])
+
+ temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
+ temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
+
+ try:
+ ds = template(basedir, ds, temp_vars)
+ except errors.AnsibleError, e:
+ utils.warning("non fatal error while trying to template play variables: %s" % (str(e)))
+
+ ds['tasks'] = _tasks
+ ds['handlers'] = _handlers
+
+ self._ds = ds
+
+ hosts = ds.get('hosts')
+ if hosts is None:
+ raise errors.AnsibleError('hosts declaration is required')
+ elif isinstance(hosts, list):
+ try:
+ hosts = ';'.join(hosts)
+ except TypeError,e:
+ raise errors.AnsibleError('improper host declaration: %s' % str(e))
+
+ self.serial = str(ds.get('serial', 0))
+ self.hosts = hosts
+ self.name = ds.get('name', self.hosts)
+ self._tasks = ds.get('tasks', [])
+ self._handlers = ds.get('handlers', [])
+ self.remote_user = ds.get('remote_user', ds.get('user', self.playbook.remote_user))
+ self.remote_port = ds.get('port', self.playbook.remote_port)
+ self.transport = ds.get('connection', self.playbook.transport)
+ self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false'))
+ self.accelerate = utils.boolean(ds.get('accelerate', 'false'))
+ self.accelerate_port = ds.get('accelerate_port', None)
+ self.accelerate_ipv6 = ds.get('accelerate_ipv6', False)
+ self.max_fail_pct = int(ds.get('max_fail_percentage', 100))
+ self.no_log = utils.boolean(ds.get('no_log', 'false'))
+ self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers))
+
+ # Fail out if user specifies conflicting privilege escalations
+ if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')):
+            raise errors.AnsibleError('become params ("become", "become_user") and sudo params ("sudo", "sudo_user") cannot be used together')
+ if (ds.get('become') or ds.get('become_user')) and (ds.get('su') or ds.get('su_user')):
+            raise errors.AnsibleError('become params ("become", "become_user") and su params ("su", "su_user") cannot be used together')
+ if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')):
+ raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
+
+ # become settings are inherited and updated normally
+ self.become = ds.get('become', self.playbook.become)
+ self.become_method = ds.get('become_method', self.playbook.become_method)
+ self.become_user = ds.get('become_user', self.playbook.become_user)
+
+ # Make sure current play settings are reflected in become fields
+ if 'sudo' in ds:
+ self.become=ds['sudo']
+ self.become_method='sudo'
+ if 'sudo_user' in ds:
+ self.become_user=ds['sudo_user']
+ elif 'su' in ds:
+            self.become=ds['su']
+ self.become_method='su'
+ if 'su_user' in ds:
+ self.become_user=ds['su_user']
+
+ # gather_facts is not a simple boolean, as None means that a 'smart'
+ # fact gathering mode will be used, so we need to be careful here as
+ # calling utils.boolean(None) returns False
+ self.gather_facts = ds.get('gather_facts', None)
+ if self.gather_facts is not None:
+ self.gather_facts = utils.boolean(self.gather_facts)
+
+ load_vars['role_names'] = ds.get('role_names', [])
+
+ self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars)
+ self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars)
+
+ # apply any missing tags to role tasks
+ self._late_merge_role_tags()
+
+        # placeholder for the discovered hosts to be used in this play
+ self._play_hosts = None
+
+ # *************************************************
+
+ def _get_role_path(self, role):
+ """
+ Returns the path on disk to the directory containing
+ the role directories like tasks, templates, etc. Also
+ returns any variables that were included with the role
+ """
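+        # a role entry may be a bare name (e.g. "common") or a dict form such as
+        # { role: "common", some_var: 1 }; in the dict form the extra keys are handed
+        # back as role_vars along with the resolved path ("some_var" here is illustrative)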
+ orig_path = template(self.basedir,role,self.vars)
+
+ role_vars = {}
+ if type(orig_path) == dict:
+ # what, not a path?
+ role_name = orig_path.get('role', None)
+ if role_name is None:
+ raise errors.AnsibleError("expected a role name in dictionary: %s" % orig_path)
+ role_vars = orig_path
+ else:
+ role_name = utils.role_spec_parse(orig_path)["name"]
+
+ role_path = None
+
+ possible_paths = [
+ utils.path_dwim(self.basedir, os.path.join('roles', role_name)),
+ utils.path_dwim(self.basedir, role_name)
+ ]
+
+ if C.DEFAULT_ROLES_PATH:
+ search_locations = C.DEFAULT_ROLES_PATH.split(os.pathsep)
+ for loc in search_locations:
+ loc = os.path.expanduser(loc)
+ possible_paths.append(utils.path_dwim(loc, role_name))
+
+ for path_option in possible_paths:
+ if os.path.isdir(path_option):
+ role_path = path_option
+ break
+
+ if role_path is None:
+ raise errors.AnsibleError("cannot find role in %s" % " or ".join(possible_paths))
+
+ return (role_path, role_vars)
+
+ def _build_role_dependencies(self, roles, dep_stack, passed_vars={}, level=0):
+ # this number is arbitrary, but it seems sane
+ if level > 20:
+ raise errors.AnsibleError("too many levels of recursion while resolving role dependencies")
+ for role in roles:
+ role_path,role_vars = self._get_role_path(role)
+
+ # save just the role params for this role, which exclude the special
+ # keywords 'role', 'tags', and 'when'.
+ role_params = role_vars.copy()
+ for item in ('role', 'tags', 'when'):
+ if item in role_params:
+ del role_params[item]
+
+ role_vars = utils.combine_vars(passed_vars, role_vars)
+
+ vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')))
+ vars_data = {}
+ if os.path.isfile(vars):
+ vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
+ if vars_data:
+ if not isinstance(vars_data, dict):
+ raise errors.AnsibleError("vars from '%s' are not a dict" % vars)
+ role_vars = utils.combine_vars(vars_data, role_vars)
+
+ defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')))
+ defaults_data = {}
+ if os.path.isfile(defaults):
+ defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
+
+ # the meta directory contains the yaml that should
+ # hold the list of dependencies (if any)
+ meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')))
+ if os.path.isfile(meta):
+ data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
+ if data:
+ dependencies = data.get('dependencies',[])
+ if dependencies is None:
+ dependencies = []
+ for dep in dependencies:
+ allow_dupes = False
+ (dep_path,dep_vars) = self._get_role_path(dep)
+
+ # save the dep params, just as we did above
+ dep_params = dep_vars.copy()
+ for item in ('role', 'tags', 'when'):
+ if item in dep_params:
+ del dep_params[item]
+
+ meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta')))
+ if os.path.isfile(meta):
+ meta_data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
+ if meta_data:
+ allow_dupes = utils.boolean(meta_data.get('allow_duplicates',''))
+
+ # if any tags were specified as role/dep variables, merge
+ # them into the current dep_vars so they're passed on to any
+ # further dependencies too, and so we only have one place
+ # (dep_vars) to look for tags going forward
+ def __merge_tags(var_obj):
+ old_tags = dep_vars.get('tags', [])
+ if isinstance(old_tags, basestring):
+ old_tags = [old_tags, ]
+ if isinstance(var_obj, dict):
+ new_tags = var_obj.get('tags', [])
+ if isinstance(new_tags, basestring):
+ new_tags = [new_tags, ]
+ else:
+ new_tags = []
+ return list(set(old_tags).union(set(new_tags)))
+
+ dep_vars['tags'] = __merge_tags(role_vars)
+ dep_vars['tags'] = __merge_tags(passed_vars)
+
+ # if tags are set from this role, merge them
+ # into the tags list for the dependent role
+ if "tags" in passed_vars:
+ for included_role_dep in dep_stack:
+ included_dep_name = included_role_dep[0]
+ included_dep_vars = included_role_dep[2]
+ if included_dep_name == dep:
+ if "tags" in included_dep_vars:
+ included_dep_vars["tags"] = list(set(included_dep_vars["tags"]).union(set(passed_vars["tags"])))
+ else:
+ included_dep_vars["tags"] = passed_vars["tags"][:]
+
+ dep_vars = utils.combine_vars(passed_vars, dep_vars)
+ dep_vars = utils.combine_vars(role_vars, dep_vars)
+
+ vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars')))
+ vars_data = {}
+ if os.path.isfile(vars):
+ vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
+ if vars_data:
+ dep_vars = utils.combine_vars(dep_vars, vars_data)
+
+ defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults')))
+ dep_defaults_data = {}
+ if os.path.isfile(defaults):
+ dep_defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
+ if 'role' in dep_vars:
+ del dep_vars['role']
+
+ if not allow_dupes:
+ if dep in self.included_roles:
+ # skip back to the top, since we don't want to
+ # do anything else with this role
+ continue
+ else:
+ self.included_roles.append(dep)
+
+ def _merge_conditional(cur_conditionals, new_conditionals):
+ if isinstance(new_conditionals, (basestring, bool)):
+ cur_conditionals.append(new_conditionals)
+ elif isinstance(new_conditionals, list):
+ cur_conditionals.extend(new_conditionals)
+
+ # pass along conditionals from roles to dep roles
+ passed_when = passed_vars.get('when')
+ role_when = role_vars.get('when')
+ dep_when = dep_vars.get('when')
+
+ tmpcond = []
+ _merge_conditional(tmpcond, passed_when)
+ _merge_conditional(tmpcond, role_when)
+ _merge_conditional(tmpcond, dep_when)
+
+ if len(tmpcond) > 0:
+ dep_vars['when'] = tmpcond
+
+ self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1)
+ dep_stack.append([dep, dep_path, dep_vars, dep_params, dep_defaults_data])
+
+ # only add the current role when we're at the top level,
+ # otherwise we'll end up in a recursive loop
+ if level == 0:
+ self.included_roles.append(role)
+ dep_stack.append([role, role_path, role_vars, role_params, defaults_data])
+ return dep_stack
+
+ def _load_role_vars_files(self, vars_files):
+ # process variables stored in vars/main.yml files
+ role_vars = {}
+ for filename in vars_files:
+ if os.path.exists(filename):
+ new_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
+ if new_vars:
+ if type(new_vars) != dict:
+ raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_vars)))
+ role_vars = utils.combine_vars(role_vars, new_vars)
+
+ return role_vars
+
+ def _load_role_defaults(self, defaults_files):
+ # process default variables
+ default_vars = {}
+ for filename in defaults_files:
+ if os.path.exists(filename):
+ new_default_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
+ if new_default_vars:
+ if type(new_default_vars) != dict:
+ raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_default_vars)))
+ default_vars = utils.combine_vars(default_vars, new_default_vars)
+
+ return default_vars
+
+ def _load_roles(self, roles, ds):
+ # a role is a name that auto-includes the following if they exist
+        # <rolename>/tasks/main.yml
+        # <rolename>/handlers/main.yml
+        # <rolename>/vars/main.yml
+        # <rolename>/library
+ # and it auto-extends tasks/handlers/vars_files/module paths as appropriate if found
+
+ if roles is None:
+ roles = []
+ if type(roles) != list:
+ raise errors.AnsibleError("value of 'roles:' must be a list")
+
+ new_tasks = []
+ new_handlers = []
+ role_vars_files = []
+ defaults_files = []
+
+ pre_tasks = ds.get('pre_tasks', None)
+ if type(pre_tasks) != list:
+ pre_tasks = []
+ for x in pre_tasks:
+ new_tasks.append(x)
+
+ # flush handlers after pre_tasks
+ new_tasks.append(dict(meta='flush_handlers'))
+
+ roles = self._build_role_dependencies(roles, [], {})
+
+        # give each role a uuid and
+        # make role_path available as a variable to the task
+ for idx, val in enumerate(roles):
+ this_uuid = str(uuid.uuid4())
+ roles[idx][-3]['role_uuid'] = this_uuid
+ roles[idx][-3]['role_path'] = roles[idx][1]
+
+ role_names = []
+
+ for (role, role_path, role_vars, role_params, default_vars) in roles:
+            # special vars must be extracted from the dict and passed down to the included tasks
+ special_keys = [ "sudo", "sudo_user", "when", "with_items", "su", "su_user", "become", "become_user" ]
+ special_vars = {}
+ for k in special_keys:
+ if k in role_vars:
+ special_vars[k] = role_vars[k]
+
+ task_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'tasks'))
+ handler_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'handlers'))
+ vars_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))
+ meta_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'meta'))
+ defaults_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))
+
+ task = self._resolve_main(task_basepath)
+ handler = self._resolve_main(handler_basepath)
+ vars_file = self._resolve_main(vars_basepath)
+ meta_file = self._resolve_main(meta_basepath)
+ defaults_file = self._resolve_main(defaults_basepath)
+
+ library = utils.path_dwim(self.basedir, os.path.join(role_path, 'library'))
+
+ missing = lambda f: not os.path.isfile(f)
+ if missing(task) and missing(handler) and missing(vars_file) and missing(defaults_file) and missing(meta_file) and not os.path.isdir(library):
+ raise errors.AnsibleError("found role at %s, but cannot find %s or %s or %s or %s or %s or %s" % (role_path, task, handler, vars_file, defaults_file, meta_file, library))
+
+ if isinstance(role, dict):
+ role_name = role['role']
+ else:
+ role_name = utils.role_spec_parse(role)["name"]
+
+ role_names.append(role_name)
+ if os.path.isfile(task):
+ nt = dict(include=pipes.quote(task), vars=role_vars, role_params=role_params, default_vars=default_vars, role_name=role_name)
+ for k in special_keys:
+ if k in special_vars:
+ nt[k] = special_vars[k]
+ new_tasks.append(nt)
+ if os.path.isfile(handler):
+ nt = dict(include=pipes.quote(handler), vars=role_vars, role_params=role_params, role_name=role_name)
+ for k in special_keys:
+ if k in special_vars:
+ nt[k] = special_vars[k]
+ new_handlers.append(nt)
+ if os.path.isfile(vars_file):
+ role_vars_files.append(vars_file)
+ if os.path.isfile(defaults_file):
+ defaults_files.append(defaults_file)
+ if os.path.isdir(library):
+ utils.plugins.module_finder.add_directory(library)
+
+ tasks = ds.get('tasks', None)
+ post_tasks = ds.get('post_tasks', None)
+ handlers = ds.get('handlers', None)
+ vars_files = ds.get('vars_files', None)
+
+ if type(tasks) != list:
+ tasks = []
+ if type(handlers) != list:
+ handlers = []
+ if type(vars_files) != list:
+ vars_files = []
+ if type(post_tasks) != list:
+ post_tasks = []
+
+ new_tasks.extend(tasks)
+ # flush handlers after tasks + role tasks
+ new_tasks.append(dict(meta='flush_handlers'))
+ new_tasks.extend(post_tasks)
+ # flush handlers after post tasks
+ new_tasks.append(dict(meta='flush_handlers'))
+
+ new_handlers.extend(handlers)
+
+ ds['tasks'] = new_tasks
+ ds['handlers'] = new_handlers
+ ds['role_names'] = role_names
+
+ self.role_vars = self._load_role_vars_files(role_vars_files)
+ self.default_vars = self._load_role_defaults(defaults_files)
+
+ return ds
+
+ # *************************************************
+
+ def _resolve_main(self, basepath):
+ ''' flexibly handle variations in main filenames '''
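+        # e.g. for basepath "roles/common/tasks" this returns whichever one of
+        # main/main.yml/main.yaml/main.json exists, errors if more than one is present,
+        # and falls back to the (non-existent) "main" path when none are found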
+ # these filenames are acceptable:
+ mains = (
+ os.path.join(basepath, 'main'),
+ os.path.join(basepath, 'main.yml'),
+ os.path.join(basepath, 'main.yaml'),
+ os.path.join(basepath, 'main.json'),
+ )
+ if sum([os.path.isfile(x) for x in mains]) > 1:
+ raise errors.AnsibleError("found multiple main files at %s, only one allowed" % (basepath))
+ else:
+ for m in mains:
+ if os.path.isfile(m):
+ return m # exactly one main file
+ return mains[0] # zero mains (we still need to return something)
+
+ # *************************************************
+
+ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, become_vars=None,
+ additional_conditions=None, original_file=None, role_name=None):
+ ''' handle task and handler include statements '''
+
+ results = []
+ if tasks is None:
+ # support empty handler files, and the like.
+ tasks = []
+ if additional_conditions is None:
+ additional_conditions = []
+ if vars is None:
+ vars = {}
+ if role_params is None:
+ role_params = {}
+ if default_vars is None:
+ default_vars = {}
+ if become_vars is None:
+ become_vars = {}
+
+ old_conditions = list(additional_conditions)
+
+ for x in tasks:
+
+ # prevent assigning the same conditions to each task on an include
+ included_additional_conditions = list(old_conditions)
+
+ if not isinstance(x, dict):
+ raise errors.AnsibleError("expecting dict; got: %s, error in %s" % (x, original_file))
+
+ # evaluate privilege escalation vars for current and child tasks
+ included_become_vars = {}
+ for k in ["become", "become_user", "become_method", "become_exe", "sudo", "su", "sudo_user", "su_user"]:
+ if k in x:
+ included_become_vars[k] = x[k]
+ elif k in become_vars:
+ included_become_vars[k] = become_vars[k]
+ x[k] = become_vars[k]
+
+ task_vars = vars.copy()
+ if original_file:
+ task_vars['_original_file'] = original_file
+
+ if 'meta' in x:
+ if x['meta'] == 'flush_handlers':
+ if role_name and 'role_name' not in x:
+ x['role_name'] = role_name
+ results.append(Task(self, x, module_vars=task_vars, role_name=role_name))
+ continue
+
+ if 'include' in x:
+ tokens = split_args(str(x['include']))
+ included_additional_conditions = list(additional_conditions)
+ include_vars = {}
+ for k in x:
+ if k.startswith("with_"):
+ if original_file:
+ offender = " (in %s)" % original_file
+ else:
+ offender = ""
+ utils.deprecated("include + with_items is a removed deprecated feature" + offender, "1.5", removed=True)
+ elif k.startswith("when_"):
+ utils.deprecated("\"when_:\" is a removed deprecated feature, use the simplified 'when:' conditional directly", None, removed=True)
+ elif k == 'when':
+ if isinstance(x[k], (basestring, bool)):
+ included_additional_conditions.append(x[k])
+ elif type(x[k]) is list:
+ included_additional_conditions.extend(x[k])
+ elif k in ("include", "vars", "role_params", "default_vars", "sudo", "sudo_user", "role_name", "no_log", "become", "become_user", "su", "su_user"):
+ continue
+ else:
+ include_vars[k] = x[k]
+
+ # get any role parameters specified
+ role_params = x.get('role_params', {})
+
+ # get any role default variables specified
+ default_vars = x.get('default_vars', {})
+ if not default_vars:
+ default_vars = self.default_vars
+ else:
+ default_vars = utils.combine_vars(self.default_vars, default_vars)
+
+ # append the vars defined with the include (from above)
+ # as well as the old-style 'vars' element. The old-style
+ # vars are given higher precedence here (just in case)
+ task_vars = utils.combine_vars(task_vars, include_vars)
+ if 'vars' in x:
+ task_vars = utils.combine_vars(task_vars, x['vars'])
+
+ new_role = None
+ if 'role_name' in x:
+ new_role = x['role_name']
+
+ mv = task_vars.copy()
+ for t in tokens[1:]:
+ (k,v) = t.split("=", 1)
+ v = unquote(v)
+ mv[k] = template(self.basedir, v, mv)
+ dirname = self.basedir
+ if original_file:
+ dirname = os.path.dirname(original_file)
+
+ # temp vars are used here to avoid trampling on the existing vars structures
+ temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
+ temp_vars = utils.combine_vars(temp_vars, mv)
+ temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
+ include_file = template(dirname, tokens[0], temp_vars)
+ include_filename = utils.path_dwim(dirname, include_file)
+
+ data = utils.parse_yaml_from_file(include_filename, vault_password=self.vault_password)
+ if 'role_name' in x and data is not None:
+ for y in data:
+ if isinstance(y, dict) and 'include' in y:
+ y['role_name'] = new_role
+ loaded = self._load_tasks(data, mv, role_params, default_vars, included_become_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role)
+ results += loaded
+ elif type(x) == dict:
+ task = Task(
+ self, x,
+ module_vars=task_vars,
+ play_vars=self.vars,
+ play_file_vars=self.vars_file_vars,
+ role_vars=self.role_vars,
+ role_params=role_params,
+ default_vars=default_vars,
+ additional_conditions=list(additional_conditions),
+ role_name=role_name
+ )
+ results.append(task)
+ else:
+ raise Exception("unexpected task type")
+
+ for x in results:
+ if self.tags is not None:
+ x.tags.extend(self.tags)
+
+ return results
+
+ # *************************************************
+
+ def tasks(self):
+ ''' return task objects for this play '''
+ return self._tasks
+
+ def handlers(self):
+ ''' return handler objects for this play '''
+ return self._handlers
+
+ # *************************************************
+
+ def _get_vars(self):
+ ''' load the vars section from a play, accounting for all sorts of variable features
+ including loading from yaml files, prompting, and conditional includes of the first
+ file found in a list. '''
+
+ if self.vars is None:
+ self.vars = {}
+
+ if type(self.vars) not in [dict, list]:
+ raise errors.AnsibleError("'vars' section must contain only key/value pairs")
+
+ vars = {}
+
+ # translate a list of vars into a dict
+ if type(self.vars) == list:
+ for item in self.vars:
+ if getattr(item, 'items', None) is None:
+ raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
+ k, v = item.items()[0]
+ vars[k] = v
+ else:
+ vars.update(self.vars)
+
+ if type(self.vars_prompt) == list:
+ for var in self.vars_prompt:
+ if not 'name' in var:
+ raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")
+
+ vname = var['name']
+ prompt = var.get("prompt", vname)
+ default = var.get("default", None)
+ private = var.get("private", True)
+
+ confirm = var.get("confirm", False)
+ encrypt = var.get("encrypt", None)
+ salt_size = var.get("salt_size", None)
+ salt = var.get("salt", None)
+
+ if vname not in self.playbook.extra_vars:
+ vars[vname] = self.playbook.callbacks.on_vars_prompt(
+ vname, private, prompt, encrypt, confirm, salt_size, salt, default
+ )
+
+ elif type(self.vars_prompt) == dict:
+ for (vname, prompt) in self.vars_prompt.iteritems():
+ prompt_msg = "%s: " % prompt
+ if vname not in self.playbook.extra_vars:
+ vars[vname] = self.playbook.callbacks.on_vars_prompt(
+ varname=vname, private=False, prompt=prompt_msg, default=None
+ )
+
+ else:
+ raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")
+
+ if type(self.playbook.extra_vars) == dict:
+ vars = utils.combine_vars(vars, self.playbook.extra_vars)
+
+ return vars
+
+ # *************************************************
+
+ def update_vars_files(self, hosts, vault_password=None):
+ ''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''
+
+ # now loop through all the hosts...
+ for h in hosts:
+ self._update_vars_files_for_host(h, vault_password=vault_password)
+
+ # *************************************************
+
+ def compare_tags(self, tags):
+ ''' given a list of tags that the user has specified, return two lists:
+ matched_tags: tags were found within the current play and match those given
+ by the user
+ unmatched_tags: tags that were found within the current play but do not match
+ any provided by the user '''
+
+ # gather all the tags in all the tasks and handlers into one list
+ # FIXME: isn't this in self.tags already?
+
+ all_tags = []
+ for task in self._tasks:
+ if not task.meta:
+ all_tags.extend(task.tags)
+ for handler in self._handlers:
+ all_tags.extend(handler.tags)
+
+ # compare the lists of tags using sets and return the matched and unmatched
+ all_tags_set = set(all_tags)
+ tags_set = set(tags)
+
+ matched_tags = all_tags_set.intersection(tags_set)
+ unmatched_tags = all_tags_set.difference(tags_set)
+
+ a = set(['always'])
+ u = set(['untagged'])
+ if 'always' in all_tags_set:
+ matched_tags = matched_tags.union(a)
+ unmatched_tags = all_tags_set.difference(a)
+
+ if 'all' in tags_set:
+ matched_tags = matched_tags.union(all_tags_set)
+ unmatched_tags = set()
+
+ if 'tagged' in tags_set:
+ matched_tags = all_tags_set.difference(u)
+ unmatched_tags = u
+
+ if 'untagged' in tags_set and 'untagged' in all_tags_set:
+ matched_tags = matched_tags.union(u)
+ unmatched_tags = unmatched_tags.difference(u)
+
+ return matched_tags, unmatched_tags
+
+ # *************************************************
+
+ def _late_merge_role_tags(self):
+ # build a local dict of tags for roles
+ role_tags = {}
+ for task in self._ds['tasks']:
+ if 'role_name' in task:
+ this_role = task['role_name'] + "-" + task['vars']['role_uuid']
+
+ if this_role not in role_tags:
+ role_tags[this_role] = []
+
+ if 'tags' in task['vars']:
+ if isinstance(task['vars']['tags'], basestring):
+ role_tags[this_role] += shlex.split(task['vars']['tags'])
+ else:
+ role_tags[this_role] += task['vars']['tags']
+
+ # apply each role's tags to its tasks
+ for idx, val in enumerate(self._tasks):
+ if getattr(val, 'role_name', None) is not None:
+ this_role = val.role_name + "-" + val.module_vars['role_uuid']
+ if this_role in role_tags:
+ self._tasks[idx].tags = sorted(set(self._tasks[idx].tags + role_tags[this_role]))
+
+ # *************************************************
+
+ def _update_vars_files_for_host(self, host, vault_password=None):
+
+ def generate_filenames(host, inject, filename):
+
+ """ Render the raw filename into 3 forms """
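+            # illustrative example: a vars_files entry of "vars/{{ ansible_os_family }}.yml"
+            # is left untouched in filename2 (only play vars are known), may render to
+            # something like "vars/Debian.yml" in filename3 once host vars are injected,
+            # and filename4 is that result resolved to an on-disk path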
+
+ # filename2 is the templated version of the filename, which will
+ # be fully rendered if any variables contained within it are
+ # non-inventory related
+ filename2 = template(self.basedir, filename, self.vars)
+
+ # filename3 is the same as filename2, but when the host object is
+ # available, inventory variables will be expanded as well since the
+ # name is templated with the injected variables
+ filename3 = filename2
+ if host is not None:
+ filename3 = template(self.basedir, filename2, inject)
+
+ # filename4 is the dwim'd path, but may also be mixed-scope, so we use
+ # both play scoped vars and host scoped vars to template the filepath
+ if utils.contains_vars(filename3) and host is not None:
+ inject.update(self.vars)
+ filename4 = template(self.basedir, filename3, inject)
+ filename4 = utils.path_dwim(self.basedir, filename4)
+ else:
+ filename4 = utils.path_dwim(self.basedir, filename3)
+
+ return filename2, filename3, filename4
+
+
+ def update_vars_cache(host, data, target_filename=None):
+
+ """ update a host's varscache with new var data """
+
+ self.playbook.VARS_CACHE[host] = utils.combine_vars(self.playbook.VARS_CACHE.get(host, {}), data)
+ if target_filename:
+ self.playbook.callbacks.on_import_for_host(host, target_filename)
+
+ def process_files(filename, filename2, filename3, filename4, host=None):
+
+ """ pseudo-algorithm for deciding where new vars should go """
+
+ data = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password)
+ if data:
+ if type(data) != dict:
+ raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
+ if host is not None:
+ target_filename = None
+ if utils.contains_vars(filename2):
+ if not utils.contains_vars(filename3):
+ target_filename = filename3
+ else:
+ target_filename = filename4
+ update_vars_cache(host, data, target_filename=target_filename)
+ else:
+ self.vars_file_vars = utils.combine_vars(self.vars_file_vars, data)
+ # we did process this file
+ return True
+ # we did not process this file
+ return False
+
+ # Enforce that vars_files is always a list
+ if type(self.vars_files) != list:
+ self.vars_files = [ self.vars_files ]
+
+ # Build an inject if this is a host run started by self.update_vars_files
+ if host is not None:
+ inject = {}
+ inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password))
+ inject.update(self.playbook.SETUP_CACHE.get(host, {}))
+ inject.update(self.playbook.VARS_CACHE.get(host, {}))
+ else:
+ inject = None
+
+ processed = []
+ for filename in self.vars_files:
+ if type(filename) == list:
+ # loop over all filenames, loading the first one, and failing if none found
+ found = False
+ sequence = []
+ for real_filename in filename:
+ filename2, filename3, filename4 = generate_filenames(host, inject, real_filename)
+ sequence.append(filename4)
+ if os.path.exists(filename4):
+ found = True
+ if process_files(filename, filename2, filename3, filename4, host=host):
+ processed.append(filename)
+ elif host is not None:
+ self.playbook.callbacks.on_not_import_for_host(host, filename4)
+ if found:
+ break
+ if not found and host is not None:
+ raise errors.AnsibleError(
+ "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
+ )
+ else:
+ # just one filename supplied, load it!
+ filename2, filename3, filename4 = generate_filenames(host, inject, filename)
+ if utils.contains_vars(filename4):
+ continue
+ if process_files(filename, filename2, filename3, filename4, host=host):
+ processed.append(filename)
+
+ return processed
diff --git a/v1/ansible/playbook/task.py b/v1/ansible/playbook/task.py
new file mode 100644
index 00000000000..70c1bc8df6b
--- /dev/null
+++ b/v1/ansible/playbook/task.py
@@ -0,0 +1,346 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from ansible import errors
+from ansible import utils
+from ansible.module_utils.splitter import split_args
+import os
+import ansible.utils.template as template
+import sys
+
+class Task(object):
+
+ _t_common = [
+ 'action', 'always_run', 'any_errors_fatal', 'args', 'become', 'become_method', 'become_pass',
+ 'become_user', 'changed_when', 'delay', 'delegate_to', 'environment', 'failed_when',
+ 'first_available_file', 'ignore_errors', 'local_action', 'meta', 'name', 'no_log',
+ 'notify', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user',
+ 'sudo', 'sudo_pass', 'sudo_user', 'tags', 'transport', 'until', 'when',
+ ]
+
+ __slots__ = [
+ 'async_poll_interval', 'async_seconds', 'default_vars', 'first_available_file',
+ 'items_lookup_plugin', 'items_lookup_terms', 'module_args', 'module_name', 'module_vars',
+ 'notified_by', 'play', 'play_file_vars', 'play_vars', 'role_name', 'role_params', 'role_vars',
+ ] + _t_common
+
+ # to prevent typos and such
+ VALID_KEYS = frozenset([
+ 'async', 'connection', 'include', 'poll',
+ ] + _t_common)
+
+ def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None):
+ ''' constructor loads from a task or handler datastructure '''
+
+ # meta directives are used to tell things like ansible/playbook to run
+ # operations like handler execution. Meta tasks are not executed
+ # normally.
+ if 'meta' in ds:
+ self.meta = ds['meta']
+ self.tags = []
+ self.module_vars = module_vars
+ self.role_name = role_name
+ return
+ else:
+ self.meta = None
+
+
+ library = os.path.join(play.basedir, 'library')
+ if os.path.exists(library):
+ utils.plugins.module_finder.add_directory(library)
+
+ for x in ds.keys():
+
+ # code to allow for saying "modulename: args" versus "action: modulename args"
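+            # e.g. a task written as "yum: name=httpd state=present" is rewritten here
+            # into action "yum name=httpd state=present"; the dict form "yum: {name: httpd}"
+            # is moved into args, with the bare module name kept as the action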
+ if x in utils.plugins.module_finder:
+
+ if 'action' in ds:
+ raise errors.AnsibleError("multiple actions specified in task: '%s' and '%s'" % (x, ds.get('name', ds['action'])))
+ if isinstance(ds[x], dict):
+ if 'args' in ds:
+ raise errors.AnsibleError("can't combine args: and a dict for %s: in task %s" % (x, ds.get('name', "%s: %s" % (x, ds[x]))))
+ ds['args'] = ds[x]
+ ds[x] = ''
+ elif ds[x] is None:
+ ds[x] = ''
+ if not isinstance(ds[x], basestring):
+ raise errors.AnsibleError("action specified for task %s has invalid type %s" % (ds.get('name', "%s: %s" % (x, ds[x])), type(ds[x])))
+ ds['action'] = x + " " + ds[x]
+ ds.pop(x)
+
+ # code to allow "with_glob" and to reference a lookup plugin named glob
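+            # e.g. "with_items: '{{ packages }}'" is stored as items_lookup_plugin='items'
+            # with items_lookup_terms set to the raw expression; the runner later expands
+            # this into one module invocation per item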
+ elif x.startswith("with_"):
+ if isinstance(ds[x], basestring):
+ param = ds[x].strip()
+
+ plugin_name = x.replace("with_","")
+ if plugin_name in utils.plugins.lookup_loader:
+ ds['items_lookup_plugin'] = plugin_name
+ ds['items_lookup_terms'] = ds[x]
+ ds.pop(x)
+ else:
+ raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
+
+ elif x in [ 'changed_when', 'failed_when', 'when']:
+ if isinstance(ds[x], basestring):
+ param = ds[x].strip()
+ # Only a variable, no logic
+ if (param.startswith('{{') and
+ param.find('}}') == len(ds[x]) - 2 and
+ param.find('|') == -1):
+ utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.")
+ elif x.startswith("when_"):
+ utils.deprecated("The 'when_' conditional has been removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True)
+
+ if 'when' in ds:
+ raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds['action'])))
+ when_name = x.replace("when_","")
+ ds['when'] = "%s %s" % (when_name, ds[x])
+ ds.pop(x)
+ elif not x in Task.VALID_KEYS:
+ raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x)
+
+ self.module_vars = module_vars
+ self.play_vars = play_vars
+ self.play_file_vars = play_file_vars
+ self.role_vars = role_vars
+ self.role_params = role_params
+ self.default_vars = default_vars
+ self.play = play
+
+ # load various attributes
+ self.name = ds.get('name', None)
+ self.tags = [ 'untagged' ]
+ self.register = ds.get('register', None)
+ self.environment = ds.get('environment', play.environment)
+ self.role_name = role_name
+ self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log
+ self.run_once = utils.boolean(ds.get('run_once', 'false'))
+
+        # Code to allow the do/until retry feature in a Task
+ if 'until' in ds:
+ if not ds.get('register'):
+                raise errors.AnsibleError("the 'register' keyword is mandatory when using the do/until feature")
+ self.module_vars['delay'] = ds.get('delay', 5)
+ self.module_vars['retries'] = ds.get('retries', 3)
+ self.module_vars['register'] = ds.get('register', None)
+ self.until = ds.get('until')
+ self.module_vars['until'] = self.until
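+            # e.g. a task using "until: result.stdout.find('ready') != -1" together with
+            # "retries: 5" and "delay: 10" (and the mandatory register) is re-run by the
+            # runner until the condition passes or the retries are exhausted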
+
+ # rather than simple key=value args on the options line, these represent structured data and the values
+ # can be hashes and lists, not just scalars
+ self.args = ds.get('args', {})
+
+ # get remote_user for task, then play, then playbook
+ if ds.get('remote_user') is not None:
+ self.remote_user = ds.get('remote_user')
+ elif ds.get('remote_user', play.remote_user) is not None:
+ self.remote_user = ds.get('remote_user', play.remote_user)
+ else:
+ self.remote_user = ds.get('remote_user', play.playbook.remote_user)
+
+ # Fail out if user specifies privilege escalation params in conflict
+ if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')):
+ raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
+
+ if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
+            raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and su params "su", "su_user", "su_pass" in task: %s' % self.name)
+
+ if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
+ raise errors.AnsibleError('incompatible parameters ("su", "su_user", "su_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
+
+ self.become = utils.boolean(ds.get('become', play.become))
+ self.become_method = ds.get('become_method', play.become_method)
+ self.become_user = ds.get('become_user', play.become_user)
+ self.become_pass = ds.get('become_pass', play.playbook.become_pass)
+
+ # set only if passed in current task data
+ if 'sudo' in ds or 'sudo_user' in ds:
+ self.become_method='sudo'
+
+ if 'sudo' in ds:
+ self.become=ds['sudo']
+ del ds['sudo']
+ else:
+ self.become=True
+ if 'sudo_user' in ds:
+ self.become_user = ds['sudo_user']
+ del ds['sudo_user']
+ if 'sudo_pass' in ds:
+ self.become_pass = ds['sudo_pass']
+ del ds['sudo_pass']
+
+ elif 'su' in ds or 'su_user' in ds:
+ self.become_method='su'
+
+            if 'su' in ds:
+                self.become=ds['su']
+                del ds['su']
+            else:
+                self.become=True
+            if 'su_user' in ds:
+                self.become_user = ds['su_user']
+                del ds['su_user']
+            if 'su_pass' in ds:
+                self.become_pass = ds['su_pass']
+                del ds['su_pass']
+
+ # Both are defined
+ if ('action' in ds) and ('local_action' in ds):
+ raise errors.AnsibleError("the 'action' and 'local_action' attributes can not be used together")
+ # Both are NOT defined
+ elif (not 'action' in ds) and (not 'local_action' in ds):
+ raise errors.AnsibleError("'action' or 'local_action' attribute missing in task \"%s\"" % ds.get('name', ''))
+ # Only one of them is defined
+ elif 'local_action' in ds:
+ self.action = ds.get('local_action', '')
+ self.delegate_to = '127.0.0.1'
+ else:
+ self.action = ds.get('action', '')
+ self.delegate_to = ds.get('delegate_to', None)
+ self.transport = ds.get('connection', ds.get('transport', play.transport))
+
+ if isinstance(self.action, dict):
+ if 'module' not in self.action:
+ raise errors.AnsibleError("'module' attribute missing from action in task \"%s\"" % ds.get('name', '%s' % self.action))
+ if self.args:
+ raise errors.AnsibleError("'args' cannot be combined with dict 'action' in task \"%s\"" % ds.get('name', '%s' % self.action))
+ self.args = self.action
+ self.action = self.args.pop('module')
+
+ # delegate_to can use variables
+ if not (self.delegate_to is None):
+ # delegate_to: localhost should use local transport
+ if self.delegate_to in ['127.0.0.1', 'localhost']:
+ self.transport = 'local'
+
+ # notified by is used by Playbook code to flag which hosts
+ # need to run a notifier
+ self.notified_by = []
+
+ # if no name is specified, use the action line as the name
+ if self.name is None:
+ self.name = self.action
+
+ # load various attributes
+ self.when = ds.get('when', None)
+ self.changed_when = ds.get('changed_when', None)
+ self.failed_when = ds.get('failed_when', None)
+
+ # combine the default and module vars here for use in templating
+ all_vars = self.default_vars.copy()
+ all_vars = utils.combine_vars(all_vars, self.play_vars)
+ all_vars = utils.combine_vars(all_vars, self.play_file_vars)
+ all_vars = utils.combine_vars(all_vars, self.role_vars)
+ all_vars = utils.combine_vars(all_vars, self.module_vars)
+ all_vars = utils.combine_vars(all_vars, self.role_params)
+
+ self.async_seconds = ds.get('async', 0) # not async by default
+ self.async_seconds = template.template_from_string(play.basedir, self.async_seconds, all_vars)
+ self.async_seconds = int(self.async_seconds)
+ self.async_poll_interval = ds.get('poll', 10) # default poll = 10 seconds
+ self.async_poll_interval = template.template_from_string(play.basedir, self.async_poll_interval, all_vars)
+ self.async_poll_interval = int(self.async_poll_interval)
+ self.notify = ds.get('notify', [])
+ self.first_available_file = ds.get('first_available_file', None)
+
+ self.items_lookup_plugin = ds.get('items_lookup_plugin', None)
+ self.items_lookup_terms = ds.get('items_lookup_terms', None)
+
+
+ self.ignore_errors = ds.get('ignore_errors', False)
+ self.any_errors_fatal = ds.get('any_errors_fatal', play.any_errors_fatal)
+
+ self.always_run = ds.get('always_run', False)
+
+ # action should be a string
+ if not isinstance(self.action, basestring):
+ raise errors.AnsibleError("action is of type '%s' and not a string in task. name: %s" % (type(self.action).__name__, self.name))
+
+ # notify can be a string or a list, store as a list
+ if isinstance(self.notify, basestring):
+ self.notify = [ self.notify ]
+
+ # split the action line into a module name + arguments
+ try:
+ tokens = split_args(self.action)
+ except Exception, e:
+ if "unbalanced" in str(e):
+ raise errors.AnsibleError("There was an error while parsing the task %s.\n" % repr(self.action) + \
+ "Make sure quotes are matched or escaped properly")
+ else:
+ raise
+ if len(tokens) < 1:
+ raise errors.AnsibleError("invalid/missing action in task. name: %s" % self.name)
+ self.module_name = tokens[0]
+ self.module_args = ''
+ if len(tokens) > 1:
+ self.module_args = " ".join(tokens[1:])
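+
+ # e.g. an action line of "yum name=httpd state=present" splits into
+ # module_name='yum' and module_args='name=httpd state=present';
+ # split_args honors quoting, so quoted arguments survive intact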
+
+ import_tags = self.module_vars.get('tags',[])
+ if type(import_tags) in [int,float]:
+ import_tags = str(import_tags)
+ elif type(import_tags) in [str,unicode]:
+ # allow the user to list comma delimited tags
+ import_tags = import_tags.split(",")
+
+ # handle mutually incompatible options
+ incompatibles = [ x for x in [ self.first_available_file, self.items_lookup_plugin ] if x is not None ]
+ if len(incompatibles) > 1:
+ raise errors.AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task")
+
+ # make first_available_file accessible to Runner code
+ if self.first_available_file:
+ self.module_vars['first_available_file'] = self.first_available_file
+ # make sure that the 'item' variable is set when using
+ # first_available_file (issue #8220)
+ if 'item' not in self.module_vars:
+ self.module_vars['item'] = ''
+
+ if self.items_lookup_plugin is not None:
+ self.module_vars['items_lookup_plugin'] = self.items_lookup_plugin
+ self.module_vars['items_lookup_terms'] = self.items_lookup_terms
+
+ # allow runner to see delegate_to option
+ self.module_vars['delegate_to'] = self.delegate_to
+
+ # make some task attributes accessible to Runner code
+ self.module_vars['ignore_errors'] = self.ignore_errors
+ self.module_vars['register'] = self.register
+ self.module_vars['changed_when'] = self.changed_when
+ self.module_vars['failed_when'] = self.failed_when
+ self.module_vars['always_run'] = self.always_run
+
+ # tags allow certain parts of a playbook to be run without running the whole playbook
+ apply_tags = ds.get('tags', None)
+ if apply_tags is not None:
+ if type(apply_tags) in [ str, unicode ]:
+ self.tags.append(apply_tags)
+ elif type(apply_tags) in [ int, float ]:
+ self.tags.append(str(apply_tags))
+ elif type(apply_tags) == list:
+ self.tags.extend(apply_tags)
+ self.tags.extend(import_tags)
+
+ if len(self.tags) > 1:
+ self.tags.remove('untagged')
+
+ if additional_conditions:
+ new_conditions = additional_conditions[:]
+ if self.when:
+ new_conditions.append(self.when)
+ self.when = new_conditions
diff --git a/lib/ansible/runner/__init__.py b/v1/ansible/runner/__init__.py
similarity index 99%
rename from lib/ansible/runner/__init__.py
rename to v1/ansible/runner/__init__.py
index 8b46683c37e..4ff273778ca 100644
--- a/lib/ansible/runner/__init__.py
+++ b/v1/ansible/runner/__init__.py
@@ -740,7 +740,7 @@ class Runner(object):
if type(items) != list:
raise errors.AnsibleError("lookup plugins have to return a list: %r" % items)
- if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng', 'zypper' ]:
+ if len(items) and utils.is_list_of_strings(items) and self.module_name in ( 'apt', 'yum', 'pkgng', 'zypper', 'dnf' ):
# hack for apt, yum, and pkgng so that with_items maps back into a single module call
use_these_items = []
for x in items:
diff --git a/v1/ansible/runner/action_plugins/__init__.py b/v1/ansible/runner/action_plugins/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/runner/action_plugins/add_host.py b/v1/ansible/runner/action_plugins/add_host.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/add_host.py
rename to v1/ansible/runner/action_plugins/add_host.py
diff --git a/lib/ansible/runner/action_plugins/assemble.py b/v1/ansible/runner/action_plugins/assemble.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/assemble.py
rename to v1/ansible/runner/action_plugins/assemble.py
diff --git a/lib/ansible/runner/action_plugins/assert.py b/v1/ansible/runner/action_plugins/assert.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/assert.py
rename to v1/ansible/runner/action_plugins/assert.py
diff --git a/lib/ansible/runner/action_plugins/async.py b/v1/ansible/runner/action_plugins/async.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/async.py
rename to v1/ansible/runner/action_plugins/async.py
diff --git a/lib/ansible/runner/action_plugins/copy.py b/v1/ansible/runner/action_plugins/copy.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/copy.py
rename to v1/ansible/runner/action_plugins/copy.py
diff --git a/lib/ansible/runner/action_plugins/debug.py b/v1/ansible/runner/action_plugins/debug.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/debug.py
rename to v1/ansible/runner/action_plugins/debug.py
diff --git a/lib/ansible/runner/action_plugins/fail.py b/v1/ansible/runner/action_plugins/fail.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/fail.py
rename to v1/ansible/runner/action_plugins/fail.py
diff --git a/lib/ansible/runner/action_plugins/fetch.py b/v1/ansible/runner/action_plugins/fetch.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/fetch.py
rename to v1/ansible/runner/action_plugins/fetch.py
diff --git a/lib/ansible/runner/action_plugins/group_by.py b/v1/ansible/runner/action_plugins/group_by.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/group_by.py
rename to v1/ansible/runner/action_plugins/group_by.py
diff --git a/lib/ansible/runner/action_plugins/include_vars.py b/v1/ansible/runner/action_plugins/include_vars.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/include_vars.py
rename to v1/ansible/runner/action_plugins/include_vars.py
diff --git a/lib/ansible/runner/action_plugins/normal.py b/v1/ansible/runner/action_plugins/normal.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/normal.py
rename to v1/ansible/runner/action_plugins/normal.py
diff --git a/lib/ansible/runner/action_plugins/patch.py b/v1/ansible/runner/action_plugins/patch.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/patch.py
rename to v1/ansible/runner/action_plugins/patch.py
diff --git a/lib/ansible/runner/action_plugins/pause.py b/v1/ansible/runner/action_plugins/pause.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/pause.py
rename to v1/ansible/runner/action_plugins/pause.py
diff --git a/lib/ansible/runner/action_plugins/raw.py b/v1/ansible/runner/action_plugins/raw.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/raw.py
rename to v1/ansible/runner/action_plugins/raw.py
diff --git a/lib/ansible/runner/action_plugins/script.py b/v1/ansible/runner/action_plugins/script.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/script.py
rename to v1/ansible/runner/action_plugins/script.py
diff --git a/lib/ansible/runner/action_plugins/set_fact.py b/v1/ansible/runner/action_plugins/set_fact.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/set_fact.py
rename to v1/ansible/runner/action_plugins/set_fact.py
diff --git a/lib/ansible/runner/action_plugins/synchronize.py b/v1/ansible/runner/action_plugins/synchronize.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/synchronize.py
rename to v1/ansible/runner/action_plugins/synchronize.py
diff --git a/lib/ansible/runner/action_plugins/template.py b/v1/ansible/runner/action_plugins/template.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/template.py
rename to v1/ansible/runner/action_plugins/template.py
diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/v1/ansible/runner/action_plugins/unarchive.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/unarchive.py
rename to v1/ansible/runner/action_plugins/unarchive.py
diff --git a/lib/ansible/runner/action_plugins/win_copy.py b/v1/ansible/runner/action_plugins/win_copy.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/win_copy.py
rename to v1/ansible/runner/action_plugins/win_copy.py
diff --git a/lib/ansible/runner/action_plugins/win_template.py b/v1/ansible/runner/action_plugins/win_template.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/win_template.py
rename to v1/ansible/runner/action_plugins/win_template.py
diff --git a/lib/ansible/runner/connection.py b/v1/ansible/runner/connection.py
similarity index 100%
rename from lib/ansible/runner/connection.py
rename to v1/ansible/runner/connection.py
diff --git a/v1/ansible/runner/connection_plugins/__init__.py b/v1/ansible/runner/connection_plugins/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/runner/connection_plugins/accelerate.py b/v1/ansible/runner/connection_plugins/accelerate.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/accelerate.py
rename to v1/ansible/runner/connection_plugins/accelerate.py
diff --git a/lib/ansible/runner/connection_plugins/chroot.py b/v1/ansible/runner/connection_plugins/chroot.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/chroot.py
rename to v1/ansible/runner/connection_plugins/chroot.py
diff --git a/lib/ansible/runner/connection_plugins/fireball.py b/v1/ansible/runner/connection_plugins/fireball.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/fireball.py
rename to v1/ansible/runner/connection_plugins/fireball.py
diff --git a/lib/ansible/runner/connection_plugins/funcd.py b/v1/ansible/runner/connection_plugins/funcd.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/funcd.py
rename to v1/ansible/runner/connection_plugins/funcd.py
diff --git a/lib/ansible/runner/connection_plugins/jail.py b/v1/ansible/runner/connection_plugins/jail.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/jail.py
rename to v1/ansible/runner/connection_plugins/jail.py
diff --git a/lib/ansible/runner/connection_plugins/libvirt_lxc.py b/v1/ansible/runner/connection_plugins/libvirt_lxc.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/libvirt_lxc.py
rename to v1/ansible/runner/connection_plugins/libvirt_lxc.py
diff --git a/lib/ansible/runner/connection_plugins/local.py b/v1/ansible/runner/connection_plugins/local.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/local.py
rename to v1/ansible/runner/connection_plugins/local.py
diff --git a/lib/ansible/runner/connection_plugins/paramiko_ssh.py b/v1/ansible/runner/connection_plugins/paramiko_ssh.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/paramiko_ssh.py
rename to v1/ansible/runner/connection_plugins/paramiko_ssh.py
diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/v1/ansible/runner/connection_plugins/ssh.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/ssh.py
rename to v1/ansible/runner/connection_plugins/ssh.py
diff --git a/lib/ansible/runner/connection_plugins/winrm.py b/v1/ansible/runner/connection_plugins/winrm.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/winrm.py
rename to v1/ansible/runner/connection_plugins/winrm.py
diff --git a/lib/ansible/runner/connection_plugins/zone.py b/v1/ansible/runner/connection_plugins/zone.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/zone.py
rename to v1/ansible/runner/connection_plugins/zone.py
diff --git a/v1/ansible/runner/filter_plugins/__init__.py b/v1/ansible/runner/filter_plugins/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/runner/filter_plugins/core.py b/v1/ansible/runner/filter_plugins/core.py
similarity index 82%
rename from lib/ansible/runner/filter_plugins/core.py
rename to v1/ansible/runner/filter_plugins/core.py
index bdf45509c3a..f81da6f8942 100644
--- a/lib/ansible/runner/filter_plugins/core.py
+++ b/v1/ansible/runner/filter_plugins/core.py
@@ -270,6 +270,83 @@ def get_encrypted_password(password, hashtype='sha512', salt=None):
def to_uuid(string):
return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string)))
+def comment(text, style='plain', **kw):
+ # Predefined comment types
+ comment_styles = {
+ 'plain': {
+ 'decoration': '# '
+ },
+ 'erlang': {
+ 'decoration': '% '
+ },
+ 'c': {
+ 'decoration': '// '
+ },
+ 'cblock': {
+ 'beginning': '/*',
+ 'decoration': ' * ',
+ 'end': ' */'
+ },
+ 'xml': {
+ 'beginning': '<!--',
+ 'decoration': ' - ',
+ 'end': '-->'
+ }
+ }
+
+ # Pointer to the right comment type
+ style_params = comment_styles[style]
+
+ if 'decoration' in kw:
+ prepostfix = kw['decoration']
+ else:
+ prepostfix = style_params['decoration']
+
+ # Default params
+ p = {
+ 'newline': '\n',
+ 'beginning': '',
+ 'prefix': (prepostfix).rstrip(),
+ 'prefix_count': 1,
+ 'decoration': '',
+ 'postfix': (prepostfix).rstrip(),
+ 'postfix_count': 1,
+ 'end': ''
+ }
+
+ # Update default params
+ p.update(style_params)
+ p.update(kw)
+
+ # Compose substrings for the final string
+ str_beginning = ''
+ if p['beginning']:
+ str_beginning = "%s%s" % (p['beginning'], p['newline'])
+ str_prefix = str(
+ "%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
+ str_text = ("%s%s" % (
+ p['decoration'],
+ # Prepend each line of the text with the decorator
+ text.replace(
+ p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
+ # Remove trailing spaces when only decorator is on the line
+ "%s%s" % (p['decoration'], p['newline']),
+ "%s%s" % (p['decoration'].rstrip(), p['newline']))
+ str_postfix = p['newline'].join(
+ [''] + [p['postfix'] for x in range(p['postfix_count'])])
+ str_end = ''
+ if p['end']:
+ str_end = "%s%s" % (p['newline'], p['end'])
+
+ # Return the final string
+ return "%s%s%s%s%s" % (
+ str_beginning,
+ str_prefix,
+ str_text,
+ str_postfix,
+ str_end)
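+
+# A minimal sketch of the default behaviour: comment("ansible managed")
+# returns the text framed by a bare prefix and postfix line, i.e.
+#
+#   #
+#   # ansible managed
+#   #
+#
+# while comment("ansible managed", style='c') decorates with "// " instead.
+# The function is registered below as the 'comment' jinja2 filter, so a
+# template can use e.g. {{ ansible_managed | comment }}.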
+
+
class FilterModule(object):
''' Ansible core jinja2 filters '''
@@ -348,4 +425,7 @@ class FilterModule(object):
# random stuff
'random': rand,
'shuffle': randomize_list,
+
+ # comment-style decoration of string
+ 'comment': comment,
}
diff --git a/lib/ansible/runner/filter_plugins/ipaddr.py b/v1/ansible/runner/filter_plugins/ipaddr.py
similarity index 100%
rename from lib/ansible/runner/filter_plugins/ipaddr.py
rename to v1/ansible/runner/filter_plugins/ipaddr.py
diff --git a/lib/ansible/runner/filter_plugins/mathstuff.py b/v1/ansible/runner/filter_plugins/mathstuff.py
similarity index 100%
rename from lib/ansible/runner/filter_plugins/mathstuff.py
rename to v1/ansible/runner/filter_plugins/mathstuff.py
diff --git a/v1/ansible/runner/lookup_plugins/__init__.py b/v1/ansible/runner/lookup_plugins/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/runner/lookup_plugins/cartesian.py b/v1/ansible/runner/lookup_plugins/cartesian.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/cartesian.py
rename to v1/ansible/runner/lookup_plugins/cartesian.py
diff --git a/lib/ansible/runner/lookup_plugins/consul_kv.py b/v1/ansible/runner/lookup_plugins/consul_kv.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/consul_kv.py
rename to v1/ansible/runner/lookup_plugins/consul_kv.py
diff --git a/lib/ansible/runner/lookup_plugins/csvfile.py b/v1/ansible/runner/lookup_plugins/csvfile.py
similarity index 93%
rename from lib/ansible/runner/lookup_plugins/csvfile.py
rename to v1/ansible/runner/lookup_plugins/csvfile.py
index ce5a2b77d2f..a9ea8ed90cd 100644
--- a/lib/ansible/runner/lookup_plugins/csvfile.py
+++ b/v1/ansible/runner/lookup_plugins/csvfile.py
@@ -63,7 +63,10 @@ class LookupModule(object):
for param in params[1:]:
name, value = param.split('=')
assert(name in paramvals)
- paramvals[name] = value
+ if name == 'delimiter':
+ paramvals[name] = str(value)
+ else:
+ paramvals[name] = value
except (ValueError, AssertionError), e:
raise errors.AnsibleError(e)
diff --git a/lib/ansible/runner/lookup_plugins/dict.py b/v1/ansible/runner/lookup_plugins/dict.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/dict.py
rename to v1/ansible/runner/lookup_plugins/dict.py
diff --git a/lib/ansible/runner/lookup_plugins/dig.py b/v1/ansible/runner/lookup_plugins/dig.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/dig.py
rename to v1/ansible/runner/lookup_plugins/dig.py
diff --git a/lib/ansible/runner/lookup_plugins/dnstxt.py b/v1/ansible/runner/lookup_plugins/dnstxt.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/dnstxt.py
rename to v1/ansible/runner/lookup_plugins/dnstxt.py
diff --git a/lib/ansible/runner/lookup_plugins/env.py b/v1/ansible/runner/lookup_plugins/env.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/env.py
rename to v1/ansible/runner/lookup_plugins/env.py
diff --git a/lib/ansible/runner/lookup_plugins/etcd.py b/v1/ansible/runner/lookup_plugins/etcd.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/etcd.py
rename to v1/ansible/runner/lookup_plugins/etcd.py
diff --git a/lib/ansible/runner/lookup_plugins/file.py b/v1/ansible/runner/lookup_plugins/file.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/file.py
rename to v1/ansible/runner/lookup_plugins/file.py
diff --git a/lib/ansible/runner/lookup_plugins/fileglob.py b/v1/ansible/runner/lookup_plugins/fileglob.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/fileglob.py
rename to v1/ansible/runner/lookup_plugins/fileglob.py
diff --git a/lib/ansible/runner/lookup_plugins/first_found.py b/v1/ansible/runner/lookup_plugins/first_found.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/first_found.py
rename to v1/ansible/runner/lookup_plugins/first_found.py
diff --git a/lib/ansible/runner/lookup_plugins/flattened.py b/v1/ansible/runner/lookup_plugins/flattened.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/flattened.py
rename to v1/ansible/runner/lookup_plugins/flattened.py
diff --git a/lib/ansible/runner/lookup_plugins/indexed_items.py b/v1/ansible/runner/lookup_plugins/indexed_items.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/indexed_items.py
rename to v1/ansible/runner/lookup_plugins/indexed_items.py
diff --git a/lib/ansible/runner/lookup_plugins/inventory_hostnames.py b/v1/ansible/runner/lookup_plugins/inventory_hostnames.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/inventory_hostnames.py
rename to v1/ansible/runner/lookup_plugins/inventory_hostnames.py
diff --git a/lib/ansible/runner/lookup_plugins/items.py b/v1/ansible/runner/lookup_plugins/items.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/items.py
rename to v1/ansible/runner/lookup_plugins/items.py
diff --git a/lib/ansible/runner/lookup_plugins/lines.py b/v1/ansible/runner/lookup_plugins/lines.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/lines.py
rename to v1/ansible/runner/lookup_plugins/lines.py
diff --git a/lib/ansible/runner/lookup_plugins/nested.py b/v1/ansible/runner/lookup_plugins/nested.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/nested.py
rename to v1/ansible/runner/lookup_plugins/nested.py
diff --git a/lib/ansible/runner/lookup_plugins/password.py b/v1/ansible/runner/lookup_plugins/password.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/password.py
rename to v1/ansible/runner/lookup_plugins/password.py
diff --git a/lib/ansible/runner/lookup_plugins/pipe.py b/v1/ansible/runner/lookup_plugins/pipe.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/pipe.py
rename to v1/ansible/runner/lookup_plugins/pipe.py
diff --git a/lib/ansible/runner/lookup_plugins/random_choice.py b/v1/ansible/runner/lookup_plugins/random_choice.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/random_choice.py
rename to v1/ansible/runner/lookup_plugins/random_choice.py
diff --git a/lib/ansible/runner/lookup_plugins/redis_kv.py b/v1/ansible/runner/lookup_plugins/redis_kv.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/redis_kv.py
rename to v1/ansible/runner/lookup_plugins/redis_kv.py
diff --git a/lib/ansible/runner/lookup_plugins/sequence.py b/v1/ansible/runner/lookup_plugins/sequence.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/sequence.py
rename to v1/ansible/runner/lookup_plugins/sequence.py
diff --git a/lib/ansible/runner/lookup_plugins/subelements.py b/v1/ansible/runner/lookup_plugins/subelements.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/subelements.py
rename to v1/ansible/runner/lookup_plugins/subelements.py
diff --git a/lib/ansible/runner/lookup_plugins/template.py b/v1/ansible/runner/lookup_plugins/template.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/template.py
rename to v1/ansible/runner/lookup_plugins/template.py
diff --git a/lib/ansible/runner/lookup_plugins/together.py b/v1/ansible/runner/lookup_plugins/together.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/together.py
rename to v1/ansible/runner/lookup_plugins/together.py
diff --git a/lib/ansible/runner/lookup_plugins/url.py b/v1/ansible/runner/lookup_plugins/url.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/url.py
rename to v1/ansible/runner/lookup_plugins/url.py
diff --git a/lib/ansible/runner/poller.py b/v1/ansible/runner/poller.py
similarity index 100%
rename from lib/ansible/runner/poller.py
rename to v1/ansible/runner/poller.py
diff --git a/lib/ansible/runner/return_data.py b/v1/ansible/runner/return_data.py
similarity index 100%
rename from lib/ansible/runner/return_data.py
rename to v1/ansible/runner/return_data.py
diff --git a/v1/ansible/runner/shell_plugins/__init__.py b/v1/ansible/runner/shell_plugins/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/runner/shell_plugins/csh.py b/v1/ansible/runner/shell_plugins/csh.py
similarity index 100%
rename from lib/ansible/runner/shell_plugins/csh.py
rename to v1/ansible/runner/shell_plugins/csh.py
diff --git a/lib/ansible/runner/shell_plugins/fish.py b/v1/ansible/runner/shell_plugins/fish.py
similarity index 100%
rename from lib/ansible/runner/shell_plugins/fish.py
rename to v1/ansible/runner/shell_plugins/fish.py
diff --git a/lib/ansible/runner/shell_plugins/powershell.py b/v1/ansible/runner/shell_plugins/powershell.py
similarity index 100%
rename from lib/ansible/runner/shell_plugins/powershell.py
rename to v1/ansible/runner/shell_plugins/powershell.py
diff --git a/lib/ansible/runner/shell_plugins/sh.py b/v1/ansible/runner/shell_plugins/sh.py
similarity index 100%
rename from lib/ansible/runner/shell_plugins/sh.py
rename to v1/ansible/runner/shell_plugins/sh.py
diff --git a/v1/ansible/utils/__init__.py b/v1/ansible/utils/__init__.py
new file mode 100644
index 00000000000..eb6fa2a712b
--- /dev/null
+++ b/v1/ansible/utils/__init__.py
@@ -0,0 +1,1662 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import errno
+import sys
+import re
+import os
+import shlex
+import yaml
+import copy
+import optparse
+import operator
+from ansible import errors
+from ansible import __version__
+from ansible.utils.display_functions import *
+from ansible.utils.plugins import *
+from ansible.utils.su_prompts import *
+from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s
+from ansible.callbacks import display
+from ansible.module_utils.splitter import split_args, unquote
+from ansible.module_utils.basic import heuristic_log_sanitize
+from ansible.utils.unicode import to_bytes, to_unicode
+import ansible.constants as C
+import ast
+import time
+import StringIO
+import stat
+import termios
+import tty
+import pipes
+import random
+import difflib
+import warnings
+import traceback
+import getpass
+import sys
+import subprocess
+import contextlib
+
+from vault import VaultLib
+
+VERBOSITY=0
+
+MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
+
+# caching the compilation of the regex used
+# to check for lookup calls within data
+LOOKUP_REGEX = re.compile(r'lookup\s*\(')
+PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})')
+CODE_REGEX = re.compile(r'(?:{%|%})')
+
+
+try:
+ # simplejson can be much faster if it's available
+ import simplejson as json
+except ImportError:
+ import json
+
+try:
+ from yaml import CSafeLoader as Loader
+except ImportError:
+ from yaml import SafeLoader as Loader
+
+PASSLIB_AVAILABLE = False
+try:
+ import passlib.hash
+ PASSLIB_AVAILABLE = True
+except:
+ pass
+
+try:
+ import builtin
+except ImportError:
+ import __builtin__ as builtin
+
+KEYCZAR_AVAILABLE=False
+try:
+ try:
+ # some versions of pycrypto may not have this?
+ from Crypto.pct_warnings import PowmInsecureWarning
+ except ImportError:
+ PowmInsecureWarning = RuntimeWarning
+
+ with warnings.catch_warnings(record=True) as warning_handler:
+ warnings.simplefilter("error", PowmInsecureWarning)
+ try:
+ import keyczar.errors as key_errors
+ from keyczar.keys import AesKey
+ except PowmInsecureWarning:
+ system_warning(
+ "The version of gmp you have installed has a known issue regarding " + \
+ "timing vulnerabilities when used with pycrypto. " + \
+ "If possible, you should update it (i.e. yum update gmp)."
+ )
+ warnings.resetwarnings()
+ warnings.simplefilter("ignore")
+ import keyczar.errors as key_errors
+ from keyczar.keys import AesKey
+ KEYCZAR_AVAILABLE=True
+except ImportError:
+ pass
+
+
+###############################################################
+# Abstractions around keyczar
+###############################################################
+
+def key_for_hostname(hostname):
+ # fireball mode is an implementation of ansible firing up zeromq via SSH
+ # to use no persistent daemons or key management
+
+ if not KEYCZAR_AVAILABLE:
+ raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
+
+ key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
+ if not os.path.exists(key_path):
+ os.makedirs(key_path, mode=0700)
+ os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
+ elif not os.path.isdir(key_path):
+ raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
+
+ if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
+ raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
+
+ key_path = os.path.join(key_path, hostname)
+
+ # use new AES keys every 2 hours, which means fireball must not allow running for longer either
+ if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
+ key = AesKey.Generate()
+ fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
+ fh = os.fdopen(fd, 'w')
+ fh.write(str(key))
+ fh.close()
+ return key
+ else:
+ if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
+ raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
+ fh = open(key_path)
+ key = AesKey.Read(fh.read())
+ fh.close()
+ return key
+
+def encrypt(key, msg):
+ return key.Encrypt(msg)
+
+def decrypt(key, msg):
+ try:
+ return key.Decrypt(msg)
+ except key_errors.InvalidSignatureError:
+ raise errors.AnsibleError("decryption failed")
+
+###############################################################
+# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
+###############################################################
+
+def read_vault_file(vault_password_file):
+ """Read a vault password from a file or if executable, execute the script and
+ retrieve password from STDOUT
+ """
+ if vault_password_file:
+ this_path = os.path.realpath(os.path.expanduser(vault_password_file))
+ if is_executable(this_path):
+ try:
+ # STDERR not captured to make it easier for users to prompt for input in their scripts
+ p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
+ except OSError, e:
+ raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e))
+ stdout, stderr = p.communicate()
+ vault_pass = stdout.strip('\r\n')
+ else:
+ try:
+ f = open(this_path, "rb")
+ vault_pass=f.read().strip()
+ f.close()
+ except (OSError, IOError), e:
+ raise errors.AnsibleError("Could not read %s: %s" % (this_path, e))
+
+ return vault_pass
+ else:
+ return None
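+
+# e.g. (hypothetical paths): a plain file is read and stripped,
+#   read_vault_file('~/.vault_pass.txt')    -> contents of the file
+# while an executable script is run and its stdout is used instead:
+#   read_vault_file('~/bin/get-vault-pass') -> stdout of the script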
+
+def err(msg):
+ ''' print an error message to stderr '''
+
+ print >> sys.stderr, msg
+
+def exit(msg, rc=1):
+ ''' quit with an error to stdout and a failure code '''
+
+ err(msg)
+ sys.exit(rc)
+
+def jsonify(result, format=False):
+ ''' format JSON output (uncompressed or pretty-printed) '''
+
+ if result is None:
+ return "{}"
+ result2 = result.copy()
+ for key, value in result2.items():
+ if type(value) is str:
+ result2[key] = value.decode('utf-8', 'ignore')
+
+ indent = None
+ if format:
+ indent = 4
+
+ try:
+ return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
+ except UnicodeDecodeError:
+ return json.dumps(result2, sort_keys=True, indent=indent)
+
+def write_tree_file(tree, hostname, buf):
+ ''' write something into treedir/hostname '''
+
+ # TODO: might be nice to append playbook runs per host in a similar way
+ # in which case, we'd want append mode.
+ path = os.path.join(tree, hostname)
+ fd = open(path, "w+")
+ fd.write(buf)
+ fd.close()
+
+def is_failed(result):
+ ''' is a given JSON result a failed result? '''
+
+ return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
+
+def is_changed(result):
+ ''' is a given JSON result a changed result? '''
+
+ return (result.get('changed', False) in [ True, 'True', 'true'])
+
+def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
+ from ansible.utils import template
+
+ if conditional is None or conditional == '':
+ return True
+
+ if isinstance(conditional, list):
+ for x in conditional:
+ if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
+ return False
+ return True
+
+ if not isinstance(conditional, basestring):
+ return conditional
+
+ conditional = conditional.replace("jinja2_compare ","")
+ # allow variable names
+ if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'):
+ conditional = to_unicode(inject[conditional], nonstring='simplerepr')
+ conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
+ original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","")
+ # a Jinja2 evaluation that results in something Python can eval!
+ presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
+ conditional = template.template(basedir, presented, inject)
+ val = conditional.strip()
+ if val == presented:
+ # the templating failed, meaning most likely a
+ # variable was undefined. If we happened to be
+ # looking for an undefined variable, return True,
+ # otherwise fail
+ if "is undefined" in conditional:
+ return True
+ elif "is defined" in conditional:
+ return False
+ else:
+ raise errors.AnsibleError("error while evaluating conditional: %s" % original)
+ elif val == "True":
+ return True
+ elif val == "False":
+ return False
+ else:
+ raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
+
+def is_executable(path):
+ '''is the given path executable?'''
+ return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
+ or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
+ or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
+
+def unfrackpath(path):
+ '''
+ returns a path that is free of symlinks, environment
+ variables, relative path traversals and symbols (~)
+ example:
+ '$HOME/../../var/mail' becomes '/var/spool/mail'
+ '''
+ return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
+
+def prepare_writeable_dir(tree,mode=0777):
+ ''' make sure a directory exists and is writeable '''
+
+ # modify the mode to ensure the owner at least
+ # has read/write access to this directory
+ mode |= 0700
+
+ # make sure the tree path is always expanded
+ # and normalized and free of symlinks
+ tree = unfrackpath(tree)
+
+ if not os.path.exists(tree):
+ try:
+ os.makedirs(tree, mode)
+ except (IOError, OSError), e:
+ raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
+ if not os.access(tree, os.W_OK):
+ raise errors.AnsibleError("Cannot write to path %s" % tree)
+ return tree
+
+def path_dwim(basedir, given):
+ '''
+ make relative paths work like folks expect.
+ '''
+
+ if given.startswith("'"):
+ given = given[1:-1]
+
+ if given.startswith("/"):
+ return os.path.abspath(given)
+ elif given.startswith("~"):
+ return os.path.abspath(os.path.expanduser(given))
+ else:
+ if basedir is None:
+ basedir = "."
+ return os.path.abspath(os.path.join(basedir, given))
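+
+# for illustration (paths are hypothetical):
+#   path_dwim('/etc/ansible/play', 'files/foo.conf')  -> '/etc/ansible/play/files/foo.conf'
+#   path_dwim('/etc/ansible/play', '/tmp/foo.conf')   -> '/tmp/foo.conf'
+#   path_dwim('/etc/ansible/play', '~/foo.conf')      -> expanded under the user's home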
+
+def path_dwim_relative(original, dirname, source, playbook_base, check=True):
+ ''' find one file in a directory one level up in a dir named dirname relative to current '''
+ # (used by roles code)
+
+ from ansible.utils import template
+
+
+ basedir = os.path.dirname(original)
+ if os.path.islink(basedir):
+ basedir = unfrackpath(basedir)
+ template2 = os.path.join(basedir, dirname, source)
+ else:
+ template2 = os.path.join(basedir, '..', dirname, source)
+ source2 = path_dwim(basedir, template2)
+ if os.path.exists(source2):
+ return source2
+ obvious_local_path = path_dwim(playbook_base, source)
+ if os.path.exists(obvious_local_path):
+ return obvious_local_path
+ if check:
+ raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
+ return source2 # which does not exist
+
+def repo_url_to_role_name(repo_url):
+ # gets the role name out of a repo like
+ # http://git.example.com/repos/repo.git" => "repo"
+
+ if '://' not in repo_url and '@' not in repo_url:
+ return repo_url
+ trailing_path = repo_url.split('/')[-1]
+ if trailing_path.endswith('.git'):
+ trailing_path = trailing_path[:-4]
+ if trailing_path.endswith('.tar.gz'):
+ trailing_path = trailing_path[:-7]
+ if ',' in trailing_path:
+ trailing_path = trailing_path.split(',')[0]
+ return trailing_path
+
+
+def role_spec_parse(role_spec):
+ # takes a repo and a version like
+ # git+http://git.example.com/repos/repo.git,v1.0
+ # and returns a list of properties such as:
+ # {
+ # 'scm': 'git',
+ # 'src': 'http://git.example.com/repos/repo.git',
+ # 'version': 'v1.0',
+ # 'name': 'repo'
+ # }
+
+ role_spec = role_spec.strip()
+ role_version = ''
+ default_role_versions = dict(git='master', hg='tip')
+ if role_spec == "" or role_spec.startswith("#"):
+ return (None, None, None, None)
+
+ tokens = [s.strip() for s in role_spec.split(',')]
+
+ # assume https://github.com URLs are git+https:// URLs and not
+ # tarballs unless they end in '.zip'
+ if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
+ tokens[0] = 'git+' + tokens[0]
+
+ if '+' in tokens[0]:
+ (scm, role_url) = tokens[0].split('+')
+ else:
+ scm = None
+ role_url = tokens[0]
+ if len(tokens) >= 2:
+ role_version = tokens[1]
+ if len(tokens) == 3:
+ role_name = tokens[2]
+ else:
+ role_name = repo_url_to_role_name(tokens[0])
+ if scm and not role_version:
+ role_version = default_role_versions.get(scm, '')
+ return dict(scm=scm, src=role_url, version=role_version, name=role_name)
+
+
+def role_yaml_parse(role):
+ if 'role' in role:
+ # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
+ role_info = role_spec_parse(role['role'])
+ if isinstance(role_info, dict):
+ # Warning: Slight change in behaviour here. name may be being
+ # overloaded. Previously, name was only a parameter to the role.
+ # Now it is both a parameter to the role and the name that
+ # ansible-galaxy will install under on the local system.
+ if 'name' in role and 'name' in role_info:
+ del role_info['name']
+ role.update(role_info)
+ else:
+ # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
+ if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
+ role["src"] = "git+" + role["src"]
+
+ if '+' in role["src"]:
+ (scm, src) = role["src"].split('+')
+ role["scm"] = scm
+ role["src"] = src
+
+ if 'name' not in role:
+ role["name"] = repo_url_to_role_name(role["src"])
+
+ if 'version' not in role:
+ role['version'] = ''
+
+ if 'scm' not in role:
+ role['scm'] = None
+
+ return role
+
+
+def json_loads(data):
+ ''' parse a JSON string and return a data structure '''
+ try:
+ loaded = json.loads(data)
+ except ValueError,e:
+ raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e))
+
+ return loaded
+
+def _clean_data(orig_data, from_remote=False, from_inventory=False):
+ ''' remove jinja2 template tags from a string '''
+
+ if not isinstance(orig_data, basestring):
+ return orig_data
+
+ # when the data is marked as having come from a remote, we always
+ # replace any print blocks (ie. {{var}}), however when marked as coming
+ # from inventory we only replace print blocks that contain a call to
+ # a lookup plugin (ie. {{lookup('foo','bar')}})
+ replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None)
+
+ regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX
+
+ with contextlib.closing(StringIO.StringIO(orig_data)) as data:
+ # these variables keep track of opening block locations, as we only
+ # want to replace matched pairs of print/block tags
+ print_openings = []
+ block_openings = []
+ for mo in regex.finditer(orig_data):
+ token = mo.group(0)
+ token_start = mo.start(0)
+
+ if token[0] == '{':
+ if token == '{%':
+ block_openings.append(token_start)
+ elif token == '{{':
+ print_openings.append(token_start)
+
+ elif token[1] == '}':
+ prev_idx = None
+ if token == '%}' and block_openings:
+ prev_idx = block_openings.pop()
+ elif token == '}}' and print_openings:
+ prev_idx = print_openings.pop()
+
+ if prev_idx is not None:
+ # replace the opening
+ data.seek(prev_idx, os.SEEK_SET)
+ data.write('{#')
+ # replace the closing
+ data.seek(token_start, os.SEEK_SET)
+ data.write('#}')
+
+ else:
+ assert False, 'Unhandled regex match'
+
+ return data.getvalue()
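+
+# for example, a string that came back from a remote host:
+#   _clean_data("msg={{ lookup('pipe','id') }}", from_remote=True)
+# returns "msg={# lookup('pipe','id') #}"; the matched {{ }} pair is turned
+# into a jinja2 comment so it can never be re-templated on the control machine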
+
+def _clean_data_struct(orig_data, from_remote=False, from_inventory=False):
+ '''
+ walk a complex data structure, and use _clean_data() to
+ remove any template tags that may exist
+ '''
+ if not from_remote and not from_inventory:
+ raise errors.AnsibleErrors("when cleaning data, you must specify either from_remote or from_inventory")
+ if isinstance(orig_data, dict):
+ data = orig_data.copy()
+ for key in data:
+ new_key = _clean_data_struct(key, from_remote, from_inventory)
+ new_val = _clean_data_struct(data[key], from_remote, from_inventory)
+ if key != new_key:
+ del data[key]
+ data[new_key] = new_val
+ elif isinstance(orig_data, list):
+ data = orig_data[:]
+ for i in range(0, len(data)):
+ data[i] = _clean_data_struct(data[i], from_remote, from_inventory)
+ elif isinstance(orig_data, basestring):
+ data = _clean_data(orig_data, from_remote, from_inventory)
+ else:
+ data = orig_data
+ return data
+
+def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False):
+ ''' this version for module return data only '''
+
+ orig_data = raw_data
+
+ # ignore stuff like tcgetattr spewage or other warnings
+ data = filter_leading_non_json_lines(raw_data)
+
+ try:
+ results = json.loads(data)
+ except:
+ if no_exceptions:
+ return dict(failed=True, parsed=False, msg=raw_data)
+ else:
+ raise
+
+ if from_remote:
+ results = _clean_data_struct(results, from_remote, from_inventory)
+
+ return results
+
+def serialize_args(args):
+ '''
+ Flattens a dictionary args to a k=v string
+ '''
+ module_args = ""
+ for (k,v) in args.iteritems():
+ if isinstance(v, basestring):
+ module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
+ elif isinstance(v, bool):
+ module_args = "%s=%s %s" % (k, str(v), module_args)
+ return module_args.strip()
+
+def merge_module_args(current_args, new_args):
+ '''
+ merges either a dictionary or string of k=v pairs with another string of k=v pairs,
+ and returns a new k=v string without duplicates.
+ '''
+ if not isinstance(current_args, basestring):
+ raise errors.AnsibleError("expected current_args to be a basestring")
+ # we use parse_kv to split up the current args into a dictionary
+ final_args = parse_kv(current_args)
+ if isinstance(new_args, dict):
+ final_args.update(new_args)
+ elif isinstance(new_args, basestring):
+ new_args_kv = parse_kv(new_args)
+ final_args.update(new_args_kv)
+ return serialize_args(final_args)
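+
+# sketch: a k=v string and a dict can be merged, later values winning
+# (the ordering of the resulting pairs is not guaranteed):
+#   merge_module_args('src=foo dest=/tmp/foo', {'dest': '/tmp/bar'})
+#   -> e.g. 'dest=/tmp/bar src=foo'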
+
+def parse_yaml(data, path_hint=None):
+ ''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!'''
+
+ stripped_data = data.lstrip()
+ loaded = None
+ if stripped_data.startswith("{") or stripped_data.startswith("["):
+ # since the line starts with { or [ we can infer this is a JSON document.
+ try:
+ loaded = json.loads(data)
+ except ValueError, ve:
+ if path_hint:
+ raise errors.AnsibleError(path_hint + ": " + str(ve))
+ else:
+ raise errors.AnsibleError(str(ve))
+ else:
+ # else this is pretty sure to be a YAML document
+ loaded = yaml.load(data, Loader=Loader)
+
+ return loaded
+
+def process_common_errors(msg, probline, column):
+ replaced = probline.replace(" ","")
+
+ if ":{{" in replaced and "}}" in replaced:
+ msg = msg + """
+This one looks easy to fix. YAML thought it was looking for the start of a
+hash/dictionary and was confused to see a second "{". Most likely this was
+meant to be an ansible template evaluation instead, so we have to give the
+parser a small hint that we wanted a string instead. The solution here is to
+just quote the entire value.
+
+For instance, if the original line was:
+
+ app_path: {{ base_path }}/foo
+
+It should be written as:
+
+ app_path: "{{ base_path }}/foo"
+"""
+ return msg
+
+ elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
+ msg = msg + """
+This one looks easy to fix. There seems to be an extra unquoted colon in the line
+and this is confusing the parser. It was only expecting to find one free
+colon. The solution is just add some quotes around the colon, or quote the
+entire line after the first colon.
+
+For instance, if the original line was:
+
+ copy: src=file.txt dest=/path/filename:with_colon.txt
+
+It can be written as:
+
+ copy: src=file.txt dest='/path/filename:with_colon.txt'
+
+Or:
+
+ copy: 'src=file.txt dest=/path/filename:with_colon.txt'
+
+
+"""
+ return msg
+ else:
+ parts = probline.split(":")
+ if len(parts) > 1:
+ middle = parts[1].strip()
+ match = False
+ unbalanced = False
+ if middle.startswith("'") and not middle.endswith("'"):
+ match = True
+ elif middle.startswith('"') and not middle.endswith('"'):
+ match = True
+ if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2:
+ unbalanced = True
+ if match:
+ msg = msg + """
+This one looks easy to fix. It seems that there is a value started
+with a quote, and the YAML parser is expecting to see the line ended
+with the same kind of quote. For instance:
+
+ when: "ok" in result.stdout
+
+Could be written as:
+
+ when: '"ok" in result.stdout'
+
+or equivalently:
+
+ when: "'ok' in result.stdout"
+
+"""
+ return msg
+
+ if unbalanced:
+ msg = msg + """
+We could be wrong, but this one looks like it might be an issue with
+unbalanced quotes. If starting a value with a quote, make sure the
+line ends with the same set of quotes. For instance this arbitrary
+example:
+
+ foo: "bad" "wolf"
+
+Could be written as:
+
+ foo: '"bad" "wolf"'
+
+"""
+ return msg
+
+ return msg
+
+def process_yaml_error(exc, data, path=None, show_content=True):
+ if hasattr(exc, 'problem_mark'):
+ mark = exc.problem_mark
+ if show_content:
+ if mark.line -1 >= 0:
+ before_probline = data.split("\n")[mark.line-1]
+ else:
+ before_probline = ''
+ probline = data.split("\n")[mark.line]
+ arrow = " " * mark.column + "^"
+ msg = """Syntax Error while loading YAML script, %s
+Note: The error may actually appear before this position: line %s, column %s
+
+%s
+%s
+%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
+
+ unquoted_var = None
+ if '{{' in probline and '}}' in probline:
+ if '"{{' not in probline or "'{{" not in probline:
+ unquoted_var = True
+
+ if not unquoted_var:
+ msg = process_common_errors(msg, probline, mark.column)
+ else:
+ msg = msg + """
+We could be wrong, but this one looks like it might be an issue with
+missing quotes. Always quote template expression brackets when they
+start a value. For instance:
+
+ with_items:
+ - {{ foo }}
+
+Should be written as:
+
+ with_items:
+ - "{{ foo }}"
+
+"""
+ else:
+ # most likely displaying a file with sensitive content,
+ # so don't show any of the actual lines of yaml just the
+ # line number itself
+ msg = """Syntax error while loading YAML script, %s
+The error appears to have been on line %s, column %s, but may actually
+be before there depending on the exact syntax problem.
+""" % (path, mark.line + 1, mark.column + 1)
+
+ else:
+ # No problem markers means we have to throw a generic
+ # "stuff messed up" type message. Sry bud.
+ if path:
+ msg = "Could not parse YAML. Check over %s again." % path
+ else:
+ msg = "Could not parse YAML."
+ raise errors.AnsibleYAMLValidationFailed(msg)
+
+
+def parse_yaml_from_file(path, vault_password=None):
+ ''' convert a yaml file to a data structure '''
+
+ data = None
+ show_content = True
+
+ try:
+ data = open(path).read()
+ except IOError:
+ raise errors.AnsibleError("file could not read: %s" % path)
+
+ vault = VaultLib(password=vault_password)
+ if vault.is_encrypted(data):
+ # if the file is encrypted and no password was specified,
+ # the decrypt call would throw an error, but we check first
+ # since the decrypt function doesn't know the file name
+ if vault_password is None:
+ raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path)
+ data = vault.decrypt(data)
+ show_content = False
+
+ try:
+ return parse_yaml(data, path_hint=path)
+ except yaml.YAMLError, exc:
+ process_yaml_error(exc, data, path, show_content)
+
+def parse_kv(args):
+ ''' convert a string of key/value items to a dict '''
+ options = {}
+ if args is not None:
+ try:
+ vargs = split_args(args)
+ except ValueError, ve:
+ if 'no closing quotation' in str(ve).lower():
+ raise errors.AnsibleError("error parsing argument string, try quoting the entire line.")
+ else:
+ raise
+ for x in vargs:
+ if "=" in x:
+ k, v = x.split("=",1)
+ options[k.strip()] = unquote(v.strip())
+ return options
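+
+# e.g. quoted values survive the split and are unquoted afterwards:
+#   parse_kv('src=/etc/motd dest="/tmp/m o t d" mode=0644')
+#   -> {'src': '/etc/motd', 'dest': '/tmp/m o t d', 'mode': '0644'}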
+
+def _validate_both_dicts(a, b):
+
+ if not (isinstance(a, dict) and isinstance(b, dict)):
+ raise errors.AnsibleError(
+ "failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)
+ )
+
+def merge_hash(a, b):
+ ''' recursively merges hash b into a
+ keys from b take precedence over keys from a '''
+
+ result = {}
+
+ # we check here as well as in combine_vars() since this
+ # function can work recursively with nested dicts
+ _validate_both_dicts(a, b)
+
+ for dicts in a, b:
+ # next, iterate over b keys and values
+ for k, v in dicts.iteritems():
+ # if there's already such key in a
+ # and that key contains dict
+ if k in result and isinstance(result[k], dict):
+ # merge those dicts recursively
+ result[k] = merge_hash(a[k], v)
+ else:
+ # otherwise, just copy a value from b to a
+ result[k] = v
+
+ return result
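+
+# e.g. nested keys are merged rather than replaced wholesale:
+#   merge_hash({'a': {'x': 1}, 'b': 1}, {'a': {'y': 2}, 'b': 2})
+#   -> {'a': {'x': 1, 'y': 2}, 'b': 2}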
+
+def default(value, function):
+ ''' syntactic sugar around lazy evaluation of defaults '''
+ if value is None:
+ return function()
+ return value
+
+
+def _git_repo_info(repo_path):
+ ''' returns a string containing git branch, commit id and commit date '''
+ result = None
+ if os.path.exists(repo_path):
+ # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
+ if os.path.isfile(repo_path):
+ try:
+ gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
+ # There is a possibility the .git file to have an absolute path.
+ if os.path.isabs(gitdir):
+ repo_path = gitdir
+ else:
+ repo_path = os.path.join(repo_path[:-4], gitdir)
+ except (IOError, AttributeError):
+ return ''
+ f = open(os.path.join(repo_path, "HEAD"))
+ branch = f.readline().split('/')[-1].rstrip("\n")
+ f.close()
+ branch_path = os.path.join(repo_path, "refs", "heads", branch)
+ if os.path.exists(branch_path):
+ f = open(branch_path)
+ commit = f.readline()[:10]
+ f.close()
+ else:
+ # detached HEAD
+ commit = branch[:10]
+ branch = 'detached HEAD'
+ branch_path = os.path.join(repo_path, "HEAD")
+
+ date = time.localtime(os.stat(branch_path).st_mtime)
+ if time.daylight == 0:
+ offset = time.timezone
+ else:
+ offset = time.altzone
+ result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
+ time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
+ else:
+ result = ''
+ return result
+
+
+def _gitinfo():
+ basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
+ repo_path = os.path.join(basedir, '.git')
+ result = _git_repo_info(repo_path)
+ submodules = os.path.join(basedir, '.gitmodules')
+ if not os.path.exists(submodules):
+ return result
+ f = open(submodules)
+ for line in f:
+ tokens = line.strip().split(' ')
+ if tokens[0] == 'path':
+ submodule_path = tokens[2]
+ submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git'))
+ if not submodule_info:
+ submodule_info = ' not found - use git submodule update --init ' + submodule_path
+ result += "\n {0}: {1}".format(submodule_path, submodule_info)
+ f.close()
+ return result
+
+
+def version(prog):
+ result = "{0} {1}".format(prog, __version__)
+ gitinfo = _gitinfo()
+ if gitinfo:
+ result = result + " {0}".format(gitinfo)
+ result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
+ return result
+
+def version_info(gitinfo=False):
+ if gitinfo:
+ # expensive call, use with care
+ ansible_version_string = version('')
+ else:
+ ansible_version_string = __version__
+ ansible_version = ansible_version_string.split()[0]
+ ansible_versions = ansible_version.split('.')
+ for counter in range(len(ansible_versions)):
+ if ansible_versions[counter] == "":
+ ansible_versions[counter] = 0
+ try:
+ ansible_versions[counter] = int(ansible_versions[counter])
+ except:
+ pass
+ if len(ansible_versions) < 3:
+ for counter in range(len(ansible_versions), 3):
+ ansible_versions.append(0)
+ return {'string': ansible_version_string.strip(),
+ 'full': ansible_version,
+ 'major': ansible_versions[0],
+ 'minor': ansible_versions[1],
+ 'revision': ansible_versions[2]}
+
+def getch():
+ ''' read in a single character '''
+ fd = sys.stdin.fileno()
+ old_settings = termios.tcgetattr(fd)
+ try:
+ tty.setraw(sys.stdin.fileno())
+ ch = sys.stdin.read(1)
+ finally:
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+ return ch
+
+def sanitize_output(arg_string):
+ ''' strips private info out of a string '''
+
+ private_keys = ('password', 'login_password')
+
+ output = []
+ for part in arg_string.split():
+ try:
+ (k, v) = part.split('=', 1)
+ except ValueError:
+ v = heuristic_log_sanitize(part)
+ output.append(v)
+ continue
+
+ if k in private_keys:
+ v = 'VALUE_HIDDEN'
+ else:
+ v = heuristic_log_sanitize(v)
+ output.append('%s=%s' % (k, v))
+
+ output = ' '.join(output)
+ return output
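+
+# e.g. (hypothetical argument string):
+#   sanitize_output('user=deploy password=s3cret host=db01')
+#   -> 'user=deploy password=VALUE_HIDDEN host=db01'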
+
+
+####################################################################
+# option handling code for /usr/bin/ansible and ansible-playbook
+# below this line
+
+class SortedOptParser(optparse.OptionParser):
+ '''Optparser which sorts the options by opt before outputting --help'''
+
+ def format_help(self, formatter=None):
+ self.option_list.sort(key=operator.methodcaller('get_opt_string'))
+ return optparse.OptionParser.format_help(self, formatter=None)
+
+def increment_debug(option, opt, value, parser):
+ global VERBOSITY
+ VERBOSITY += 1
+
+def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
+ async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
+ ''' create an options parser for any ansible script '''
+
+ parser = SortedOptParser(usage, version=version("%prog"))
+ parser.add_option('-v','--verbose', default=False, action="callback",
+ callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
+
+ parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
+ help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
+ parser.add_option('-i', '--inventory-file', dest='inventory',
+ help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
+ default=constants.DEFAULT_HOST_LIST)
+ parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
+ help="set additional variables as key=value or YAML/JSON", default=[])
+ parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user',
+ help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
+ parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
+ help='ask for SSH password')
+ parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
+ help='use this file to authenticate the connection')
+ parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
+ help='ask for vault password')
+ parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE,
+ dest='vault_password_file', help="vault password file")
+ parser.add_option('--list-hosts', dest='listhosts', action='store_true',
+ help='outputs a list of matching hosts; does not execute anything else')
+ parser.add_option('-M', '--module-path', dest='module_path',
+ help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
+ default=None)
+
+ if subset_opts:
+ parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
+ help='further limit selected hosts to an additional pattern')
+
+ parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
+ dest='timeout',
+ help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
+
+ if output_opts:
+ parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
+ help='condense output')
+ parser.add_option('-t', '--tree', dest='tree', default=None,
+ help='log output to this directory')
+
+ if runas_opts:
+ # priv user defaults to root later on to enable detecting when this option was given here
+ parser.add_option('-K', '--ask-sudo-pass', default=constants.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
+ help='ask for sudo password (deprecated, use become)')
+ parser.add_option('--ask-su-pass', default=constants.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
+ help='ask for su password (deprecated, use become)')
+ parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
+ help="run operations with sudo (nopasswd) (deprecated, use become)")
+ parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
+ help='desired sudo user (default=root) (deprecated, use become)')
+ parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
+ help='run operations with su (deprecated, use become)')
+ parser.add_option('-R', '--su-user', default=None,
+ help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)
+
+ # consolidated privilege escalation (become)
+ parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
+ help="run operations with become (nopasswd implied)")
+ parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
+ help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
+ parser.add_option('--become-user', default=None, dest='become_user', type='string',
+ help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
+ parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
+ help='ask for privilege escalation password')
+
+
+ if connect_opts:
+ parser.add_option('-c', '--connection', dest='connection',
+ default=constants.DEFAULT_TRANSPORT,
+ help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT)
+
+ if async_opts:
+ parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
+ dest='poll_interval',
+ help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
+ parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
+ help='run asynchronously, failing after X seconds (default=N/A)')
+
+ if check_opts:
+ parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
+ help="don't make any changes; instead, try to predict some of the changes that may occur"
+ )
+
+ if diff_opts:
+ parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
+ help="when changing (small) files and templates, show the differences in those files; works great with --check"
+ )
+
+ return parser
+
+def parse_extra_vars(extra_vars_opts, vault_pass):
+ extra_vars = {}
+ for extra_vars_opt in extra_vars_opts:
+ extra_vars_opt = to_unicode(extra_vars_opt)
+ if extra_vars_opt.startswith(u"@"):
+ # Argument is a YAML file (JSON is a subset of YAML)
+ extra_vars = combine_vars(extra_vars, parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass))
+ elif extra_vars_opt and extra_vars_opt[0] in u'[{':
+ # Arguments as YAML
+ extra_vars = combine_vars(extra_vars, parse_yaml(extra_vars_opt))
+ else:
+ # Arguments as Key-value
+ extra_vars = combine_vars(extra_vars, parse_kv(extra_vars_opt))
+ return extra_vars
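+
+# Illustrative example (hypothetical values): later -e options override earlier
+# ones, and '@file', inline YAML and key=value forms can be mixed freely:
+#   parse_extra_vars([u'key=orig', u'{"key": "override"}'], vault_pass=None)
+#   -> {'key': 'override'}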
+
+def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
+
+ vault_pass = None
+ new_vault_pass = None
+
+ if ask_vault_pass:
+ vault_pass = getpass.getpass(prompt="Vault password: ")
+
+ if ask_vault_pass and confirm_vault:
+ vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
+ if vault_pass != vault_pass2:
+ raise errors.AnsibleError("Passwords do not match")
+
+ if ask_new_vault_pass:
+ new_vault_pass = getpass.getpass(prompt="New Vault password: ")
+
+ if ask_new_vault_pass and confirm_new:
+ new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
+ if new_vault_pass != new_vault_pass2:
+ raise errors.AnsibleError("Passwords do not match")
+
+ # enforce no newline chars at the end of passwords
+ if vault_pass:
+ vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
+ if new_vault_pass:
+ new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
+
+ return vault_pass, new_vault_pass
+
+def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD):
+ sshpass = None
+ becomepass = None
+ vaultpass = None
+ become_prompt = ''
+
+ if ask_pass:
+ sshpass = getpass.getpass(prompt="SSH password: ")
+ become_prompt = "%s password [defaults to SSH password]: " % become_method.upper()
+ if sshpass:
+ sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
+ else:
+ become_prompt = "%s password: " % become_method.upper()
+
+ if become_ask_pass:
+ becomepass = getpass.getpass(prompt=become_prompt)
+ if ask_pass and becomepass == '':
+ becomepass = sshpass
+ if becomepass:
+ becomepass = to_bytes(becomepass)
+
+ if ask_vault_pass:
+ vaultpass = getpass.getpass(prompt="Vault password: ")
+ if vaultpass:
+ vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()
+
+ return (sshpass, becomepass, vaultpass)
+
+
+def choose_pass_prompt(options):
+
+ if options.ask_su_pass:
+ return 'su'
+ elif options.ask_sudo_pass:
+ return 'sudo'
+
+ return options.become_method
+
+def normalize_become_options(options):
+
+ options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
+ options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER
+
+ if options.become:
+ pass
+ elif options.sudo:
+ options.become = True
+ options.become_method = 'sudo'
+ elif options.su:
+ options.become = True
+ options.become_method = 'su'
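+
+# For illustration only: the legacy flags are folded into their become
+# equivalents, so e.g. '--sudo --sudo-user deploy' ends up as become=True,
+# become_method='sudo', become_user='deploy'.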
+
+
+def do_encrypt(result, encrypt, salt_size=None, salt=None):
+ if PASSLIB_AVAILABLE:
+ try:
+ crypt = getattr(passlib.hash, encrypt)
+ except:
+ raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)
+
+ if salt_size:
+ result = crypt.encrypt(result, salt_size=salt_size)
+ elif salt:
+ result = crypt.encrypt(result, salt=salt)
+ else:
+ result = crypt.encrypt(result)
+ else:
+ raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")
+
+ return result
+
+def last_non_blank_line(buf):
+
+ all_lines = buf.splitlines()
+ all_lines.reverse()
+ for line in all_lines:
+ if (len(line) > 0):
+ return line
+ # shouldn't occur unless there's no output
+ return ""
+
+def filter_leading_non_json_lines(buf):
+ '''
+ used to avoid random output from SSH at the top of JSON output, like messages from
+ tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
+
+ need to filter out any leading line which does not start with '{' or '[' (or is empty).
+ filter only leading lines since multiline JSON is valid.
+ '''
+
+ filtered_lines = StringIO.StringIO()
+ stop_filtering = False
+ for line in buf.splitlines():
+ if stop_filtering or line.startswith('{') or line.startswith('['):
+ stop_filtering = True
+ filtered_lines.write(line + '\n')
+ return filtered_lines.getvalue()
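+
+# Illustrative only: leading ssh noise is dropped, everything from the first
+# JSON-looking line onwards is kept:
+#   filter_leading_non_json_lines('spurious MOTD\n{"changed": false}\n')
+#   -> '{"changed": false}\n'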
+
+def boolean(value):
+ val = str(value)
+ if val.lower() in [ "true", "t", "y", "1", "yes" ]:
+ return True
+ else:
+ return False
+
+def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
+ """
+ helper function for connection plugins to create privilege escalation commands
+ """
+
+ randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
+ success_key = 'BECOME-SUCCESS-%s' % randbits
+ prompt = None
+ becomecmd = None
+
+ shell = shell or '$SHELL'
+
+ if method == 'sudo':
+ # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
+ # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
+ # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
+ # string to the user's shell. We loop reading output until we see the randomly-generated
+ # sudo prompt set with the -p option.
+ prompt = '[sudo via ansible, key=%s] password: ' % randbits
+ exe = exe or C.DEFAULT_SUDO_EXE
+ becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
+ (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
+
+ elif method == 'su':
+ exe = exe or C.DEFAULT_SU_EXE
+ flags = flags or C.DEFAULT_SU_FLAGS
+ becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
+
+ elif method == 'pbrun':
+ prompt = 'assword:'
+ exe = exe or 'pbrun'
+ flags = flags or ''
+ becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd)))
+
+ elif method == 'pfexec':
+ exe = exe or 'pfexec'
+ flags = flags or ''
+ # No user as it uses its own exec_attr to figure it out
+ becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd)))
+
+ if becomecmd is None:
+ raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
+
+ return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
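+
+# Rough shape of the result for the sudo method, for illustration only (quoting
+# simplified; the real command uses the configured sudo flags and a random key):
+#   make_become_cmd('/bin/ls', 'root', '/bin/sh', 'sudo') returns roughly
+#   ("/bin/sh -c 'sudo -k && sudo -S -p \"[sudo via ansible, key=...] password: \" -u root /bin/sh -c ...'",
+#    '[sudo via ansible, key=...] password: ', 'BECOME-SUCCESS-...')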
+
+
+def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd):
+ """
+ helper function for connection plugins to create sudo commands
+ """
+ return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe)
+
+
+def make_su_cmd(su_user, executable, cmd):
+ """
+ Helper function for connection plugins to create direct su commands
+ """
+ return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE)
+
+def get_diff(diff):
+ # called by --diff usage in playbook and runner via callbacks
+ # include names in diffs 'before' and 'after' and do diff -U 10
+
+ try:
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore')
+ ret = []
+ if 'dst_binary' in diff:
+ ret.append("diff skipped: destination file appears to be binary\n")
+ if 'src_binary' in diff:
+ ret.append("diff skipped: source file appears to be binary\n")
+ if 'dst_larger' in diff:
+ ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
+ if 'src_larger' in diff:
+ ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
+ if 'before' in diff and 'after' in diff:
+ if 'before_header' in diff:
+ before_header = "before: %s" % diff['before_header']
+ else:
+ before_header = 'before'
+ if 'after_header' in diff:
+ after_header = "after: %s" % diff['after_header']
+ else:
+ after_header = 'after'
+ differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
+ for line in list(differ):
+ ret.append(line)
+ return u"".join(ret)
+ except UnicodeDecodeError:
+ return ">> the files are different, but the diff library cannot compare unicode strings"
+
+def is_list_of_strings(items):
+ for x in items:
+ if not isinstance(x, basestring):
+ return False
+ return True
+
+def list_union(a, b):
+ result = []
+ for x in a:
+ if x not in result:
+ result.append(x)
+ for x in b:
+ if x not in result:
+ result.append(x)
+ return result
+
+def list_intersection(a, b):
+ result = []
+ for x in a:
+ if x in b and x not in result:
+ result.append(x)
+ return result
+
+def list_difference(a, b):
+ result = []
+ for x in a:
+ if x not in b and x not in result:
+ result.append(x)
+ for x in b:
+ if x not in a and x not in result:
+ result.append(x)
+ return result
+
+def contains_vars(data):
+ '''
+ returns True if the data contains a variable pattern
+ '''
+ return "$" in data or "{{" in data
+
+def safe_eval(expr, locals={}, include_exceptions=False):
+ '''
+ This is intended for allowing things like:
+ with_items: a_list_variable
+
+ Where Jinja2 would return a string but we do not want to allow it to
+ call functions (outside of Jinja2, where the env is constrained). If
+ the input data to this function came from an untrusted (remote) source,
+ it should first be run through _clean_data_struct() to ensure the data
+ is further sanitized prior to evaluation.
+
+ Based on:
+ http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
+ '''
+
+ # this is the whitelist of AST nodes we are going to
+ # allow in the evaluation. Any node type other than
+ # those listed here will raise an exception in our custom
+ # visitor class defined below.
+ SAFE_NODES = set(
+ (
+ ast.Add,
+ ast.BinOp,
+ ast.Call,
+ ast.Compare,
+ ast.Dict,
+ ast.Div,
+ ast.Expression,
+ ast.List,
+ ast.Load,
+ ast.Mult,
+ ast.Num,
+ ast.Name,
+ ast.Str,
+ ast.Sub,
+ ast.Tuple,
+ ast.UnaryOp,
+ )
+ )
+
+ # AST node types were expanded after 2.6
+ if not sys.version.startswith('2.6'):
+ SAFE_NODES = SAFE_NODES.union(set((ast.Set,)))
+
+ filter_list = []
+ for filter in filter_loader.all():
+ filter_list.extend(filter.filters().keys())
+
+ CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
+
+ class CleansingNodeVisitor(ast.NodeVisitor):
+ def generic_visit(self, node, inside_call=False):
+ if type(node) not in SAFE_NODES:
+ raise Exception("invalid expression (%s)" % expr)
+ elif isinstance(node, ast.Call):
+ inside_call = True
+ elif isinstance(node, ast.Name) and inside_call:
+ if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST:
+ raise Exception("invalid function: %s" % node.id)
+ # iterate over all child nodes
+ for child_node in ast.iter_child_nodes(node):
+ self.generic_visit(child_node, inside_call)
+
+ if not isinstance(expr, basestring):
+ # already templated to a datastructure, perhaps?
+ if include_exceptions:
+ return (expr, None)
+ return expr
+
+ cnv = CleansingNodeVisitor()
+ try:
+ parsed_tree = ast.parse(expr, mode='eval')
+ cnv.visit(parsed_tree)
+ compiled = compile(parsed_tree, expr, 'eval')
+ result = eval(compiled, {}, locals)
+
+ if include_exceptions:
+ return (result, None)
+ else:
+ return result
+ except SyntaxError, e:
+ # special handling for syntax errors, we just return
+ # the expression string back as-is
+ if include_exceptions:
+ return (expr, None)
+ return expr
+ except Exception, e:
+ if include_exceptions:
+ return (expr, e)
+ return expr
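+
+# Illustration (hypothetical inputs): literal-style expressions are evaluated,
+# anything rejected by the visitor is handed back unchanged as a string:
+#   safe_eval("[1, 2, 3]")          -> [1, 2, 3]
+#   safe_eval("__import__('os')")   -> "__import__('os')"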
+
+
+def listify_lookup_plugin_terms(terms, basedir, inject):
+
+ from ansible.utils import template
+
+ if isinstance(terms, basestring):
+ # someone did:
+ # with_items: alist
+ # OR
+ # with_items: {{ alist }}
+
+ stripped = terms.strip()
+ if not (stripped.startswith('{') or stripped.startswith('[')) and \
+ not stripped.startswith("/") and \
+ not stripped.startswith('set([') and \
+ not LOOKUP_REGEX.search(terms):
+ # if not already a list, get ready to evaluate with Jinja2
+ # not sure why the "/" is in above code :)
+ try:
+ new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
+ if isinstance(new_terms, basestring) and "{{" in new_terms:
+ pass
+ else:
+ terms = new_terms
+ except:
+ pass
+
+ if '{' in terms or '[' in terms:
+ # Jinja2 already evaluated a variable to a list.
+ # Jinja2-ified list needs to be converted back to a real type
+ # TODO: something a bit less heavy than eval
+ return safe_eval(terms)
+
+ if isinstance(terms, basestring):
+ terms = [ terms ]
+
+ return terms
+
+def combine_vars(a, b):
+
+ _validate_both_dicts(a, b)
+
+ if C.DEFAULT_HASH_BEHAVIOUR == "merge":
+ return merge_hash(a, b)
+ else:
+ return dict(a.items() + b.items())
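+
+# Behaviour sketch (illustrative): with the default 'replace' hash behaviour the
+# second dict wins outright, while 'merge' recurses into nested dicts:
+#   combine_vars({'a': {'x': 1}}, {'a': {'y': 2}})
+#   -> {'a': {'y': 2}}             # hash_behaviour = replace (default)
+#   -> {'a': {'x': 1, 'y': 2}}     # hash_behaviour = merge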
+
+def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
+ '''Return a random password string of length containing only chars.'''
+
+ password = []
+ while len(password) < length:
+ new_char = os.urandom(1)
+ if new_char in chars:
+ password.append(new_char)
+
+ return ''.join(password)
+
+def before_comment(msg):
+ ''' what's the part of a string before a comment? '''
+ msg = msg.replace("\#","**NOT_A_COMMENT**")
+ msg = msg.split("#")[0]
+ msg = msg.replace("**NOT_A_COMMENT**","#")
+ return msg
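+
+# Example (illustrative only): escaped hashes survive, real comments are dropped:
+#   before_comment('foo=bar # a comment')  -> 'foo=bar '
+#   before_comment('color=\#fff # hex')    -> 'color=#fff '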
+
+def load_vars(basepath, results, vault_password=None):
+ """
+ Load variables from any potential yaml filename combinations of basepath,
+ returning result.
+ """
+
+ paths_to_check = [ "".join([basepath, ext])
+ for ext in C.YAML_FILENAME_EXTENSIONS ]
+
+ found_paths = []
+
+ for path in paths_to_check:
+ found, results = _load_vars_from_path(path, results, vault_password=vault_password)
+ if found:
+ found_paths.append(path)
+
+
+ # disallow the potentially confusing situation of multiple variable files
+ # existing for the same name, e.g. both group_vars/all.yml and
+ # group_vars/all.yaml at once
+ if len(found_paths) > 1:
+ raise errors.AnsibleError("Multiple variable files found. "
+ "There should only be one. %s" % ( found_paths, ))
+
+ return results
+
+## load variables from yaml files/dirs
+# e.g. host/group_vars
+#
+def _load_vars_from_path(path, results, vault_password=None):
+ """
+ Robustly access the file at path and load variables, carefully reporting
+ errors in a friendly/informative way.
+
+ Return the tuple (found, new_results, )
+ """
+
+ try:
+ # in the case of a symbolic link, we want the stat of the link itself,
+ # not its target
+ pathstat = os.lstat(path)
+ except os.error, err:
+ # most common case is that nothing exists at that path.
+ if err.errno == errno.ENOENT:
+ return False, results
+ # otherwise this is a condition we should report to the user
+ raise errors.AnsibleError(
+ "%s is not accessible: %s."
+ " Please check its permissions." % ( path, err.strerror))
+
+ # symbolic link
+ if stat.S_ISLNK(pathstat.st_mode):
+ try:
+ target = os.path.realpath(path)
+ except os.error, err2:
+ raise errors.AnsibleError("The symbolic link at %s "
+ "is not readable: %s. Please check its permissions."
+ % (path, err2.strerror, ))
+ # follow symbolic link chains by recursing, so we repeat the same
+ # permissions checks above and provide useful errors.
+ return _load_vars_from_path(target, results, vault_password)
+
+ # directory
+ if stat.S_ISDIR(pathstat.st_mode):
+
+ # support organizing variables across multiple files in a directory
+ return True, _load_vars_from_folder(path, results, vault_password=vault_password)
+
+ # regular file
+ elif stat.S_ISREG(pathstat.st_mode):
+ data = parse_yaml_from_file(path, vault_password=vault_password)
+ if data and type(data) != dict:
+ raise errors.AnsibleError(
+ "%s must be stored as a dictionary/hash" % path)
+ elif data is None:
+ data = {}
+
+ # combine vars overrides by default but can be configured to do a
+ # hash merge in settings
+ results = combine_vars(results, data)
+ return True, results
+
+ # something else? could be a fifo, socket, device, etc.
+ else:
+ raise errors.AnsibleError("Expected a variable file or directory "
+ "but found a non-file object at path %s" % (path, ))
+
+def _load_vars_from_folder(folder_path, results, vault_password=None):
+ """
+ Load all variables within a folder recursively.
+ """
+
+ # this function and _load_vars_from_path are mutually recursive
+
+ try:
+ names = os.listdir(folder_path)
+ except os.error, err:
+ raise errors.AnsibleError(
+ "This folder cannot be listed: %s: %s."
+ % ( folder_path, err.strerror))
+
+ # evaluate files in a stable order rather than whatever order the
+ # filesystem lists them.
+ names.sort()
+
+ # do not parse hidden files or dirs, e.g. .svn/
+ paths = [os.path.join(folder_path, name) for name in names
+ if not name.startswith('.')
+ and os.path.splitext(name)[1] in C.YAML_FILENAME_EXTENSIONS]
+ for path in paths:
+ _found, results = _load_vars_from_path(path, results, vault_password=vault_password)
+ return results
+
+def update_hash(hash, key, new_value):
+ ''' used to avoid nested .update calls on the parent '''
+
+ value = hash.get(key, {})
+ value.update(new_value)
+ hash[key] = value
+
+def censor_unlogged_data(data):
+ '''
+ used when the no_log: True attribute is passed to a task to keep its data out of callbacks.
+ NOT intended to prevent variable registration, only to keep results from showing up on
+ screen
+ '''
+ new_data = {}
+ for (x,y) in data.iteritems():
+ if x in [ 'skipped', 'changed', 'failed', 'rc' ]:
+ new_data[x] = y
+ new_data['censored'] = 'results hidden due to no_log parameter'
+ return new_data
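+
+# Illustration (hypothetical input): only the status-style keys survive, the rest
+# is replaced by the 'censored' marker:
+#   censor_unlogged_data({'changed': True, 'stdout': 'secret', 'rc': 0})
+#   -> {'changed': True, 'rc': 0, 'censored': 'results hidden due to no_log parameter'}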
+
+def check_mutually_exclusive_privilege(options, parser):
+
+ # privilege escalation command line arguments need to be mutually exclusive
+ if (options.su or options.su_user or options.ask_su_pass) and \
+ (options.sudo or options.sudo_user or options.ask_sudo_pass) or \
+ (options.su or options.su_user or options.ask_su_pass) and \
+ (options.become or options.become_user or options.become_ask_pass) or \
+ (options.sudo or options.sudo_user or options.ask_sudo_pass) and \
+ (options.become or options.become_user or options.become_ask_pass):
+
+ parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
+ "and su arguments ('--su', '--su-user', and '--ask-su-pass') "
+ "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
+ " are exclusive of each other")
+
+
diff --git a/v1/ansible/utils/cmd_functions.py b/v1/ansible/utils/cmd_functions.py
new file mode 100644
index 00000000000..6525260f107
--- /dev/null
+++ b/v1/ansible/utils/cmd_functions.py
@@ -0,0 +1,59 @@
+# (c) 2012, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+import shlex
+import subprocess
+import select
+
+def run_cmd(cmd, live=False, readsize=10):
+
+ #readsize = 10
+
+ cmdargs = shlex.split(cmd)
+ p = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ stdout = ''
+ stderr = ''
+ rpipes = [p.stdout, p.stderr]
+ while True:
+ rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
+
+ if p.stdout in rfd:
+ dat = os.read(p.stdout.fileno(), readsize)
+ if live:
+ sys.stdout.write(dat)
+ stdout += dat
+ if dat == '':
+ rpipes.remove(p.stdout)
+ if p.stderr in rfd:
+ dat = os.read(p.stderr.fileno(), readsize)
+ stderr += dat
+ if live:
+ sys.stdout.write(dat)
+ if dat == '':
+ rpipes.remove(p.stderr)
+ # only break out if we've emptied the pipes, or there is nothing to
+ # read from and the process has finished.
+ if (not rpipes or not rfd) and p.poll() is not None:
+ break
+ # Calling wait while there are still pipes to read can cause a lock
+ elif not rpipes and p.poll() is None:
+ p.wait()
+
+ return p.returncode, stdout, stderr
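+
+# Usage sketch (illustrative only): stream output live while still capturing it:
+#   rc, out, err = run_cmd("git status", live=True)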
diff --git a/lib/ansible/utils/display_functions.py b/v1/ansible/utils/display_functions.py
similarity index 100%
rename from lib/ansible/utils/display_functions.py
rename to v1/ansible/utils/display_functions.py
diff --git a/v2/ansible/utils/hashing.py b/v1/ansible/utils/hashing.py
similarity index 92%
rename from v2/ansible/utils/hashing.py
rename to v1/ansible/utils/hashing.py
index 5e378db79f4..a7d142e5bd4 100644
--- a/v2/ansible/utils/hashing.py
+++ b/v1/ansible/utils/hashing.py
@@ -20,7 +20,6 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
-from ansible.errors import AnsibleError
# Note, sha1 is the only hash algorithm compatible with python2.4 and with
# FIPS-140 mode (as of 11-2014)
@@ -44,8 +43,6 @@ def secure_hash_s(data, hash_func=sha1):
digest = hash_func()
try:
- if not isinstance(data, basestring):
- data = "%s" % data
digest.update(data)
except UnicodeEncodeError:
digest.update(data.encode('utf-8'))
@@ -65,8 +62,8 @@ def secure_hash(filename, hash_func=sha1):
digest.update(block)
block = infile.read(blocksize)
infile.close()
- except IOError as e:
- raise AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
+ except IOError, e:
+ raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
return digest.hexdigest()
# The checksum algorithm must match with the algorithm in ShellModule.checksum() method
diff --git a/v2/ansible/utils/module_docs.py b/v1/ansible/utils/module_docs.py
similarity index 83%
rename from v2/ansible/utils/module_docs.py
rename to v1/ansible/utils/module_docs.py
index 632b4a00c2a..c6920571726 100644
--- a/v2/ansible/utils/module_docs.py
+++ b/v1/ansible/utils/module_docs.py
@@ -23,7 +23,9 @@ import ast
import yaml
import traceback
-from ansible.plugins import fragment_loader
+from collections import MutableMapping, MutableSet, MutableSequence
+
+from ansible import utils
# modules that are ok that they do not have documentation strings
BLACKLIST_MODULES = [
@@ -66,7 +68,7 @@ def get_docstring(filename, verbose=False):
if fragment_slug != 'doesnotexist':
- fragment_class = fragment_loader.get(fragment_name)
+ fragment_class = utils.plugins.fragment_loader.get(fragment_name)
assert fragment_class is not None
fragment_yaml = getattr(fragment_class, fragment_var, '{}')
@@ -86,7 +88,14 @@ def get_docstring(filename, verbose=False):
if not doc.has_key(key):
doc[key] = value
else:
- doc[key].update(value)
+ if isinstance(doc[key], MutableMapping):
+ doc[key].update(value)
+ elif isinstance(doc[key], MutableSet):
+ doc[key].add(value)
+ elif isinstance(doc[key], MutableSequence):
+ doc[key] = sorted(frozenset(doc[key] + value))
+ else:
+ raise Exception("Attempt to extend a documentation fragment of unknown type")
if 'EXAMPLES' in (t.id for t in child.targets):
plainexamples = child.value.s[1:] # Skip first empty line
diff --git a/v2/ansible/utils/module_docs_fragments b/v1/ansible/utils/module_docs_fragments
similarity index 100%
rename from v2/ansible/utils/module_docs_fragments
rename to v1/ansible/utils/module_docs_fragments
diff --git a/lib/ansible/utils/plugins.py b/v1/ansible/utils/plugins.py
similarity index 100%
rename from lib/ansible/utils/plugins.py
rename to v1/ansible/utils/plugins.py
diff --git a/lib/ansible/utils/string_functions.py b/v1/ansible/utils/string_functions.py
similarity index 100%
rename from lib/ansible/utils/string_functions.py
rename to v1/ansible/utils/string_functions.py
diff --git a/lib/ansible/utils/su_prompts.py b/v1/ansible/utils/su_prompts.py
similarity index 100%
rename from lib/ansible/utils/su_prompts.py
rename to v1/ansible/utils/su_prompts.py
diff --git a/lib/ansible/utils/template.py b/v1/ansible/utils/template.py
similarity index 100%
rename from lib/ansible/utils/template.py
rename to v1/ansible/utils/template.py
diff --git a/v2/ansible/utils/unicode.py b/v1/ansible/utils/unicode.py
similarity index 93%
rename from v2/ansible/utils/unicode.py
rename to v1/ansible/utils/unicode.py
index 2cff2e5e45c..7bd035c0075 100644
--- a/v2/ansible/utils/unicode.py
+++ b/v1/ansible/utils/unicode.py
@@ -19,8 +19,6 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-from six import string_types, text_type, binary_type, PY3
-
# to_bytes and to_unicode were written by Toshio Kuratomi for the
# python-kitchen library https://pypi.python.org/pypi/kitchen
# They are licensed in kitchen under the terms of the GPLv2+
@@ -37,9 +35,6 @@ _LATIN1_ALIASES = frozenset(('latin-1', 'LATIN-1', 'latin1', 'LATIN1',
# EXCEPTION_CONVERTERS is defined below due to using to_unicode
-if PY3:
- basestring = (str, bytes)
-
def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
'''Convert an object into a :class:`unicode` string
@@ -94,12 +89,12 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
# Could use isbasestring/isunicode here but we want this code to be as
# fast as possible
if isinstance(obj, basestring):
- if isinstance(obj, text_type):
+ if isinstance(obj, unicode):
return obj
if encoding in _UTF8_ALIASES:
- return text_type(obj, 'utf-8', errors)
+ return unicode(obj, 'utf-8', errors)
if encoding in _LATIN1_ALIASES:
- return text_type(obj, 'latin-1', errors)
+ return unicode(obj, 'latin-1', errors)
return obj.decode(encoding, errors)
if not nonstring:
@@ -115,19 +110,19 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
simple = None
if not simple:
try:
- simple = text_type(obj)
+ simple = str(obj)
except UnicodeError:
try:
simple = obj.__str__()
except (UnicodeError, AttributeError):
simple = u''
- if isinstance(simple, binary_type):
- return text_type(simple, encoding, errors)
+ if isinstance(simple, str):
+ return unicode(simple, encoding, errors)
return simple
elif nonstring in ('repr', 'strict'):
obj_repr = repr(obj)
- if isinstance(obj_repr, binary_type):
- obj_repr = text_type(obj_repr, encoding, errors)
+ if isinstance(obj_repr, str):
+ obj_repr = unicode(obj_repr, encoding, errors)
if nonstring == 'repr':
return obj_repr
raise TypeError('to_unicode was given "%(obj)s" which is neither'
@@ -203,19 +198,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
# Could use isbasestring, isbytestring here but we want this to be as fast
# as possible
if isinstance(obj, basestring):
- if isinstance(obj, binary_type):
+ if isinstance(obj, str):
return obj
return obj.encode(encoding, errors)
if not nonstring:
nonstring = 'simplerepr'
if nonstring == 'empty':
- return b''
+ return ''
elif nonstring == 'passthru':
return obj
elif nonstring == 'simplerepr':
try:
- simple = binary_type(obj)
+ simple = str(obj)
except UnicodeError:
try:
simple = obj.__str__()
@@ -225,19 +220,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
try:
simple = obj.__unicode__()
except (AttributeError, UnicodeError):
- simple = b''
- if isinstance(simple, text_type):
+ simple = ''
+ if isinstance(simple, unicode):
simple = simple.encode(encoding, 'replace')
return simple
elif nonstring in ('repr', 'strict'):
try:
obj_repr = obj.__repr__()
except (AttributeError, UnicodeError):
- obj_repr = b''
- if isinstance(obj_repr, text_type):
+ obj_repr = ''
+ if isinstance(obj_repr, unicode):
obj_repr = obj_repr.encode(encoding, errors)
else:
- obj_repr = binary_type(obj_repr)
+ obj_repr = str(obj_repr)
if nonstring == 'repr':
return obj_repr
raise TypeError('to_bytes was given "%(obj)s" which is neither'
diff --git a/lib/ansible/utils/vault.py b/v1/ansible/utils/vault.py
similarity index 100%
rename from lib/ansible/utils/vault.py
rename to v1/ansible/utils/vault.py
diff --git a/v1/bin/ansible b/v1/bin/ansible
new file mode 100755
index 00000000000..7fec34ec81e
--- /dev/null
+++ b/v1/bin/ansible
@@ -0,0 +1,207 @@
+#!/usr/bin/env python
+
+# (c) 2012, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+########################################################
+
+__requires__ = ['ansible']
+try:
+ import pkg_resources
+except Exception:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. But we
+ # have code that better expresses the errors in the places where the code
+ # is actually used (the deps are optional for many code paths) so we don't
+ # want to fail here.
+ pass
+
+import os
+import sys
+
+from ansible.runner import Runner
+import ansible.constants as C
+from ansible import utils
+from ansible import errors
+from ansible import callbacks
+from ansible import inventory
+########################################################
+
+class Cli(object):
+ ''' code behind bin/ansible '''
+
+ # ----------------------------------------------
+
+ def __init__(self):
+ self.stats = callbacks.AggregateStats()
+ self.callbacks = callbacks.CliRunnerCallbacks()
+ if C.DEFAULT_LOAD_CALLBACK_PLUGINS:
+ callbacks.load_callback_plugins()
+
+ # ----------------------------------------------
+
+ def parse(self):
+ ''' create an options parser for bin/ansible '''
+
+ parser = utils.base_parser(
+ constants=C,
+ runas_opts=True,
+ subset_opts=True,
+ async_opts=True,
+ output_opts=True,
+ connect_opts=True,
+ check_opts=True,
+ diff_opts=False,
+ usage='%prog [options]'
+ )
+
+ parser.add_option('-a', '--args', dest='module_args',
+ help="module arguments", default=C.DEFAULT_MODULE_ARGS)
+ parser.add_option('-m', '--module-name', dest='module_name',
+ help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
+ default=C.DEFAULT_MODULE_NAME)
+
+ options, args = parser.parse_args()
+ self.callbacks.options = options
+
+ if len(args) == 0 or len(args) > 1:
+ parser.print_help()
+ sys.exit(1)
+
+ # privilege escalation command line arguments need to be mutually exclusive
+ utils.check_mutually_exclusive_privilege(options, parser)
+
+ if (options.ask_vault_pass and options.vault_password_file):
+ parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
+
+ return (options, args)
+
+ # ----------------------------------------------
+
+ def run(self, options, args):
+ ''' use Runner lib to do SSH things '''
+
+ pattern = args[0]
+
+ sshpass = becomepass = vault_pass = become_method = None
+
+ # Never ask for an SSH password when we run with local connection
+ if options.connection == "local":
+ options.ask_pass = False
+ else:
+ options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
+
+ options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
+
+ # become
+ utils.normalize_become_options(options)
+ prompt_method = utils.choose_pass_prompt(options)
+ (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, become_ask_pass=options.become_ask_pass, ask_vault_pass=options.ask_vault_pass, become_method=prompt_method)
+
+ # read vault_pass from a file
+ if not options.ask_vault_pass and options.vault_password_file:
+ vault_pass = utils.read_vault_file(options.vault_password_file)
+
+ extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)
+
+ inventory_manager = inventory.Inventory(options.inventory, vault_password=vault_pass)
+ if options.subset:
+ inventory_manager.subset(options.subset)
+ hosts = inventory_manager.list_hosts(pattern)
+
+ if len(hosts) == 0:
+ callbacks.display("No hosts matched", stderr=True)
+ sys.exit(0)
+
+ if options.listhosts:
+ for host in hosts:
+ callbacks.display(' %s' % host)
+ sys.exit(0)
+
+ if options.module_name in ['command','shell'] and not options.module_args:
+ callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True)
+ sys.exit(1)
+
+ if options.tree:
+ utils.prepare_writeable_dir(options.tree)
+
+ runner = Runner(
+ module_name=options.module_name,
+ module_path=options.module_path,
+ module_args=options.module_args,
+ remote_user=options.remote_user,
+ remote_pass=sshpass,
+ inventory=inventory_manager,
+ timeout=options.timeout,
+ private_key_file=options.private_key_file,
+ forks=options.forks,
+ pattern=pattern,
+ callbacks=self.callbacks,
+ transport=options.connection,
+ subset=options.subset,
+ check=options.check,
+ diff=options.check,
+ vault_pass=vault_pass,
+ become=options.become,
+ become_method=options.become_method,
+ become_pass=becomepass,
+ become_user=options.become_user,
+ extra_vars=extra_vars,
+ )
+
+ if options.seconds:
+ callbacks.display("background launch...\n\n", color='cyan')
+ results, poller = runner.run_async(options.seconds)
+ results = self.poll_while_needed(poller, options)
+ else:
+ results = runner.run()
+
+ return (runner, results)
+
+ # ----------------------------------------------
+
+ def poll_while_needed(self, poller, options):
+ ''' summarize results from Runner '''
+
+ # BACKGROUND POLL LOGIC when -B and -P are specified
+ if options.seconds and options.poll_interval > 0:
+ poller.wait(options.seconds, options.poll_interval)
+
+ return poller.results
+
+
+########################################################
+
+if __name__ == '__main__':
+ callbacks.display("", log_only=True)
+ callbacks.display(" ".join(sys.argv), log_only=True)
+ callbacks.display("", log_only=True)
+
+ cli = Cli()
+ (options, args) = cli.parse()
+ try:
+ (runner, results) = cli.run(options, args)
+ for result in results['contacted'].values():
+ if 'failed' in result or result.get('rc', 0) != 0:
+ sys.exit(2)
+ if results['dark']:
+ sys.exit(3)
+ except errors.AnsibleError, e:
+ # Generic handler for ansible specific errors
+ callbacks.display("ERROR: %s" % str(e), stderr=True, color='red')
+ sys.exit(1)
+
diff --git a/v1/bin/ansible-doc b/v1/bin/ansible-doc
new file mode 100755
index 00000000000..dff7cecce79
--- /dev/null
+++ b/v1/bin/ansible-doc
@@ -0,0 +1,337 @@
+#!/usr/bin/env python
+
+# (c) 2012, Jan-Piet Mens
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import sys
+import textwrap
+import re
+import optparse
+import datetime
+import subprocess
+import fcntl
+import termios
+import struct
+
+from ansible import utils
+from ansible.utils import module_docs
+import ansible.constants as C
+from ansible.utils import version
+import traceback
+
+MODULEDIR = C.DEFAULT_MODULE_PATH
+
+BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm')
+IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"]
+
+_ITALIC = re.compile(r"I\(([^)]+)\)")
+_BOLD = re.compile(r"B\(([^)]+)\)")
+_MODULE = re.compile(r"M\(([^)]+)\)")
+_URL = re.compile(r"U\(([^)]+)\)")
+_CONST = re.compile(r"C\(([^)]+)\)")
+PAGER = 'less'
+LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars)
+ # -S (chop long lines) -X (disable termcap init and de-init)
+
+def pager_print(text):
+ ''' just print text '''
+ print text
+
+def pager_pipe(text, cmd):
+ ''' pipe text through a pager '''
+ if 'LESS' not in os.environ:
+ os.environ['LESS'] = LESS_OPTS
+ try:
+ cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
+ cmd.communicate(input=text)
+ except IOError:
+ pass
+ except KeyboardInterrupt:
+ pass
+
+def pager(text):
+ ''' find reasonable way to display text '''
+ # this is a much simpler form of what is in pydoc.py
+ if not sys.stdout.isatty():
+ pager_print(text)
+ elif 'PAGER' in os.environ:
+ if sys.platform == 'win32':
+ pager_print(text)
+ else:
+ pager_pipe(text, os.environ['PAGER'])
+ elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0:
+ pager_pipe(text, 'less')
+ else:
+ pager_print(text)
+
+def tty_ify(text):
+
+ t = _ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
+ t = _BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
+ t = _MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
+ t = _URL.sub(r"\1", t) # U(word) => word
+ t = _CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
+
+ return t
+
+def get_man_text(doc):
+
+ opt_indent=" "
+ text = []
+ text.append("> %s\n" % doc['module'].upper())
+
+ desc = " ".join(doc['description'])
+
+ text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=" ", subsequent_indent=" "))
+
+ if 'option_keys' in doc and len(doc['option_keys']) > 0:
+ text.append("Options (= is mandatory):\n")
+
+ for o in sorted(doc['option_keys']):
+ opt = doc['options'][o]
+
+ if opt.get('required', False):
+ opt_leadin = "="
+ else:
+ opt_leadin = "-"
+
+ text.append("%s %s" % (opt_leadin, o))
+
+ desc = " ".join(opt['description'])
+
+ if 'choices' in opt:
+ choices = ", ".join(str(i) for i in opt['choices'])
+ desc = desc + " (Choices: " + choices + ")"
+ if 'default' in opt:
+ default = str(opt['default'])
+ desc = desc + " [Default: " + default + "]"
+ text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=opt_indent,
+ subsequent_indent=opt_indent))
+
+ if 'notes' in doc and len(doc['notes']) > 0:
+ notes = " ".join(doc['notes'])
+ text.append("Notes:%s\n" % textwrap.fill(tty_ify(notes), initial_indent=" ",
+ subsequent_indent=opt_indent))
+
+
+ if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
+ req = ", ".join(doc['requirements'])
+ text.append("Requirements:%s\n" % textwrap.fill(tty_ify(req), initial_indent=" ",
+ subsequent_indent=opt_indent))
+
+ if 'examples' in doc and len(doc['examples']) > 0:
+ text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
+ for ex in doc['examples']:
+ text.append("%s\n" % (ex['code']))
+
+ if 'plainexamples' in doc and doc['plainexamples'] is not None:
+ text.append("EXAMPLES:")
+ text.append(doc['plainexamples'])
+ if 'returndocs' in doc and doc['returndocs'] is not None:
+ text.append("RETURN VALUES:")
+ text.append(doc['returndocs'])
+ text.append('')
+
+ return "\n".join(text)
+
+
+def get_snippet_text(doc):
+
+ text = []
+ desc = tty_ify(" ".join(doc['short_description']))
+ text.append("- name: %s" % (desc))
+ text.append(" action: %s" % (doc['module']))
+
+ for o in sorted(doc['options'].keys()):
+ opt = doc['options'][o]
+ desc = tty_ify(" ".join(opt['description']))
+
+ if opt.get('required', False):
+ s = o + "="
+ else:
+ s = o
+
+ text.append(" %-20s # %s" % (s, desc))
+ text.append('')
+
+ return "\n".join(text)
+
+def get_module_list_text(module_list):
+ tty_size = 0
+ if os.isatty(0):
+ tty_size = struct.unpack('HHHH',
+ fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1]
+ columns = max(60, tty_size)
+ displace = max(len(x) for x in module_list)
+ linelimit = columns - displace - 5
+ text = []
+ deprecated = []
+ for module in sorted(set(module_list)):
+
+ if module in module_docs.BLACKLIST_MODULES:
+ continue
+
+ filename = utils.plugins.module_finder.find_plugin(module)
+
+ if filename is None:
+ continue
+ if filename.endswith(".ps1"):
+ continue
+ if os.path.isdir(filename):
+ continue
+
+ try:
+ doc, plainexamples, returndocs = module_docs.get_docstring(filename)
+ desc = tty_ify(doc.get('short_description', '?')).strip()
+ if len(desc) > linelimit:
+ desc = desc[:linelimit] + '...'
+
+ if module.startswith('_'): # Handle deprecated
+ deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
+ else:
+ text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
+ except:
+ traceback.print_exc()
+ sys.stderr.write("ERROR: module %s has a documentation formatting error or is missing documentation\n" % module)
+
+ if len(deprecated) > 0:
+ text.append("\nDEPRECATED:")
+ text.extend(deprecated)
+ return "\n".join(text)
+
+def find_modules(path, module_list):
+
+ if os.path.isdir(path):
+ for module in os.listdir(path):
+ if module.startswith('.'):
+ continue
+ elif os.path.isdir(module):
+ find_modules(module, module_list)
+ elif any(module.endswith(x) for x in BLACKLIST_EXTS):
+ continue
+ elif module.startswith('__'):
+ continue
+ elif module in IGNORE_FILES:
+ continue
+ elif module.startswith('_'):
+ fullpath = '/'.join([path,module])
+ if os.path.islink(fullpath): # avoids aliases
+ continue
+
+ module = os.path.splitext(module)[0] # removes the extension
+ module_list.append(module)
+
+def main():
+
+ p = optparse.OptionParser(
+ version=version("%prog"),
+ usage='usage: %prog [options] [module...]',
+ description='Show Ansible module documentation',
+ )
+
+ p.add_option("-M", "--module-path",
+ action="store",
+ dest="module_path",
+ default=MODULEDIR,
+ help="Ansible modules/ directory")
+ p.add_option("-l", "--list",
+ action="store_true",
+ default=False,
+ dest='list_dir',
+ help='List available modules')
+ p.add_option("-s", "--snippet",
+ action="store_true",
+ default=False,
+ dest='show_snippet',
+ help='Show playbook snippet for specified module(s)')
+ p.add_option('-v', action='version', help='Show version number and exit')
+
+ (options, args) = p.parse_args()
+
+ if options.module_path is not None:
+ for i in options.module_path.split(os.pathsep):
+ utils.plugins.module_finder.add_directory(i)
+
+ if options.list_dir:
+ # list modules
+ paths = utils.plugins.module_finder._get_paths()
+ module_list = []
+ for path in paths:
+ find_modules(path, module_list)
+
+ pager(get_module_list_text(module_list))
+ sys.exit()
+
+ if len(args) == 0:
+ p.print_help()
+
+ def print_paths(finder):
+ ''' Returns a string suitable for printing of the search path '''
+
+ # Uses a list to get the order right
+ ret = []
+ for i in finder._get_paths():
+ if i not in ret:
+ ret.append(i)
+ return os.pathsep.join(ret)
+
+ text = ''
+ for module in args:
+
+ filename = utils.plugins.module_finder.find_plugin(module)
+ if filename is None:
+ sys.stderr.write("module %s not found in %s\n" % (module, print_paths(utils.plugins.module_finder)))
+ continue
+
+ if any(filename.endswith(x) for x in BLACKLIST_EXTS):
+ continue
+
+ try:
+ doc, plainexamples, returndocs = module_docs.get_docstring(filename)
+ except:
+ traceback.print_exc()
+ sys.stderr.write("ERROR: module %s has a documentation formatting error or is missing documentation\n" % module)
+ continue
+
+ if doc is not None:
+
+ all_keys = []
+ for (k,v) in doc['options'].iteritems():
+ all_keys.append(k)
+ all_keys = sorted(all_keys)
+ doc['option_keys'] = all_keys
+
+ doc['filename'] = filename
+ doc['docuri'] = doc['module'].replace('_', '-')
+ doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
+ doc['plainexamples'] = plainexamples
+ doc['returndocs'] = returndocs
+
+ if options.show_snippet:
+ text += get_snippet_text(doc)
+ else:
+ text += get_man_text(doc)
+ else:
+ # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
+ # probably a quoting issue.
+ sys.stderr.write("ERROR: module %s missing documentation (or could not parse documentation)\n" % module)
+ pager(text)
+
+if __name__ == '__main__':
+ main()
diff --git a/v1/bin/ansible-galaxy b/v1/bin/ansible-galaxy
new file mode 100755
index 00000000000..a6d625671ec
--- /dev/null
+++ b/v1/bin/ansible-galaxy
@@ -0,0 +1,957 @@
+#!/usr/bin/env python
+
+########################################################################
+#
+# (C) 2013, James Cammarata
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+
+import datetime
+import json
+import os
+import os.path
+import shutil
+import subprocess
+import sys
+import tarfile
+import tempfile
+import urllib
+import urllib2
+import yaml
+
+from collections import defaultdict
+from distutils.version import LooseVersion
+from jinja2 import Environment
+from optparse import OptionParser
+
+import ansible.constants as C
+import ansible.utils
+from ansible.errors import AnsibleError
+
+default_meta_template = """---
+galaxy_info:
+ author: {{ author }}
+ description: {{description}}
+ company: {{ company }}
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: {{ issue_tracker_url }}
+ # Some suggested licenses:
+ # - BSD (default)
+ # - MIT
+ # - GPLv2
+ # - GPLv3
+ # - Apache
+ # - CC-BY
+ license: {{ license }}
+ min_ansible_version: {{ min_ansible_version }}
+ #
+ # Below are all platforms currently available. Just uncomment
+ # the ones that apply to your role. If you don't see your
+ # platform on this list, let us know and we'll get it added!
+ #
+ #platforms:
+ {%- for platform,versions in platforms.iteritems() %}
+ #- name: {{ platform }}
+ # versions:
+ # - all
+ {%- for version in versions %}
+ # - {{ version }}
+ {%- endfor %}
+ {%- endfor %}
+ #
+ # Below are all categories currently available. Just as with
+ # the platforms above, uncomment those that apply to your role.
+ #
+ #categories:
+ {%- for category in categories %}
+ #- {{ category.name }}
+ {%- endfor %}
+dependencies: []
+ # List your role dependencies here, one per line.
+ # Be sure to remove the '[]' above if you add dependencies
+ # to this list.
+ {% for dependency in dependencies %}
+ #- {{ dependency }}
+ {% endfor %}
+
+"""
+
+default_readme_template = """Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
+"""
+
+#-------------------------------------------------------------------------------------
+# Utility functions for parsing actions/options
+#-------------------------------------------------------------------------------------
+
+VALID_ACTIONS = ("init", "info", "install", "list", "remove")
+SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
+
+def get_action(args):
+ """
+ Get the action the user wants to execute from the
+ sys argv list.
+ """
+ for i in range(0,len(args)):
+ arg = args[i]
+ if arg in VALID_ACTIONS:
+ del args[i]
+ return arg
+ return None
+
+def build_option_parser(action):
+ """
+ Builds an option parser object based on the action
+ the user wants to execute.
+ """
+
+ usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(VALID_ACTIONS)
+ epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
+ OptionParser.format_epilog = lambda self, formatter: self.epilog
+ parser = OptionParser(usage=usage, epilog=epilog)
+
+ if not action:
+ parser.print_help()
+ sys.exit()
+
+ # options for all actions
+ # - none yet
+
+ # options specific to actions
+ if action == "info":
+ parser.set_usage("usage: %prog info [options] role_name[,version]")
+ elif action == "init":
+ parser.set_usage("usage: %prog init [options] role_name")
+ parser.add_option(
+ '-p', '--init-path', dest='init_path', default="./",
+ help='The path in which the skeleton role will be created. '
+ 'The default is the current working directory.')
+ parser.add_option(
+ '--offline', dest='offline', default=False, action='store_true',
+ help="Don't query the galaxy API when creating roles")
+ elif action == "install":
+ parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
+ parser.add_option(
+ '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
+ help='Ignore errors and continue with the next specified role.')
+ parser.add_option(
+ '-n', '--no-deps', dest='no_deps', action='store_true', default=False,
+ help='Don\'t download roles listed as dependencies')
+ parser.add_option(
+ '-r', '--role-file', dest='role_file',
+ help='A file containing a list of roles to be imported')
+ elif action == "remove":
+ parser.set_usage("usage: %prog remove role1 role2 ...")
+ elif action == "list":
+ parser.set_usage("usage: %prog list [role_name]")
+
+ # options that apply to more than one action
+ if action != "init":
+ parser.add_option(
+ '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
+ help='The path to the directory containing your roles. '
+ 'The default is the roles_path configured in your '
+ 'ansible.cfg file (/etc/ansible/roles if not configured)')
+
+ if action in ("info","init","install"):
+ parser.add_option(
+ '-s', '--server', dest='api_server', default="galaxy.ansible.com",
+ help='The API server destination')
+
+ if action in ("init","install"):
+ parser.add_option(
+ '-f', '--force', dest='force', action='store_true', default=False,
+ help='Force overwriting an existing role')
+ # done, return the parser
+ return parser
+
+def get_opt(options, k, defval=""):
+ """
+ Returns an option from an Optparse values instance.
+ """
+ try:
+ data = getattr(options, k)
+ except:
+ return defval
+ if k == "roles_path":
+ if os.pathsep in data:
+ data = data.split(os.pathsep)[0]
+ return data
+
+def exit_without_ignore(options, rc=1):
+ """
+ Exits with the specified return code unless the
+ option --ignore-errors was specified
+ """
+
+ if not get_opt(options, "ignore_errors", False):
+ print '- you can use --ignore-errors to skip failed roles.'
+ sys.exit(rc)
+
+
+#-------------------------------------------------------------------------------------
+# Galaxy API functions
+#-------------------------------------------------------------------------------------
+
+def api_get_config(api_server):
+ """
+ Fetches the Galaxy API current version to ensure
+ the API server is up and reachable.
+ """
+
+ try:
+ url = 'https://%s/api/' % api_server
+ data = json.load(urllib2.urlopen(url))
+ if not data.get("current_version",None):
+ return None
+ else:
+ return data
+ except:
+ return None
+
+def api_lookup_role_by_name(api_server, role_name, notify=True):
+ """
+ Uses the Galaxy API to do a lookup on the role owner/name.
+ """
+
+ role_name = urllib.quote(role_name)
+
+ try:
+ parts = role_name.split(".")
+ user_name = ".".join(parts[0:-1])
+ role_name = parts[-1]
+ if notify:
+ print "- downloading role '%s', owned by %s" % (role_name, user_name)
+ except:
+ parser.print_help()
+ print "- invalid role name (%s). Specify role as format: username.rolename" % role_name
+ sys.exit(1)
+
+ url = 'https://%s/api/v1/roles/?owner__username=%s&name=%s' % (api_server,user_name,role_name)
+ try:
+ data = json.load(urllib2.urlopen(url))
+ if len(data["results"]) == 0:
+ return None
+ else:
+ return data["results"][0]
+ except:
+ return None
+
+def api_fetch_role_related(api_server, related, role_id):
+ """
+ Uses the Galaxy API to fetch the list of related items for
+ the given role. The url comes from the 'related' field of
+ the role.
+ """
+
+ try:
+ url = 'https://%s/api/v1/roles/%d/%s/?page_size=50' % (api_server, int(role_id), related)
+ data = json.load(urllib2.urlopen(url))
+ results = data['results']
+ done = (data.get('next', None) == None)
+ while not done:
+ url = 'https://%s%s' % (api_server, data['next'])
+ print url
+ data = json.load(urllib2.urlopen(url))
+ results += data['results']
+ done = (data.get('next', None) == None)
+ return results
+ except:
+ return None
+
+def api_get_list(api_server, what):
+ """
+ Uses the Galaxy API to fetch the list of items specified.
+ """
+
+ try:
+ url = 'https://%s/api/v1/%s/?page_size' % (api_server, what)
+ data = json.load(urllib2.urlopen(url))
+ if "results" in data:
+ results = data['results']
+ else:
+ results = data
+ done = True
+ if "next" in data:
+ done = (data.get('next', None) == None)
+ while not done:
+ url = 'https://%s%s' % (api_server, data['next'])
+ print url
+ data = json.load(urllib2.urlopen(url))
+ results += data['results']
+ done = (data.get('next', None) == None)
+ return results
+ except:
+ print "- failed to download the %s list" % what
+ return None
+
+#-------------------------------------------------------------------------------------
+# scm repo utility functions
+#-------------------------------------------------------------------------------------
+
+def scm_archive_role(scm, role_url, role_version, role_name):
+ if scm not in ['hg', 'git']:
+ print "- scm %s is not currently supported" % scm
+ return False
+ tempdir = tempfile.mkdtemp()
+ clone_cmd = [scm, 'clone', role_url, role_name]
+ with open('/dev/null', 'w') as devnull:
+ try:
+ print "- executing: %s" % " ".join(clone_cmd)
+ popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
+ except:
+ raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
+ rc = popen.wait()
+ if rc != 0:
+ print "- command %s failed" % ' '.join(clone_cmd)
+ print " in directory %s" % tempdir
+ return False
+
+ temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
+ if scm == 'hg':
+ archive_cmd = ['hg', 'archive', '--prefix', "%s/" % role_name]
+ if role_version:
+ archive_cmd.extend(['-r', role_version])
+ archive_cmd.append(temp_file.name)
+ if scm == 'git':
+ archive_cmd = ['git', 'archive', '--prefix=%s/' % role_name, '--output=%s' % temp_file.name]
+ if role_version:
+ archive_cmd.append(role_version)
+ else:
+ archive_cmd.append('HEAD')
+
+ with open('/dev/null', 'w') as devnull:
+ print "- executing: %s" % " ".join(archive_cmd)
+ popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, role_name),
+ stderr=devnull, stdout=devnull)
+ rc = popen.wait()
+ if rc != 0:
+ print "- command %s failed" % ' '.join(archive_cmd)
+ print " in directory %s" % tempdir
+ return False
+
+ shutil.rmtree(tempdir, ignore_errors=True)
+
+ return temp_file.name
+
+
+#-------------------------------------------------------------------------------------
+# Role utility functions
+#-------------------------------------------------------------------------------------
+
+def get_role_path(role_name, options):
+ """
+ Returns the role path based on the roles_path option
+ and the role name.
+ """
+ roles_path = get_opt(options,'roles_path')
+ roles_path = os.path.join(roles_path, role_name)
+ roles_path = os.path.expanduser(roles_path)
+ return roles_path
+
+def get_role_metadata(role_name, options):
+ """
+ Returns the metadata as YAML, if the file 'meta/main.yml'
+ exists in the specified role_path
+ """
+ role_path = os.path.join(get_role_path(role_name, options), 'meta/main.yml')
+ try:
+ if os.path.isfile(role_path):
+ f = open(role_path, 'r')
+ meta_data = yaml.safe_load(f)
+ f.close()
+ return meta_data
+ else:
+ return None
+ except:
+ return None
+
+def get_galaxy_install_info(role_name, options):
+ """
+ Returns the YAML data contained in 'meta/.galaxy_install_info',
+ if it exists.
+ """
+
+ try:
+ info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
+ if os.path.isfile(info_path):
+ f = open(info_path, 'r')
+ info_data = yaml.safe_load(f)
+ f.close()
+ return info_data
+ else:
+ return None
+ except:
+ return None
+
+def write_galaxy_install_info(role_name, role_version, options):
+ """
+ Writes a YAML-formatted file to the role's meta/ directory
+ (named .galaxy_install_info) which contains some information
+ we can use later for commands like 'list' and 'info'.
+ """
+
+ info = dict(
+ version = role_version,
+ install_date = datetime.datetime.utcnow().strftime("%c"),
+ )
+ try:
+ info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
+ f = open(info_path, 'w+')
+ info_data = yaml.safe_dump(info, f)
+ f.close()
+ except:
+ return False
+ return True
+
+
+def remove_role(role_name, options):
+ """
+ Removes the specified role from the roles path. There is a
+ sanity check to make sure there's a meta/main.yml file at this
+ path so the user doesn't blow away random directories
+ """
+ if get_role_metadata(role_name, options):
+ role_path = get_role_path(role_name, options)
+ shutil.rmtree(role_path)
+ return True
+ else:
+ return False
+
+def fetch_role(role_name, target, role_data, options):
+ """
+ Downloads the archived role from github to a temp location, extracts
+ it, and then copies the extracted role to the role library path.
+ """
+
+ # first grab the file and save it to a temp location
+ if '://' in role_name:
+ archive_url = role_name
+ else:
+ archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target)
+ print "- downloading role from %s" % archive_url
+
+ try:
+ url_file = urllib2.urlopen(archive_url)
+ temp_file = tempfile.NamedTemporaryFile(delete=False)
+ data = url_file.read()
+ while data:
+ temp_file.write(data)
+ data = url_file.read()
+ temp_file.close()
+ return temp_file.name
+ except Exception, e:
+ # TODO: better urllib2 error handling for error
+ # messages that are more exact
+ print "- error: failed to download the file."
+ return False
+
+def install_role(role_name, role_version, role_filename, options):
+ # the file is a tar, so open it that way and extract it
+ # to the specified (or default) roles directory
+
+ if not tarfile.is_tarfile(role_filename):
+ print "- error: the file downloaded was not a tar.gz"
+ return False
+ else:
+ if role_filename.endswith('.gz'):
+ role_tar_file = tarfile.open(role_filename, "r:gz")
+ else:
+ role_tar_file = tarfile.open(role_filename, "r")
+ # verify the role's meta file
+ meta_file = None
+ members = role_tar_file.getmembers()
+ # next find the metadata file
+ for member in members:
+ if "/meta/main.yml" in member.name:
+ meta_file = member
+ break
+ if not meta_file:
+ print "- error: this role does not appear to have a meta/main.yml file."
+ return False
+ else:
+ try:
+ meta_file_data = yaml.safe_load(role_tar_file.extractfile(meta_file))
+ except:
+ print "- error: this role does not appear to have a valid meta/main.yml file."
+ return False
+
+ # we strip off the top-level directory for all of the files contained within
+ # the tar file here, since the default is 'github_repo-target', and change it
+ # to the specified role's name
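+        # e.g. 'reponame-master/tasks/main.yml' ends up as '<roles_path>/<role_name>/tasks/main.yml'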
+ role_path = os.path.join(get_opt(options, 'roles_path'), role_name)
+ role_path = os.path.expanduser(role_path)
+ print "- extracting %s to %s" % (role_name, role_path)
+ try:
+ if os.path.exists(role_path):
+ if not os.path.isdir(role_path):
+ print "- error: the specified roles path exists and is not a directory."
+ return False
+ elif not get_opt(options, "force", False):
+ print "- error: the specified role %s appears to already exist. Use --force to replace it." % role_name
+ return False
+ else:
+ # using --force, remove the old path
+ if not remove_role(role_name, options):
+ print "- error: %s doesn't appear to contain a role." % role_path
+ print " please remove this directory manually if you really want to put the role here."
+ return False
+ else:
+ os.makedirs(role_path)
+
+ # now we do the actual extraction to the role_path
+ for member in members:
+ # we only extract files, and remove any relative path
+ # bits that might be in the file for security purposes
+ # and drop the leading directory, as mentioned above
+ if member.isreg() or member.issym():
+ parts = member.name.split("/")[1:]
+ final_parts = []
+ for part in parts:
+ if part != '..' and '~' not in part and '$' not in part:
+ final_parts.append(part)
+ member.name = os.path.join(*final_parts)
+ role_tar_file.extract(member, role_path)
+
+ # write out the install info file for later use
+ write_galaxy_install_info(role_name, role_version, options)
+ except OSError, e:
+ print "- error: you do not have permission to modify files in %s" % role_path
+ return False
+
+ # return the parsed yaml metadata
+ print "- %s was installed successfully" % role_name
+ return meta_file_data
+
+#-------------------------------------------------------------------------------------
+# Action functions
+#-------------------------------------------------------------------------------------
+
+def execute_init(args, options, parser):
+ """
+ Executes the init action, which creates the skeleton framework
+ of a role that complies with the galaxy metadata format.
+ """
+
+ init_path = get_opt(options, 'init_path', './')
+ api_server = get_opt(options, "api_server", "galaxy.ansible.com")
+ force = get_opt(options, 'force', False)
+ offline = get_opt(options, 'offline', False)
+
+ if not offline:
+ api_config = api_get_config(api_server)
+ if not api_config:
+ print "- the API server (%s) is not responding, please try again later." % api_server
+ sys.exit(1)
+
+ try:
+ role_name = args.pop(0).strip()
+ if role_name == "":
+ raise Exception("")
+ role_path = os.path.join(init_path, role_name)
+ if os.path.exists(role_path):
+ if os.path.isfile(role_path):
+ print "- the path %s already exists, but is a file - aborting" % role_path
+ sys.exit(1)
+ elif not force:
+ print "- the directory %s already exists." % role_path
+ print " you can use --force to re-initialize this directory,\n" + \
+ " however it will reset any main.yml files that may have\n" + \
+ " been modified there already."
+ sys.exit(1)
+ except Exception, e:
+ parser.print_help()
+ print "- no role name specified for init"
+ sys.exit(1)
+
+ ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')
+
+ # create the default README.md
+ if not os.path.exists(role_path):
+ os.makedirs(role_path)
+ readme_path = os.path.join(role_path, "README.md")
+ f = open(readme_path, "wb")
+ f.write(default_readme_template)
+        f.close()
+
+ for dir in ROLE_DIRS:
+ dir_path = os.path.join(init_path, role_name, dir)
+ main_yml_path = os.path.join(dir_path, 'main.yml')
+ # create the directory if it doesn't exist already
+ if not os.path.exists(dir_path):
+ os.makedirs(dir_path)
+
+ # now create the main.yml file for that directory
+ if dir == "meta":
+ # create a skeleton meta/main.yml with a valid galaxy_info
+ # datastructure in place, plus with all of the available
+ # tags/platforms included (but commented out) and the
+ # dependencies section
+ platforms = []
+ if not offline:
+ platforms = api_get_list(api_server, "platforms") or []
+ categories = []
+ if not offline:
+ categories = api_get_list(api_server, "categories") or []
+
+ # group the list of platforms from the api based
+ # on their names, with the release field being
+ # appended to a list of versions
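+            # e.g. {'Ubuntu': ['precise', 'trusty'], 'EL': ['6', '7']}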
+ platform_groups = defaultdict(list)
+ for platform in platforms:
+ platform_groups[platform['name']].append(platform['release'])
+ platform_groups[platform['name']].sort()
+
+ inject = dict(
+ author = 'your name',
+ company = 'your company (optional)',
+ license = 'license (GPLv2, CC-BY, etc)',
+ issue_tracker_url = 'http://example.com/issue/tracker',
+ min_ansible_version = '1.2',
+ platforms = platform_groups,
+ categories = categories,
+ )
+ rendered_meta = Environment().from_string(default_meta_template).render(inject)
+ f = open(main_yml_path, 'w')
+ f.write(rendered_meta)
+ f.close()
+ pass
+ elif dir not in ('files','templates'):
+ # just write a (mostly) empty YAML file for main.yml
+ f = open(main_yml_path, 'w')
+ f.write('---\n# %s file for %s\n' % (dir,role_name))
+ f.close()
+ print "- %s was created successfully" % role_name
+
+def execute_info(args, options, parser):
+ """
+ Executes the info action. This action prints out detailed
+ information about an installed role as well as info available
+ from the galaxy API.
+ """
+
+ if len(args) == 0:
+ # the user needs to specify a role
+ parser.print_help()
+ print "- you must specify a user/role name"
+ sys.exit(1)
+
+ api_server = get_opt(options, "api_server", "galaxy.ansible.com")
+ api_config = api_get_config(api_server)
+ roles_path = get_opt(options, "roles_path")
+
+ for role in args:
+
+ role_info = {}
+
+ install_info = get_galaxy_install_info(role, options)
+ if install_info:
+ if 'version' in install_info:
+                install_info['installed_version'] = install_info['version']
+ del install_info['version']
+ role_info.update(install_info)
+
+ remote_data = api_lookup_role_by_name(api_server, role, False)
+ if remote_data:
+ role_info.update(remote_data)
+
+ metadata = get_role_metadata(role, options)
+ if metadata:
+ role_info.update(metadata)
+
+ role_spec = ansible.utils.role_spec_parse(role)
+ if role_spec:
+ role_info.update(role_spec)
+
+ if role_info:
+ print "- %s:" % (role)
+ for k in sorted(role_info.keys()):
+
+ if k in SKIP_INFO_KEYS:
+ continue
+
+ if isinstance(role_info[k], dict):
+ print "\t%s: " % (k)
+ for key in sorted(role_info[k].keys()):
+ if key in SKIP_INFO_KEYS:
+ continue
+ print "\t\t%s: %s" % (key, role_info[k][key])
+ else:
+ print "\t%s: %s" % (k, role_info[k])
+ else:
+ print "- the role %s was not found" % role
+
+def execute_install(args, options, parser):
+ """
+ Executes the installation action. The args list contains the
+ roles to be installed, unless -f was specified. The list of roles
+ can be a name (which will be downloaded via the galaxy API and github),
+ or it can be a local .tar.gz file.
+ """
+
+ role_file = get_opt(options, "role_file", None)
+
+ if len(args) == 0 and role_file is None:
+ # the user needs to specify one of either --role-file
+ # or specify a single user/role name
+ parser.print_help()
+ print "- you must specify a user/role name or a roles file"
+ sys.exit()
+ elif len(args) == 1 and not role_file is None:
+ # using a role file is mutually exclusive of specifying
+ # the role name on the command line
+ parser.print_help()
+ print "- please specify a user/role name, or a roles file, but not both"
+ sys.exit(1)
+
+ api_server = get_opt(options, "api_server", "galaxy.ansible.com")
+ no_deps = get_opt(options, "no_deps", False)
+ roles_path = get_opt(options, "roles_path")
+
+ roles_done = []
+ if role_file:
+ f = open(role_file, 'r')
+ if role_file.endswith('.yaml') or role_file.endswith('.yml'):
+ roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f))
+ else:
+ # roles listed in a file, one per line
+ roles_left = map(ansible.utils.role_spec_parse, f.readlines())
+ f.close()
+ else:
+ # roles were specified directly, so we'll just go out grab them
+ # (and their dependencies, unless the user doesn't want us to).
+ roles_left = map(ansible.utils.role_spec_parse, args)
+
+ while len(roles_left) > 0:
+ # query the galaxy API for the role data
+ role_data = None
+ role = roles_left.pop(0)
+ role_src = role.get("src")
+ role_scm = role.get("scm")
+ role_path = role.get("path")
+
+ if role_path:
+ options.roles_path = role_path
+ else:
+ options.roles_path = roles_path
+
+ if os.path.isfile(role_src):
+ # installing a local tar.gz
+ tmp_file = role_src
+ else:
+ if role_scm:
+ # create tar file from scm url
+ tmp_file = scm_archive_role(role_scm, role_src, role.get("version"), role.get("name"))
+ elif '://' in role_src:
+ # just download a URL - version will probably be in the URL
+ tmp_file = fetch_role(role_src, None, None, options)
+ else:
+ # installing from galaxy
+ api_config = api_get_config(api_server)
+ if not api_config:
+ print "- the API server (%s) is not responding, please try again later." % api_server
+ sys.exit(1)
+
+ role_data = api_lookup_role_by_name(api_server, role_src)
+ if not role_data:
+ print "- sorry, %s was not found on %s." % (role_src, api_server)
+ exit_without_ignore(options)
+ continue
+
+ role_versions = api_fetch_role_related(api_server, 'versions', role_data['id'])
+ if "version" not in role or role['version'] == '':
+ # convert the version names to LooseVersion objects
+ # and sort them to get the latest version. If there
+ # are no versions in the list, we'll grab the head
+ # of the master branch
+ if len(role_versions) > 0:
+ loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
+ loose_versions.sort()
+ role["version"] = str(loose_versions[-1])
+ else:
+ role["version"] = 'master'
+ elif role['version'] != 'master':
+ if role_versions and role["version"] not in [a.get('name', None) for a in role_versions]:
+ print 'role is %s' % role
+ print "- the specified version (%s) was not found in the list of available versions (%s)." % (role['version'], role_versions)
+ exit_without_ignore(options)
+ continue
+
+ # download the role. if --no-deps was specified, we stop here,
+ # otherwise we recursively grab roles and all of their deps.
+ tmp_file = fetch_role(role_src, role["version"], role_data, options)
+ installed = False
+ if tmp_file:
+ installed = install_role(role.get("name"), role.get("version"), tmp_file, options)
+ # we're done with the temp file, clean it up
+ if tmp_file != role_src:
+ os.unlink(tmp_file)
+ # install dependencies, if we want them
+ if not no_deps and installed:
+ if not role_data:
+ role_data = get_role_metadata(role.get("name"), options)
+ role_dependencies = role_data['dependencies']
+ else:
+ role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id'])
+ for dep in role_dependencies:
+ if isinstance(dep, basestring):
+ dep = ansible.utils.role_spec_parse(dep)
+ else:
+ dep = ansible.utils.role_yaml_parse(dep)
+ if not get_role_metadata(dep["name"], options):
+ if dep not in roles_left:
+ print '- adding dependency: %s' % dep["name"]
+ roles_left.append(dep)
+ else:
+ print '- dependency %s already pending installation.' % dep["name"]
+ else:
+ print '- dependency %s is already installed, skipping.' % dep["name"]
+ if not tmp_file or not installed:
+ print "- %s was NOT installed successfully." % role.get("name")
+ exit_without_ignore(options)
+ sys.exit(0)
+
+def execute_remove(args, options, parser):
+ """
+ Executes the remove action. The args list contains the list
+ of roles to be removed. This list can contain more than one role.
+ """
+
+ if len(args) == 0:
+ parser.print_help()
+ print '- you must specify at least one role to remove.'
+ sys.exit()
+
+ for role in args:
+ if get_role_metadata(role, options):
+ if remove_role(role, options):
+ print '- successfully removed %s' % role
+ else:
+ print "- failed to remove role: %s" % role
+ else:
+ print '- %s is not installed, skipping.' % role
+ sys.exit(0)
+
+def execute_list(args, options, parser):
+ """
+ Executes the list action. The args list can contain zero
+ or one role. If one is specified, only that role will be
+ shown, otherwise all roles in the specified directory will
+ be shown.
+ """
+
+ if len(args) > 1:
+ print "- please specify only one role to list, or specify no roles to see a full list"
+ sys.exit(1)
+
+ if len(args) == 1:
+        # show only the requested role, if it exists
+ role_name = args[0]
+ metadata = get_role_metadata(role_name, options)
+ if metadata:
+ install_info = get_galaxy_install_info(role_name, options)
+ version = None
+ if install_info:
+ version = install_info.get("version", None)
+ if not version:
+ version = "(unknown version)"
+ # show some more info about single roles here
+ print "- %s, %s" % (role_name, version)
+ else:
+ print "- the role %s was not found" % role_name
+ else:
+ # show all valid roles in the roles_path directory
+ roles_path = get_opt(options, 'roles_path')
+ roles_path = os.path.expanduser(roles_path)
+ if not os.path.exists(roles_path):
+ parser.print_help()
+ print "- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path
+ sys.exit(1)
+ elif not os.path.isdir(roles_path):
+ print "- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path
+ parser.print_help()
+ sys.exit(1)
+ path_files = os.listdir(roles_path)
+ for path_file in path_files:
+ if get_role_metadata(path_file, options):
+ install_info = get_galaxy_install_info(path_file, options)
+ version = None
+ if install_info:
+ version = install_info.get("version", None)
+ if not version:
+ version = "(unknown version)"
+ print "- %s, %s" % (path_file, version)
+ sys.exit(0)
+
+#-------------------------------------------------------------------------------------
+# The main entry point
+#-------------------------------------------------------------------------------------
+
+def main():
+ # parse the CLI options
+ action = get_action(sys.argv)
+ parser = build_option_parser(action)
+ (options, args) = parser.parse_args()
+
+    # execute the desired action
+    try:
+        fn = globals()["execute_%s" % action]
+        fn(args, options, parser)
+    except KeyError, e:
+        print "- error: %s is not a valid action. Valid actions are: %s" % (action, ", ".join(VALID_ACTIONS))
+        sys.exit(1)
+
+if __name__ == "__main__":
+ main()
diff --git a/v1/bin/ansible-playbook b/v1/bin/ansible-playbook
new file mode 100755
index 00000000000..3d6e1f9f402
--- /dev/null
+++ b/v1/bin/ansible-playbook
@@ -0,0 +1,330 @@
+#!/usr/bin/env python
+# (C) 2012, Michael DeHaan,
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#######################################################
+
+__requires__ = ['ansible']
+try:
+ import pkg_resources
+except Exception:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. But we
+ # have code that better expresses the errors in the places where the code
+ # is actually used (the deps are optional for many code paths) so we don't
+ # want to fail here.
+ pass
+
+import sys
+import os
+import stat
+
+# Augment PYTHONPATH to find Python modules relative to this file path
+# This is so that we can find the modules when running from a local checkout
+# installed as editable with `pip install -e ...` or `python setup.py develop`
+local_module_path = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '..', 'lib')
+)
+sys.path.append(local_module_path)
+
+import ansible.playbook
+import ansible.constants as C
+import ansible.utils.template
+from ansible import errors
+from ansible import callbacks
+from ansible import utils
+from ansible.color import ANSIBLE_COLOR, stringc
+from ansible.callbacks import display
+
+def colorize(lead, num, color):
+ """ Print 'lead' = 'num' in 'color' """
+ if num != 0 and ANSIBLE_COLOR and color is not None:
+ return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color))
+ else:
+ return "%s=%-4s" % (lead, str(num))
+
+def hostcolor(host, stats, color=True):
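+    # red for hosts with failures or unreachable, yellow for changed, green otherwise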
+ if ANSIBLE_COLOR and color:
+ if stats['failures'] != 0 or stats['unreachable'] != 0:
+ return "%-37s" % stringc(host, 'red')
+ elif stats['changed'] != 0:
+ return "%-37s" % stringc(host, 'yellow')
+ else:
+ return "%-37s" % stringc(host, 'green')
+ return "%-26s" % host
+
+
+def main(args):
+ ''' run ansible-playbook operations '''
+
+ # create parser for CLI options
+ parser = utils.base_parser(
+ constants=C,
+ usage = "%prog playbook.yml",
+ connect_opts=True,
+ runas_opts=True,
+ subset_opts=True,
+ check_opts=True,
+ diff_opts=True
+ )
+ #parser.add_option('--vault-password', dest="vault_password",
+ # help="password for vault encrypted files")
+ parser.add_option('-t', '--tags', dest='tags', default='all',
+ help="only run plays and tasks tagged with these values")
+ parser.add_option('--skip-tags', dest='skip_tags',
+ help="only run plays and tasks whose tags do not match these values")
+ parser.add_option('--syntax-check', dest='syntax', action='store_true',
+ help="perform a syntax check on the playbook, but do not execute it")
+ parser.add_option('--list-tasks', dest='listtasks', action='store_true',
+ help="list all tasks that would be executed")
+ parser.add_option('--list-tags', dest='listtags', action='store_true',
+ help="list all available tags")
+ parser.add_option('--step', dest='step', action='store_true',
+ help="one-step-at-a-time: confirm each task before running")
+ parser.add_option('--start-at-task', dest='start_at',
+ help="start the playbook at the task matching this name")
+ parser.add_option('--force-handlers', dest='force_handlers',
+ default=C.DEFAULT_FORCE_HANDLERS, action='store_true',
+ help="run handlers even if a task fails")
+ parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
+ help="clear the fact cache")
+
+ options, args = parser.parse_args(args)
+
+ if len(args) == 0:
+ parser.print_help(file=sys.stderr)
+ return 1
+
+    # privilege escalation command line arguments need to be mutually exclusive
+ utils.check_mutually_exclusive_privilege(options, parser)
+
+ if (options.ask_vault_pass and options.vault_password_file):
+ parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
+
+ sshpass = None
+ becomepass = None
+ vault_pass = None
+
+ options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
+
+ if options.listhosts or options.syntax or options.listtasks or options.listtags:
+ (_, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass)
+ else:
+ options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
+ # Never ask for an SSH password when we run with local connection
+ if options.connection == "local":
+ options.ask_pass = False
+
+ # set pe options
+ utils.normalize_become_options(options)
+ prompt_method = utils.choose_pass_prompt(options)
+ (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass,
+ become_ask_pass=options.become_ask_pass,
+ ask_vault_pass=options.ask_vault_pass,
+ become_method=prompt_method)
+
+ # read vault_pass from a file
+ if not options.ask_vault_pass and options.vault_password_file:
+ vault_pass = utils.read_vault_file(options.vault_password_file)
+
+ extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)
+
+ only_tags = options.tags.split(",")
+ skip_tags = options.skip_tags
+ if options.skip_tags is not None:
+ skip_tags = options.skip_tags.split(",")
+
+ for playbook in args:
+ if not os.path.exists(playbook):
+ raise errors.AnsibleError("the playbook: %s could not be found" % playbook)
+ if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
+ raise errors.AnsibleError("the playbook: %s does not appear to be a file" % playbook)
+
+ inventory = ansible.inventory.Inventory(options.inventory, vault_password=vault_pass)
+
+ # Note: slightly wrong, this is written so that implicit localhost
+ # (which is not returned in list_hosts()) is taken into account for
+ # warning if inventory is empty. But it can't be taken into account for
+ # checking if limit doesn't match any hosts. Instead we don't worry about
+ # limit if only implicit localhost was in inventory to start with.
+ #
+ # Fix this in v2
+ no_hosts = False
+ if len(inventory.list_hosts()) == 0:
+ # Empty inventory
+ utils.warning("provided hosts list is empty, only localhost is available")
+ no_hosts = True
+ inventory.subset(options.subset)
+ if len(inventory.list_hosts()) == 0 and no_hosts is False:
+ # Invalid limit
+ raise errors.AnsibleError("Specified --limit does not match any hosts")
+
+ # run all playbooks specified on the command line
+ for playbook in args:
+
+ stats = callbacks.AggregateStats()
+ playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
+ if options.step:
+ playbook_cb.step = options.step
+ if options.start_at:
+ playbook_cb.start_at = options.start_at
+ runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
+
+ pb = ansible.playbook.PlayBook(
+ playbook=playbook,
+ module_path=options.module_path,
+ inventory=inventory,
+ forks=options.forks,
+ remote_user=options.remote_user,
+ remote_pass=sshpass,
+ callbacks=playbook_cb,
+ runner_callbacks=runner_cb,
+ stats=stats,
+ timeout=options.timeout,
+ transport=options.connection,
+ become=options.become,
+ become_method=options.become_method,
+ become_user=options.become_user,
+ become_pass=becomepass,
+ extra_vars=extra_vars,
+ private_key_file=options.private_key_file,
+ only_tags=only_tags,
+ skip_tags=skip_tags,
+ check=options.check,
+ diff=options.diff,
+ vault_password=vault_pass,
+ force_handlers=options.force_handlers,
+ )
+
+ if options.flush_cache:
+ display(callbacks.banner("FLUSHING FACT CACHE"))
+ pb.SETUP_CACHE.flush()
+
+ if options.listhosts or options.listtasks or options.syntax or options.listtags:
+ print ''
+ print 'playbook: %s' % playbook
+ print ''
+ playnum = 0
+ for (play_ds, play_basedir) in zip(pb.playbook, pb.play_basedirs):
+ playnum += 1
+ play = ansible.playbook.Play(pb, play_ds, play_basedir,
+ vault_password=pb.vault_password)
+ label = play.name
+ hosts = pb.inventory.list_hosts(play.hosts)
+
+ if options.listhosts:
+ print ' play #%d (%s): host count=%d' % (playnum, label, len(hosts))
+ for host in hosts:
+ print ' %s' % host
+
+ if options.listtags or options.listtasks:
+ print ' play #%d (%s):\tTAGS: [%s]' % (playnum, label,','.join(sorted(set(play.tags))))
+
+ if options.listtags:
+ tags = []
+ for task in pb.tasks_to_run_in_play(play):
+ tags.extend(task.tags)
+ print ' TASK TAGS: [%s]' % (', '.join(sorted(set(tags).difference(['untagged']))))
+
+ if options.listtasks:
+
+ for task in pb.tasks_to_run_in_play(play):
+ if getattr(task, 'name', None) is not None:
+ # meta tasks have no names
+ print ' %s\tTAGS: [%s]' % (task.name, ', '.join(sorted(set(task.tags).difference(['untagged']))))
+
+ if options.listhosts or options.listtasks or options.listtags:
+ print ''
+ continue
+
+ if options.syntax:
+ # if we've not exited by now then we are fine.
+ print 'Playbook Syntax is fine'
+ return 0
+
+ failed_hosts = []
+ unreachable_hosts = []
+
+ try:
+
+ pb.run()
+
+ hosts = sorted(pb.stats.processed.keys())
+ display(callbacks.banner("PLAY RECAP"))
+ playbook_cb.on_stats(pb.stats)
+
+ for h in hosts:
+ t = pb.stats.summarize(h)
+ if t['failures'] > 0:
+ failed_hosts.append(h)
+ if t['unreachable'] > 0:
+ unreachable_hosts.append(h)
+
+ retries = failed_hosts + unreachable_hosts
+
+ if C.RETRY_FILES_ENABLED and len(retries) > 0:
+ filename = pb.generate_retry_inventory(retries)
+ if filename:
+ display(" to retry, use: --limit @%s\n" % filename)
+
+ for h in hosts:
+ t = pb.stats.summarize(h)
+
+ display("%s : %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize('ok', t['ok'], 'green'),
+ colorize('changed', t['changed'], 'yellow'),
+ colorize('unreachable', t['unreachable'], 'red'),
+ colorize('failed', t['failures'], 'red')),
+ screen_only=True
+ )
+
+ display("%s : %s %s %s %s" % (
+ hostcolor(h, t, False),
+ colorize('ok', t['ok'], None),
+ colorize('changed', t['changed'], None),
+ colorize('unreachable', t['unreachable'], None),
+ colorize('failed', t['failures'], None)),
+ log_only=True
+ )
+
+
+ print ""
+ if len(failed_hosts) > 0:
+ return 2
+ if len(unreachable_hosts) > 0:
+ return 3
+
+ except errors.AnsibleError, e:
+ display("ERROR: %s" % e, color='red')
+ return 1
+
+ return 0
+
+
+if __name__ == "__main__":
+ display(" ", log_only=True)
+ display(" ".join(sys.argv), log_only=True)
+ display(" ", log_only=True)
+ try:
+ sys.exit(main(sys.argv[1:]))
+ except errors.AnsibleError, e:
+ display("ERROR: %s" % e, color='red', stderr=True)
+ sys.exit(1)
+ except KeyboardInterrupt, ke:
+ display("ERROR: interrupted", color='red', stderr=True)
+ sys.exit(1)
diff --git a/v1/bin/ansible-pull b/v1/bin/ansible-pull
new file mode 100755
index 00000000000..d4887631e0f
--- /dev/null
+++ b/v1/bin/ansible-pull
@@ -0,0 +1,257 @@
+#!/usr/bin/env python
+
+# (c) 2012, Stephen Fromm
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# ansible-pull is a script that runs ansible in local mode
+# after checking out a playbooks directory from source repo. There is an
+# example playbook to bootstrap this script in the examples/ dir which
+# installs ansible and sets it up to run on cron.
+
+# usage:
+# ansible-pull -d /var/lib/ansible \
+# -U http://example.net/content.git [-C production] \
+# [path/playbook.yml]
+#
+# the -d and -U arguments are required; the -C argument is optional.
+#
+# ansible-pull accepts an optional argument to specify a playbook
+# location underneath the workdir and then searches the source repo
+# for playbooks in the following order, stopping at the first match:
+#
+# 1. $workdir/path/playbook.yml, if specified
+# 2. $workdir/$fqdn.yml
+# 3. $workdir/$hostname.yml
+# 4. $workdir/local.yml
+#
+# the source repo must contain at least one of these playbooks.
+
+import os
+import shutil
+import sys
+import datetime
+import socket
+import random
+import time
+from ansible import utils
+from ansible.utils import cmd_functions
+from ansible import errors
+from ansible import inventory
+
+DEFAULT_REPO_TYPE = 'git'
+DEFAULT_PLAYBOOK = 'local.yml'
+PLAYBOOK_ERRORS = {1: 'File does not exist',
+ 2: 'File is not readable'}
+
+VERBOSITY=0
+
+def increment_debug(option, opt, value, parser):
+ global VERBOSITY
+ VERBOSITY += 1
+
+def try_playbook(path):
+ if not os.path.exists(path):
+ return 1
+ if not os.access(path, os.R_OK):
+ return 2
+ return 0
+
+
+def select_playbook(path, args):
+ playbook = None
+ if len(args) > 0 and args[0] is not None:
+ playbook = "%s/%s" % (path, args[0])
+ rc = try_playbook(playbook)
+ if rc != 0:
+ print >>sys.stderr, "%s: %s" % (playbook, PLAYBOOK_ERRORS[rc])
+ return None
+ return playbook
+ else:
+ fqdn = socket.getfqdn()
+ hostpb = "%s/%s.yml" % (path, fqdn)
+ shorthostpb = "%s/%s.yml" % (path, fqdn.split('.')[0])
+ localpb = "%s/%s" % (path, DEFAULT_PLAYBOOK)
+ errors = []
+ for pb in [hostpb, shorthostpb, localpb]:
+ rc = try_playbook(pb)
+ if rc == 0:
+ playbook = pb
+ break
+ else:
+ errors.append("%s: %s" % (pb, PLAYBOOK_ERRORS[rc]))
+ if playbook is None:
+ print >>sys.stderr, "\n".join(errors)
+ return playbook
+
+
+def main(args):
+ """ Set up and run a local playbook """
+ usage = "%prog [options] [playbook.yml]"
+ parser = utils.SortedOptParser(usage=usage)
+ parser.add_option('--purge', default=False, action='store_true',
+ help='purge checkout after playbook run')
+ parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
+ help='only run the playbook if the repository has been updated')
+ parser.add_option('-s', '--sleep', dest='sleep', default=None,
+                      help='sleep for random interval (between 0 and n seconds) before starting. This is a useful way to disperse git requests')
+ parser.add_option('-f', '--force', dest='force', default=False,
+ action='store_true',
+ help='run the playbook even if the repository could '
+ 'not be updated')
+ parser.add_option('-d', '--directory', dest='dest', default=None,
+ help='directory to checkout repository to')
+ #parser.add_option('-l', '--live', default=True, action='store_live',
+ # help='Print the ansible-playbook output while running')
+ parser.add_option('-U', '--url', dest='url', default=None,
+ help='URL of the playbook repository')
+ parser.add_option('-C', '--checkout', dest='checkout',
+ help='branch/tag/commit to checkout. '
+ 'Defaults to behavior of repository module.')
+ parser.add_option('-i', '--inventory-file', dest='inventory',
+ help="location of the inventory host file")
+ parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
+ help="set additional variables as key=value or YAML/JSON", default=[])
+ parser.add_option('-v', '--verbose', default=False, action="callback",
+ callback=increment_debug,
+ help='Pass -vvvv to ansible-playbook')
+ parser.add_option('-m', '--module-name', dest='module_name',
+ default=DEFAULT_REPO_TYPE,
+ help='Module name used to check out repository. '
+ 'Default is %s.' % DEFAULT_REPO_TYPE)
+ parser.add_option('--vault-password-file', dest='vault_password_file',
+ help="vault password file")
+ parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
+ help='ask for sudo password')
+ parser.add_option('-t', '--tags', dest='tags', default=False,
+ help='only run plays and tasks tagged with these values')
+ parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
+ help='adds the hostkey for the repo url if not already added')
+ parser.add_option('--key-file', dest='key_file',
+                      help="Pass '-i <key_file>' to the SSH arguments used by git.")
+ options, args = parser.parse_args(args)
+
+ hostname = socket.getfqdn()
+ if not options.dest:
+ # use a hostname dependent directory, in case of $HOME on nfs
+ options.dest = utils.prepare_writeable_dir('~/.ansible/pull/%s' % hostname)
+
+ options.dest = os.path.abspath(options.dest)
+
+ if not options.url:
+ parser.error("URL for repository not specified, use -h for help")
+ return 1
+
+ now = datetime.datetime.now()
+ print now.strftime("Starting ansible-pull at %F %T")
+
+ # Attempt to use the inventory passed in as an argument
+    # It might not yet have been downloaded so use localhost if not
+ if not options.inventory or not os.path.exists(options.inventory):
+ inv_opts = 'localhost,'
+ else:
+ inv_opts = options.inventory
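+    # limit the run to the local machine: 'localhost', its fqdn, or 127.0.0.1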
+ limit_opts = 'localhost:%s:127.0.0.1' % hostname
+ repo_opts = "name=%s dest=%s" % (options.url, options.dest)
+
+ if VERBOSITY == 0:
+ base_opts = '-c local --limit "%s"' % limit_opts
+ elif VERBOSITY > 0:
+ debug_level = ''.join([ "v" for x in range(0, VERBOSITY) ])
+ base_opts = '-%s -c local --limit "%s"' % (debug_level, limit_opts)
+
+ if options.checkout:
+ repo_opts += ' version=%s' % options.checkout
+
+ # Only git module is supported
+ if options.module_name == DEFAULT_REPO_TYPE:
+ if options.accept_host_key:
+ repo_opts += ' accept_hostkey=yes'
+
+ if options.key_file:
+ repo_opts += ' key_file=%s' % options.key_file
+
+ path = utils.plugins.module_finder.find_plugin(options.module_name)
+ if path is None:
+ sys.stderr.write("module '%s' not found.\n" % options.module_name)
+ return 1
+
+ bin_path = os.path.dirname(os.path.abspath(__file__))
+ cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % (
+ bin_path, inv_opts, base_opts, options.module_name, repo_opts
+ )
+
+ for ev in options.extra_vars:
+ cmd += ' -e "%s"' % ev
+
+ if options.sleep:
+ try:
+ secs = random.randint(0,int(options.sleep));
+ except ValueError:
+ parser.error("%s is not a number." % options.sleep)
+ return 1
+
+ print >>sys.stderr, "Sleeping for %d seconds..." % secs
+ time.sleep(secs);
+
+
+    # RUN THE CHECKOUT COMMAND
+ rc, out, err = cmd_functions.run_cmd(cmd, live=True)
+
+ if rc != 0:
+ if options.force:
+ print >>sys.stderr, "Unable to update repository. Continuing with (forced) run of playbook."
+ else:
+ return rc
+ elif options.ifchanged and '"changed": true' not in out:
+ print "Repository has not changed, quitting."
+ return 0
+
+ playbook = select_playbook(options.dest, args)
+
+ if playbook is None:
+ print >>sys.stderr, "Could not find a playbook to run."
+ return 1
+
+ cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
+ if options.vault_password_file:
+ cmd += " --vault-password-file=%s" % options.vault_password_file
+ if options.inventory:
+ cmd += ' -i "%s"' % options.inventory
+ for ev in options.extra_vars:
+ cmd += ' -e "%s"' % ev
+ if options.ask_sudo_pass:
+ cmd += ' -K'
+ if options.tags:
+ cmd += ' -t "%s"' % options.tags
+ os.chdir(options.dest)
+
+ # RUN THE PLAYBOOK COMMAND
+ rc, out, err = cmd_functions.run_cmd(cmd, live=True)
+
+ if options.purge:
+ os.chdir('/')
+ try:
+ shutil.rmtree(options.dest)
+ except Exception, e:
+ print >>sys.stderr, "Failed to remove %s: %s" % (options.dest, str(e))
+
+ return rc
+
+if __name__ == '__main__':
+ try:
+ sys.exit(main(sys.argv[1:]))
+ except KeyboardInterrupt, e:
+ print >>sys.stderr, "Exit on user request.\n"
+ sys.exit(1)
diff --git a/v1/bin/ansible-vault b/v1/bin/ansible-vault
new file mode 100755
index 00000000000..22cfc0e1487
--- /dev/null
+++ b/v1/bin/ansible-vault
@@ -0,0 +1,241 @@
+#!/usr/bin/env python
+
+# (c) 2014, James Tanner
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# ansible-vault is a script that encrypts/decrypts YAML files. See
+# http://docs.ansible.com/playbooks_vault.html for more details.
+
+__requires__ = ['ansible']
+try:
+ import pkg_resources
+except Exception:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. But we
+ # have code that better expresses the errors in the places where the code
+ # is actually used (the deps are optional for many code paths) so we don't
+ # want to fail here.
+ pass
+
+import os
+import sys
+import traceback
+
+import ansible.constants as C
+
+from ansible import utils
+from ansible import errors
+from ansible.utils.vault import VaultEditor
+
+from optparse import OptionParser
+
+#-------------------------------------------------------------------------------------
+# Utility functions for parsing actions/options
+#-------------------------------------------------------------------------------------
+
+VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
+
+def build_option_parser(action):
+ """
+ Builds an option parser object based on the action
+ the user wants to execute.
+ """
+
+ usage = "usage: %%prog [%s] [--help] [options] file_name" % "|".join(VALID_ACTIONS)
+ epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
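+    # return the epilog verbatim so optparse does not re-wrap its newlines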
+ OptionParser.format_epilog = lambda self, formatter: self.epilog
+ parser = OptionParser(usage=usage, epilog=epilog)
+
+ if not action:
+ parser.print_help()
+ sys.exit()
+
+ # options for all actions
+ #parser.add_option('-c', '--cipher', dest='cipher', default="AES256", help="cipher to use")
+ parser.add_option('--debug', dest='debug', action="store_true", help="debug")
+ parser.add_option('--vault-password-file', dest='password_file',
+ help="vault password file", default=C.DEFAULT_VAULT_PASSWORD_FILE)
+
+ # options specific to actions
+ if action == "create":
+ parser.set_usage("usage: %prog create [options] file_name")
+ elif action == "decrypt":
+ parser.set_usage("usage: %prog decrypt [options] file_name")
+ elif action == "edit":
+ parser.set_usage("usage: %prog edit [options] file_name")
+ elif action == "view":
+ parser.set_usage("usage: %prog view [options] file_name")
+ elif action == "encrypt":
+ parser.set_usage("usage: %prog encrypt [options] file_name")
+ elif action == "rekey":
+ parser.set_usage("usage: %prog rekey [options] file_name")
+
+ # done, return the parser
+ return parser
+
+def get_action(args):
+ """
+ Get the action the user wants to execute from the
+ sys argv list.
+ """
+ for i in range(0,len(args)):
+ arg = args[i]
+ if arg in VALID_ACTIONS:
+ del args[i]
+ return arg
+ return None
+
+def get_opt(options, k, defval=""):
+ """
+ Returns an option from an Optparse values instance.
+ """
+ try:
+ data = getattr(options, k)
+ except:
+ return defval
+ if k == "roles_path":
+ if os.pathsep in data:
+ data = data.split(os.pathsep)[0]
+ return data
+
+#-------------------------------------------------------------------------------------
+# Command functions
+#-------------------------------------------------------------------------------------
+
+def execute_create(args, options, parser):
+ if len(args) > 1:
+ raise errors.AnsibleError("'create' does not accept more than one filename")
+
+ if not options.password_file:
+ password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)
+ else:
+ password = utils.read_vault_file(options.password_file)
+
+ cipher = 'AES256'
+ if hasattr(options, 'cipher'):
+ cipher = options.cipher
+
+ this_editor = VaultEditor(cipher, password, args[0])
+ this_editor.create_file()
+
+def execute_decrypt(args, options, parser):
+
+ if not options.password_file:
+ password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
+ else:
+ password = utils.read_vault_file(options.password_file)
+
+ cipher = 'AES256'
+ if hasattr(options, 'cipher'):
+ cipher = options.cipher
+
+ for f in args:
+ this_editor = VaultEditor(cipher, password, f)
+ this_editor.decrypt_file()
+
+ print "Decryption successful"
+
+def execute_edit(args, options, parser):
+
+ if len(args) > 1:
+ raise errors.AnsibleError("edit does not accept more than one filename")
+
+ if not options.password_file:
+ password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
+ else:
+ password = utils.read_vault_file(options.password_file)
+
+ cipher = None
+
+ for f in args:
+ this_editor = VaultEditor(cipher, password, f)
+ this_editor.edit_file()
+
+def execute_view(args, options, parser):
+
+ if len(args) > 1:
+ raise errors.AnsibleError("view does not accept more than one filename")
+
+ if not options.password_file:
+ password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
+ else:
+ password = utils.read_vault_file(options.password_file)
+
+ cipher = None
+
+ for f in args:
+ this_editor = VaultEditor(cipher, password, f)
+ this_editor.view_file()
+
+def execute_encrypt(args, options, parser):
+
+ if not options.password_file:
+ password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)
+ else:
+ password = utils.read_vault_file(options.password_file)
+
+ cipher = 'AES256'
+ if hasattr(options, 'cipher'):
+ cipher = options.cipher
+
+ for f in args:
+ this_editor = VaultEditor(cipher, password, f)
+ this_editor.encrypt_file()
+
+ print "Encryption successful"
+
+def execute_rekey(args, options, parser):
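+    # unlock the files with the current password, then prompt for and apply a new one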
+
+ if not options.password_file:
+ password, __ = utils.ask_vault_passwords(ask_vault_pass=True)
+ else:
+ password = utils.read_vault_file(options.password_file)
+
+ __, new_password = utils.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)
+
+ cipher = None
+ for f in args:
+ this_editor = VaultEditor(cipher, password, f)
+ this_editor.rekey_file(new_password)
+
+ print "Rekey successful"
+
+#-------------------------------------------------------------------------------------
+# MAIN
+#-------------------------------------------------------------------------------------
+
+def main():
+
+ action = get_action(sys.argv)
+ parser = build_option_parser(action)
+ (options, args) = parser.parse_args()
+
+ if not len(args):
+ raise errors.AnsibleError(
+ "The '%s' command requires a filename as the first argument" % action
+ )
+
+ # execute the desired action
+ try:
+ fn = globals()["execute_%s" % action]
+ fn(args, options, parser)
+ except Exception, err:
+ if options.debug:
+ print traceback.format_exc()
+ print "ERROR:",err
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
diff --git a/v2/hacking/README.md b/v1/hacking/README.md
similarity index 94%
rename from v2/hacking/README.md
rename to v1/hacking/README.md
index 6d65464eee8..ae8db7e3a9b 100644
--- a/v2/hacking/README.md
+++ b/v1/hacking/README.md
@@ -29,7 +29,7 @@ a module outside of the ansible program, locally, on the current machine.
Example:
- $ ./hacking/test-module -m library/commands/shell -a "echo hi"
+ $ ./hacking/test-module -m lib/ansible/modules/core/commands/shell -a "echo hi"
This is a good way to insert a breakpoint into a module, for instance.
diff --git a/v2/hacking/authors.sh b/v1/hacking/authors.sh
similarity index 100%
rename from v2/hacking/authors.sh
rename to v1/hacking/authors.sh
diff --git a/v2/hacking/env-setup b/v1/hacking/env-setup
similarity index 95%
rename from v2/hacking/env-setup
rename to v1/hacking/env-setup
index 8f2c331fe46..29f4828410a 100644
--- a/v2/hacking/env-setup
+++ b/v1/hacking/env-setup
@@ -54,11 +54,11 @@ else
current_dir="$ANSIBLE_HOME"
fi
cd "$ANSIBLE_HOME"
-#if [ "$verbosity" = silent ] ; then
-# gen_egg_info > /dev/null 2>&1
-#else
-# gen_egg_info
-#fi
+if [ "$verbosity" = silent ] ; then
+ gen_egg_info > /dev/null 2>&1
+else
+ gen_egg_info
+fi
cd "$current_dir"
if [ "$verbosity" != silent ] ; then
diff --git a/v2/hacking/env-setup.fish b/v1/hacking/env-setup.fish
similarity index 81%
rename from v2/hacking/env-setup.fish
rename to v1/hacking/env-setup.fish
index 05fb60672d1..9deffb4e3d9 100644
--- a/v2/hacking/env-setup.fish
+++ b/v1/hacking/env-setup.fish
@@ -4,8 +4,8 @@
set HACKING_DIR (dirname (status -f))
set FULL_PATH (python -c "import os; print(os.path.realpath('$HACKING_DIR'))")
set ANSIBLE_HOME (dirname $FULL_PATH)
-set PREFIX_PYTHONPATH $ANSIBLE_HOME/lib
-set PREFIX_PATH $ANSIBLE_HOME/bin
+set PREFIX_PYTHONPATH $ANSIBLE_HOME/
+set PREFIX_PATH $ANSIBLE_HOME/bin
set PREFIX_MANPATH $ANSIBLE_HOME/docs/man
# Set PYTHONPATH
@@ -36,6 +36,16 @@ end
set -gx ANSIBLE_LIBRARY $ANSIBLE_HOME/library
+# Generate egg_info so that pkg_resources works
+pushd $ANSIBLE_HOME
+python setup.py egg_info
+if test -e $PREFIX_PYTHONPATH/ansible*.egg-info
+ rm -r $PREFIX_PYTHONPATH/ansible*.egg-info
+end
+mv ansible*egg-info $PREFIX_PYTHONPATH
+popd
+
+
if set -q argv
switch $argv
case '-q' '--quiet'
diff --git a/v2/hacking/get_library.py b/v1/hacking/get_library.py
similarity index 100%
rename from v2/hacking/get_library.py
rename to v1/hacking/get_library.py
diff --git a/v2/hacking/module_formatter.py b/v1/hacking/module_formatter.py
similarity index 96%
rename from v2/hacking/module_formatter.py
rename to v1/hacking/module_formatter.py
index e70eb982de0..acddd700930 100755
--- a/v2/hacking/module_formatter.py
+++ b/v1/hacking/module_formatter.py
@@ -33,8 +33,8 @@ import subprocess
import cgi
from jinja2 import Environment, FileSystemLoader
-import ansible.utils
-import ansible.utils.module_docs as module_docs
+from ansible.utils import module_docs
+from ansible.utils.vars import merge_hash
#####################################################################################
# constants and paths
@@ -68,7 +68,7 @@ def rst_ify(text):
t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
t = _BOLD.sub(r'**' + r"\1" + r"**", t)
- t = _MODULE.sub(r'``' + r"\1" + r"``", t)
+ t = _MODULE.sub(r':ref:`' + r"\1 <\1>" + r"`", t)
t = _URL.sub(r"\1", t)
t = _CONST.sub(r'``' + r"\1" + r"``", t)
@@ -135,7 +135,7 @@ def list_modules(module_dir, depth=0):
res = list_modules(d, depth + 1)
for key in res.keys():
if key in categories:
- categories[key] = ansible.utils.merge_hash(categories[key], res[key])
+ categories[key] = merge_hash(categories[key], res[key])
res.pop(key, None)
if depth < 2:
@@ -236,11 +236,11 @@ def process_module(module, options, env, template, outputname, module_map, alias
print "rendering: %s" % module
# use ansible core library to parse out doc metadata YAML and plaintext examples
- doc, examples, returndocs = ansible.utils.module_docs.get_docstring(fname, verbose=options.verbose)
+ doc, examples, returndocs = module_docs.get_docstring(fname, verbose=options.verbose)
# crash if module is missing documentation and not explicitly hidden from docs index
if doc is None:
- if module in ansible.utils.module_docs.BLACKLIST_MODULES:
+ if module in module_docs.BLACKLIST_MODULES:
return "SKIPPED"
else:
sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module))
@@ -278,8 +278,9 @@ def process_module(module, options, env, template, outputname, module_map, alias
if added and added_float < TO_OLD_TO_BE_NOTABLE:
del doc['version_added']
- for (k,v) in doc['options'].iteritems():
- all_keys.append(k)
+ if 'options' in doc:
+ for (k,v) in doc['options'].iteritems():
+ all_keys.append(k)
all_keys = sorted(all_keys)
@@ -289,6 +290,10 @@ def process_module(module, options, env, template, outputname, module_map, alias
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['ansible_version'] = options.ansible_version
doc['plainexamples'] = examples #plain text
+ if returndocs:
+ doc['returndocs'] = yaml.safe_load(returndocs)
+ else:
+ doc['returndocs'] = None
# here is where we build the table of contents...
@@ -384,7 +389,7 @@ def process_category(category, categories, options, env, template, outputname):
category_file.write("""\n\n
.. note::
- %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale.
- - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less activity maintained than 'core' modules.
+ - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less actively maintained than 'core' modules.
- Tickets filed on modules are filed to different repos than those on the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub `_, extras tickets to `ansible/ansible-modules-extras on GitHub `_
""" % (DEPRECATED, NOTCORE))
category_file.close()
diff --git a/v2/hacking/templates/rst.j2 b/v1/hacking/templates/rst.j2
similarity index 65%
rename from v2/hacking/templates/rst.j2
rename to v1/hacking/templates/rst.j2
index 59b8f35474c..f6f38e59101 100644
--- a/v2/hacking/templates/rst.j2
+++ b/v1/hacking/templates/rst.j2
@@ -106,6 +106,64 @@ Examples
{% endif %}
{% endif %}
+
+{% if returndocs %}
+Return Values
+-------------
+
+Common return values are documented here :doc:`common_return_values`, the following are the fields unique to this module:
+
+.. raw:: html
+
+
+
+ name
+ description
+ returned
+ type
+ sample
+
+
+ {% for entry in returndocs %}
+
+ @{ entry }@
+ @{ returndocs[entry].description }@
+ @{ returndocs[entry].returned }@
+ @{ returndocs[entry].type }@
+ @{ returndocs[entry].sample}@
+
+ {% if returndocs[entry].type == 'dictionary' %}
+ contains:
+
+
+
+ name
+ description
+ returned
+ type
+ sample
+
+
+ {% for sub in returndocs[entry].contains %}
+
+ @{ sub }@
+ @{ returndocs[entry].contains[sub].description }@
+ @{ returndocs[entry].contains[sub].returned }@
+ @{ returndocs[entry].contains[sub].type }@
+ @{ returndocs[entry].contains[sub].sample}@
+
+ {% endfor %}
+
+
+
+
+ {% endif %}
+ {% endfor %}
+
+
+
+{% endif %}
+
{% if notes %}
{% for note in notes %}
.. note:: @{ note | convert_symbols_to_format }@
@@ -119,11 +177,11 @@ Examples
This is a Core Module
---------------------
-This source of this module is hosted on GitHub in the `ansible-modules-core `_ repo.
-
+The source of this module is hosted on GitHub in the `ansible-modules-core `_ repo.
+
If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
-Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_.
+Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_.
Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree.
@@ -135,10 +193,10 @@ This is an Extras Module
------------------------
This source of this module is hosted on GitHub in the `ansible-modules-extras `_ repo.
-
+
If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
-Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_.
+Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_.
Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree.
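For reference, the new Return Values section above is driven by the RETURN documentation embedded in a module. A minimal, purely illustrative RETURN block of the shape the template walks (description, returned, type, sample, and contains for dictionary types — field names taken from the template, module data invented for the example) might look like this:

    RETURN = '''
    instance:
        description: information about the instance that was created
        returned: success
        type: dictionary
        sample: {"id": "i-1234567890", "state": "running"}
        contains:
            id:
                description: unique id of the instance
                returned: success
                type: string
                sample: i-1234567890
    '''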
diff --git a/v2/hacking/test-module b/v1/hacking/test-module
similarity index 94%
rename from v2/hacking/test-module
rename to v1/hacking/test-module
index b672e23e260..c226f32e889 100755
--- a/v2/hacking/test-module
+++ b/v1/hacking/test-module
@@ -34,9 +34,8 @@ import os
import subprocess
import traceback
import optparse
-
-from ansible import utils
-from ansible import module_common
+import ansible.utils as utils
+import ansible.module_common as module_common
import ansible.constants as C
try:
@@ -59,7 +58,7 @@ def parse():
parser.add_option('-D', '--debugger', dest='debugger',
help="path to python debugger (e.g. /usr/bin/pdb)")
parser.add_option('-I', '--interpreter', dest='interpreter',
- help="path to interpeter to use for this module (e.g. ansible_python_interpreter=/usr/bin/python)",
+ help="path to interpreter to use for this module (e.g. ansible_python_interpreter=/usr/bin/python)",
metavar='INTERPRETER_TYPE=INTERPRETER_PATH')
parser.add_option('-c', '--check', dest='check', action='store_true',
help="run the module in check mode")
@@ -88,6 +87,8 @@ def boilerplate_module(modfile, args, interpreter, check):
#module_data = module_fh.read()
#module_fh.close()
+ replacer = module_common.ModuleReplacer()
+
#included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1
complex_args = {}
@@ -103,7 +104,7 @@ def boilerplate_module(modfile, args, interpreter, check):
inject = {}
if interpreter:
if '=' not in interpreter:
- print 'interpeter must by in the form of ansible_python_interpreter=/usr/bin/python'
+ print 'interpreter must be in the form of ansible_python_interpreter=/usr/bin/python'
sys.exit(1)
interpreter_type, interpreter_path = interpreter.split('=')
if not interpreter_type.startswith('ansible_'):
@@ -115,7 +116,7 @@ def boilerplate_module(modfile, args, interpreter, check):
if check:
complex_args['CHECKMODE'] = True
- (module_data, module_style, shebang) = module_common.modify_module(
+ (module_data, module_style, shebang) = replacer.modify_module(
modfile,
complex_args,
args,
diff --git a/v1/hacking/update.sh b/v1/hacking/update.sh
new file mode 100755
index 00000000000..5979dd0ab2b
--- /dev/null
+++ b/v1/hacking/update.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+git pull --rebase
+git submodule update --init --recursive
diff --git a/test/units/README.md b/v1/tests/README.md
similarity index 100%
rename from test/units/README.md
rename to v1/tests/README.md
diff --git a/test/units/TestConstants.py b/v1/tests/TestConstants.py
similarity index 100%
rename from test/units/TestConstants.py
rename to v1/tests/TestConstants.py
diff --git a/test/units/TestFilters.py b/v1/tests/TestFilters.py
similarity index 100%
rename from test/units/TestFilters.py
rename to v1/tests/TestFilters.py
diff --git a/test/units/TestInventory.py b/v1/tests/TestInventory.py
similarity index 100%
rename from test/units/TestInventory.py
rename to v1/tests/TestInventory.py
diff --git a/test/units/TestModuleUtilsBasic.py b/v1/tests/TestModuleUtilsBasic.py
similarity index 100%
rename from test/units/TestModuleUtilsBasic.py
rename to v1/tests/TestModuleUtilsBasic.py
diff --git a/v1/tests/TestModuleUtilsDatabase.py b/v1/tests/TestModuleUtilsDatabase.py
new file mode 100644
index 00000000000..67da0b60e0b
--- /dev/null
+++ b/v1/tests/TestModuleUtilsDatabase.py
@@ -0,0 +1,118 @@
+import collections
+import mock
+import os
+import re
+
+from nose.tools import eq_
+try:
+ from nose.tools import assert_raises_regexp
+except ImportError:
+ # Python < 2.7
+ def assert_raises_regexp(expected, regexp, callable, *a, **kw):
+ try:
+ callable(*a, **kw)
+ except expected as e:
+ if isinstance(regexp, basestring):
+ regexp = re.compile(regexp)
+ if not regexp.search(str(e)):
+ raise Exception('"%s" does not match "%s"' %
+ (regexp.pattern, str(e)))
+ else:
+ if hasattr(expected,'__name__'): excName = expected.__name__
+ else: excName = str(expected)
+ raise AssertionError("%s not raised" % excName)
+
+from ansible.module_utils.database import (
+ pg_quote_identifier,
+ SQLParseError,
+)
+
+
+# Note: Using nose's generator test cases here so we can't inherit from
+# unittest.TestCase
+class TestQuotePgIdentifier(object):
+
+ # These are all valid strings
+ # The results are based on interpreting the identifier as a table name
+ valid = {
+ # User quoted
+ '"public.table"': '"public.table"',
+ '"public"."table"': '"public"."table"',
+ '"schema test"."table test"': '"schema test"."table test"',
+
+ # We quote part
+ 'public.table': '"public"."table"',
+ '"public".table': '"public"."table"',
+ 'public."table"': '"public"."table"',
+ 'schema test.table test': '"schema test"."table test"',
+ '"schema test".table test': '"schema test"."table test"',
+ 'schema test."table test"': '"schema test"."table test"',
+
+ # Embedded double quotes
+ 'table "test"': '"table ""test"""',
+ 'public."table ""test"""': '"public"."table ""test"""',
+ 'public.table "test"': '"public"."table ""test"""',
+ 'schema "test".table': '"schema ""test"""."table"',
+ '"schema ""test""".table': '"schema ""test"""."table"',
+ '"""wat"""."""test"""': '"""wat"""."""test"""',
+ # Sigh, handle these as well:
+ '"no end quote': '"""no end quote"',
+ 'schema."table': '"schema"."""table"',
+ '"schema.table': '"""schema"."table"',
+ 'schema."table.something': '"schema"."""table"."something"',
+
+ # Embedded dots
+ '"schema.test"."table.test"': '"schema.test"."table.test"',
+ '"schema.".table': '"schema."."table"',
+ '"schema."."table"': '"schema."."table"',
+ 'schema.".table"': '"schema".".table"',
+ '"schema".".table"': '"schema".".table"',
+ '"schema.".".table"': '"schema.".".table"',
+ # These are valid but maybe not what the user intended
+ '."table"': '".""table"""',
+ 'table.': '"table."',
+ }
+
+ invalid = {
+ ('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots',
+ ('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots',
+ ('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots',
+ ('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots",
+ ('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots",
+ ('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots",
+ ('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots",
+ ('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes',
+ ('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes',
+ ('"schema."table"','table'): 'User escaped identifiers must escape extra quotes',
+ ('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot',
+ }
+
+ def check_valid_quotes(self, identifier, quoted_identifier):
+ eq_(pg_quote_identifier(identifier, 'table'), quoted_identifier)
+
+ def test_valid_quotes(self):
+ for identifier in self.valid:
+ yield self.check_valid_quotes, identifier, self.valid[identifier]
+
+ def check_invalid_quotes(self, identifier, id_type, msg):
+ assert_raises_regexp(SQLParseError, msg, pg_quote_identifier, *(identifier, id_type))
+
+ def test_invalid_quotes(self):
+ for test in self.invalid:
+ yield self.check_invalid_quotes, test[0], test[1], self.invalid[test]
+
+ def test_how_many_dots(self):
+ eq_(pg_quote_identifier('role', 'role'), '"role"')
+ assert_raises_regexp(SQLParseError, "PostgreSQL does not support role with more than 1 dots", pg_quote_identifier, *('role.more', 'role'))
+
+ eq_(pg_quote_identifier('db', 'database'), '"db"')
+ assert_raises_regexp(SQLParseError, "PostgreSQL does not support database with more than 1 dots", pg_quote_identifier, *('db.more', 'database'))
+
+ eq_(pg_quote_identifier('db.schema', 'schema'), '"db"."schema"')
+ assert_raises_regexp(SQLParseError, "PostgreSQL does not support schema with more than 2 dots", pg_quote_identifier, *('db.schema.more', 'schema'))
+
+ eq_(pg_quote_identifier('db.schema.table', 'table'), '"db"."schema"."table"')
+ assert_raises_regexp(SQLParseError, "PostgreSQL does not support table with more than 3 dots", pg_quote_identifier, *('db.schema.table.more', 'table'))
+
+ eq_(pg_quote_identifier('db.schema.table.column', 'column'), '"db"."schema"."table"."column"')
+ assert_raises_regexp(SQLParseError, "PostgreSQL does not support column with more than 4 dots", pg_quote_identifier, *('db.schema.table.column.more', 'column'))
diff --git a/test/units/TestModules.py b/v1/tests/TestModules.py
similarity index 100%
rename from test/units/TestModules.py
rename to v1/tests/TestModules.py
diff --git a/test/units/TestPlayVarsFiles.py b/v1/tests/TestPlayVarsFiles.py
similarity index 100%
rename from test/units/TestPlayVarsFiles.py
rename to v1/tests/TestPlayVarsFiles.py
diff --git a/test/units/TestSynchronize.py b/v1/tests/TestSynchronize.py
similarity index 100%
rename from test/units/TestSynchronize.py
rename to v1/tests/TestSynchronize.py
diff --git a/test/units/TestUtils.py b/v1/tests/TestUtils.py
similarity index 100%
rename from test/units/TestUtils.py
rename to v1/tests/TestUtils.py
diff --git a/test/units/TestUtilsStringFunctions.py b/v1/tests/TestUtilsStringFunctions.py
similarity index 100%
rename from test/units/TestUtilsStringFunctions.py
rename to v1/tests/TestUtilsStringFunctions.py
diff --git a/test/units/TestVault.py b/v1/tests/TestVault.py
similarity index 100%
rename from test/units/TestVault.py
rename to v1/tests/TestVault.py
diff --git a/test/units/TestVaultEditor.py b/v1/tests/TestVaultEditor.py
similarity index 100%
rename from test/units/TestVaultEditor.py
rename to v1/tests/TestVaultEditor.py
diff --git a/test/units/ansible.cfg b/v1/tests/ansible.cfg
similarity index 100%
rename from test/units/ansible.cfg
rename to v1/tests/ansible.cfg
diff --git a/test/units/inventory_test_data/ansible_hosts b/v1/tests/inventory_test_data/ansible_hosts
similarity index 100%
rename from test/units/inventory_test_data/ansible_hosts
rename to v1/tests/inventory_test_data/ansible_hosts
diff --git a/test/units/inventory_test_data/broken.yml b/v1/tests/inventory_test_data/broken.yml
similarity index 100%
rename from test/units/inventory_test_data/broken.yml
rename to v1/tests/inventory_test_data/broken.yml
diff --git a/test/units/inventory_test_data/common_vars.yml b/v1/tests/inventory_test_data/common_vars.yml
similarity index 100%
rename from test/units/inventory_test_data/common_vars.yml
rename to v1/tests/inventory_test_data/common_vars.yml
diff --git a/test/units/inventory_test_data/complex_hosts b/v1/tests/inventory_test_data/complex_hosts
similarity index 100%
rename from test/units/inventory_test_data/complex_hosts
rename to v1/tests/inventory_test_data/complex_hosts
diff --git a/test/units/inventory_test_data/encrypted.yml b/v1/tests/inventory_test_data/encrypted.yml
similarity index 100%
rename from test/units/inventory_test_data/encrypted.yml
rename to v1/tests/inventory_test_data/encrypted.yml
diff --git a/test/units/inventory_test_data/hosts_list.yml b/v1/tests/inventory_test_data/hosts_list.yml
similarity index 100%
rename from test/units/inventory_test_data/hosts_list.yml
rename to v1/tests/inventory_test_data/hosts_list.yml
diff --git a/test/units/inventory_test_data/inventory/test_alpha_end_before_beg b/v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg
similarity index 100%
rename from test/units/inventory_test_data/inventory/test_alpha_end_before_beg
rename to v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg
diff --git a/test/units/inventory_test_data/inventory/test_combined_range b/v1/tests/inventory_test_data/inventory/test_combined_range
similarity index 100%
rename from test/units/inventory_test_data/inventory/test_combined_range
rename to v1/tests/inventory_test_data/inventory/test_combined_range
diff --git a/test/units/inventory_test_data/inventory/test_incorrect_format b/v1/tests/inventory_test_data/inventory/test_incorrect_format
similarity index 100%
rename from test/units/inventory_test_data/inventory/test_incorrect_format
rename to v1/tests/inventory_test_data/inventory/test_incorrect_format
diff --git a/test/units/inventory_test_data/inventory/test_incorrect_range b/v1/tests/inventory_test_data/inventory/test_incorrect_range
similarity index 100%
rename from test/units/inventory_test_data/inventory/test_incorrect_range
rename to v1/tests/inventory_test_data/inventory/test_incorrect_range
diff --git a/test/units/inventory_test_data/inventory/test_leading_range b/v1/tests/inventory_test_data/inventory/test_leading_range
similarity index 100%
rename from test/units/inventory_test_data/inventory/test_leading_range
rename to v1/tests/inventory_test_data/inventory/test_leading_range
diff --git a/test/units/inventory_test_data/inventory/test_missing_end b/v1/tests/inventory_test_data/inventory/test_missing_end
similarity index 100%
rename from test/units/inventory_test_data/inventory/test_missing_end
rename to v1/tests/inventory_test_data/inventory/test_missing_end
diff --git a/test/units/inventory_test_data/inventory_api.py b/v1/tests/inventory_test_data/inventory_api.py
similarity index 100%
rename from test/units/inventory_test_data/inventory_api.py
rename to v1/tests/inventory_test_data/inventory_api.py
diff --git a/test/units/inventory_test_data/inventory_dir/0hosts b/v1/tests/inventory_test_data/inventory_dir/0hosts
similarity index 100%
rename from test/units/inventory_test_data/inventory_dir/0hosts
rename to v1/tests/inventory_test_data/inventory_dir/0hosts
diff --git a/test/units/inventory_test_data/inventory_dir/1mythology b/v1/tests/inventory_test_data/inventory_dir/1mythology
similarity index 100%
rename from test/units/inventory_test_data/inventory_dir/1mythology
rename to v1/tests/inventory_test_data/inventory_dir/1mythology
diff --git a/test/units/inventory_test_data/inventory_dir/2levels b/v1/tests/inventory_test_data/inventory_dir/2levels
similarity index 100%
rename from test/units/inventory_test_data/inventory_dir/2levels
rename to v1/tests/inventory_test_data/inventory_dir/2levels
diff --git a/test/units/inventory_test_data/inventory_dir/3comments b/v1/tests/inventory_test_data/inventory_dir/3comments
similarity index 100%
rename from test/units/inventory_test_data/inventory_dir/3comments
rename to v1/tests/inventory_test_data/inventory_dir/3comments
diff --git a/test/units/inventory_test_data/inventory_dir/4skip_extensions.ini b/v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini
similarity index 100%
rename from test/units/inventory_test_data/inventory_dir/4skip_extensions.ini
rename to v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini
diff --git a/test/units/inventory_test_data/large_range b/v1/tests/inventory_test_data/large_range
similarity index 100%
rename from test/units/inventory_test_data/large_range
rename to v1/tests/inventory_test_data/large_range
diff --git a/test/units/inventory_test_data/restrict_pattern b/v1/tests/inventory_test_data/restrict_pattern
similarity index 100%
rename from test/units/inventory_test_data/restrict_pattern
rename to v1/tests/inventory_test_data/restrict_pattern
diff --git a/test/units/inventory_test_data/simple_hosts b/v1/tests/inventory_test_data/simple_hosts
similarity index 100%
rename from test/units/inventory_test_data/simple_hosts
rename to v1/tests/inventory_test_data/simple_hosts
diff --git a/test/units/module_tests/TestApt.py b/v1/tests/module_tests/TestApt.py
similarity index 100%
rename from test/units/module_tests/TestApt.py
rename to v1/tests/module_tests/TestApt.py
diff --git a/test/units/module_tests/TestDocker.py b/v1/tests/module_tests/TestDocker.py
similarity index 100%
rename from test/units/module_tests/TestDocker.py
rename to v1/tests/module_tests/TestDocker.py
diff --git a/test/units/vault_test_data/foo-ansible-1.0.yml b/v1/tests/vault_test_data/foo-ansible-1.0.yml
similarity index 100%
rename from test/units/vault_test_data/foo-ansible-1.0.yml
rename to v1/tests/vault_test_data/foo-ansible-1.0.yml
diff --git a/test/units/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml b/v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml
similarity index 100%
rename from test/units/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml
rename to v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml
diff --git a/test/units/vault_test_data/foo-ansible-1.1.yml b/v1/tests/vault_test_data/foo-ansible-1.1.yml
similarity index 100%
rename from test/units/vault_test_data/foo-ansible-1.1.yml
rename to v1/tests/vault_test_data/foo-ansible-1.1.yml
diff --git a/v2/README-tests.md b/v2/README-tests.md
deleted file mode 100644
index 956160b653a..00000000000
--- a/v2/README-tests.md
+++ /dev/null
@@ -1,33 +0,0 @@
-Ansible Test System
-===================
-
-Folders
-=======
-
-test
-----
-
-Unit tests that test small pieces of code not suited for the integration test
-layer, usually very API based, and should leverage mock interfaces rather than
-producing side effects.
-
-Playbook engine code is better suited for integration tests.
-
-Requirements: sudo pip install paramiko PyYAML jinja2 httplib2 passlib unittest2 mock
-
-integration
------------
-
-Integration test layer, constructed using playbooks.
-
-Some tests may require cloud credentials, others will not, and destructive
-tests are separated from non-destructive so a subset can be run on development
-machines.
-
-learn more
-----------
-
-hop into a subdirectory and see the associated README.md for more info.
-
-
-
diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py
deleted file mode 100644
index 1c168a8e264..00000000000
--- a/v2/ansible/executor/connection_info.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pipes
-import random
-
-from ansible import constants as C
-from ansible.template import Templar
-from ansible.utils.boolean import boolean
-from ansible.errors import AnsibleError
-
-__all__ = ['ConnectionInformation']
-
-
-class ConnectionInformation:
-
- '''
- This class is used to consolidate the connection information for
- hosts in a play and child tasks, where the task may override some
- connection/authentication information.
- '''
-
- def __init__(self, play=None, options=None, passwords=None):
-
- if passwords is None:
- passwords = {}
-
- # connection
- self.connection = None
- self.remote_addr = None
- self.remote_user = None
- self.password = passwords.get('conn_pass','')
- self.port = None
- self.private_key_file = C.DEFAULT_PRIVATE_KEY_FILE
- self.timeout = C.DEFAULT_TIMEOUT
-
- # privilege escalation
- self.become = None
- self.become_method = None
- self.become_user = None
- self.become_pass = passwords.get('become_pass','')
-
- # general flags (should we move out?)
- self.verbosity = 0
- self.only_tags = set()
- self.skip_tags = set()
- self.no_log = False
- self.check_mode = False
-
- #TODO: just pull options setup to above?
- # set options before play to allow play to override them
- if options:
- self.set_options(options)
-
- if play:
- self.set_play(play)
-
- def __repr__(self):
- value = "CONNECTION INFO:\n"
- fields = self._get_fields()
- fields.sort()
- for field in fields:
- value += "%20s : %s\n" % (field, getattr(self, field))
- return value
-
- def set_play(self, play):
- '''
- Configures this connection information instance with data from
- the play class.
- '''
-
- if play.connection:
- self.connection = play.connection
-
- if play.remote_user:
- self.remote_user = play.remote_user
-
- if play.port:
- self.port = int(play.port)
-
- if play.become is not None:
- self.become = play.become
- if play.become_method:
- self.become_method = play.become_method
- if play.become_user:
- self.become_user = play.become_user
- self.become_pass = play.become_pass
-
- # non connection related
- self.no_log = play.no_log
- self.environment = play.environment
-
- def set_options(self, options):
- '''
- Configures this connection information instance with data from
- options specified by the user on the command line. These have a
- higher precedence than those set on the play or host.
- '''
-
- if options.connection:
- self.connection = options.connection
-
- self.remote_user = options.remote_user
- self.private_key_file = options.private_key_file
-
- # privilege escalation
- self.become = options.become
- self.become_method = options.become_method
- self.become_user = options.become_user
- self.become_pass = ''
-
- # general flags (should we move out?)
- if options.verbosity:
- self.verbosity = options.verbosity
- #if options.no_log:
- # self.no_log = boolean(options.no_log)
- if options.check:
- self.check_mode = boolean(options.check)
-
- # get the tag info from options, converting a comma-separated list
- # of values into a proper list if need be. We check to see if the
- # options have the attribute, as it is not always added via the CLI
- if hasattr(options, 'tags'):
- if isinstance(options.tags, list):
- self.only_tags.update(options.tags)
- elif isinstance(options.tags, basestring):
- self.only_tags.update(options.tags.split(','))
-
- if len(self.only_tags) == 0:
- self.only_tags = set(['all'])
-
- if hasattr(options, 'skip_tags'):
- if isinstance(options.skip_tags, list):
- self.skip_tags.update(options.skip_tags)
- elif isinstance(options.skip_tags, basestring):
- self.skip_tags.update(options.skip_tags.split(','))
-
- def copy(self, ci):
- '''
- Copies the connection info from another connection info object, used
- when merging in data from task overrides.
- '''
-
- for field in self._get_fields():
- value = getattr(ci, field, None)
- if isinstance(value, dict):
- setattr(self, field, value.copy())
- elif isinstance(value, set):
- setattr(self, field, value.copy())
- elif isinstance(value, list):
- setattr(self, field, value[:])
- else:
- setattr(self, field, value)
-
- def set_task_override(self, task):
- '''
- Sets attributes from the task if they are set, which will override
- those from the play.
- '''
-
- new_info = ConnectionInformation()
- new_info.copy(self)
-
- for attr in ('connection', 'remote_user', 'become', 'become_user', 'become_pass', 'become_method', 'environment', 'no_log'):
- if hasattr(task, attr):
- attr_val = getattr(task, attr)
- if attr_val:
- setattr(new_info, attr, attr_val)
-
- return new_info
-
- def make_become_cmd(self, cmd, executable, become_settings=None):
-
- """
- helper function to create privilege escalation commands
- """
-
- # FIXME: become settings should probably be stored in the connection info itself
- if become_settings is None:
- become_settings = {}
-
- randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
- success_key = 'BECOME-SUCCESS-%s' % randbits
- prompt = None
- becomecmd = None
-
- executable = executable or '$SHELL'
-
- success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))
- if self.become:
- if self.become_method == 'sudo':
- # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
- # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
- # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
- # string to the user's shell. We loop reading output until we see the randomly-generated
- # sudo prompt set with the -p option.
- prompt = '[sudo via ansible, key=%s] password: ' % randbits
- exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE)
- flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS)
- becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
- (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, success_cmd)
-
- elif self.become_method == 'su':
- exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE)
- flags = become_settings.get('su_flags', C.DEFAULT_SU_FLAGS)
- becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd)
-
- elif self.become_method == 'pbrun':
- exe = become_settings.get('pbrun_exe', 'pbrun')
- flags = become_settings.get('pbrun_flags', '')
- becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, self.become_user, success_cmd)
-
- elif self.become_method == 'pfexec':
- exe = become_settings.get('pfexec_exe', 'pbrun')
- flags = become_settings.get('pfexec_flags', '')
- # No user as it uses it's own exec_attr to figure it out
- becomecmd = '%s %s "%s"' % (exe, flags, success_cmd)
-
- else:
- raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)
-
- return (('%s -c ' % executable) + pipes.quote(becomecmd), prompt, success_key)
-
- return (cmd, "", "")
-
- def check_become_success(self, output, become_settings):
- #TODO: implement
- pass
-
- def _get_fields(self):
- return [i for i in self.__dict__.keys() if i[:1] != '_']
-
- def post_validate(self, templar):
- '''
- Finalizes templated values which may be set on this objects fields.
- '''
-
- for field in self._get_fields():
- value = templar.template(getattr(self, field))
- setattr(self, field, value)
-
- def update_vars(self, variables):
- '''
- Adds 'magic' variables relating to connections to the variable dictionary provided.
- '''
-
- variables['ansible_connection'] = self.connection
- variables['ansible_ssh_host'] = self.remote_addr
- variables['ansible_ssh_pass'] = self.password
- variables['ansible_ssh_port'] = self.port
- variables['ansible_ssh_user'] = self.remote_user
- variables['ansible_ssh_private_key_file'] = self.private_key_file
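A minimal sketch of how this removed class was used (play, options and task stand in for the real objects; precedence is play settings, then command-line options, then per-task overrides, as the docstrings above describe):

    conn_info = ConnectionInformation(play=play, options=options,
                                      passwords={'conn_pass': '', 'become_pass': ''})

    # Task-level settings never mutate the shared object; they produce a copy.
    task_info = conn_info.set_task_override(task)

    # make_become_cmd() wraps a command for privilege escalation and returns the
    # wrapped command line, the password prompt to watch for, and a success marker.
    cmd, prompt, success_key = task_info.make_become_cmd('/usr/bin/id', executable='/bin/sh')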
diff --git a/v2/ansible/inventory/host.py b/v2/ansible/inventory/host.py
deleted file mode 100644
index 29d6afd9912..00000000000
--- a/v2/ansible/inventory/host.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible import constants as C
-from ansible.inventory.group import Group
-from ansible.utils.vars import combine_vars
-
-__all__ = ['Host']
-
-class Host:
- ''' a single ansible host '''
-
- #__slots__ = [ 'name', 'vars', 'groups' ]
-
- def __getstate__(self):
- return self.serialize()
-
- def __setstate__(self, data):
- return self.deserialize(data)
-
- def __eq__(self, other):
- return self.name == other.name
-
- def serialize(self):
- groups = []
- for group in self.groups:
- groups.append(group.serialize())
-
- return dict(
- name=self.name,
- vars=self.vars.copy(),
- ipv4_address=self.ipv4_address,
- ipv6_address=self.ipv6_address,
- port=self.port,
- gathered_facts=self._gathered_facts,
- groups=groups,
- )
-
- def deserialize(self, data):
- self.__init__()
-
- self.name = data.get('name')
- self.vars = data.get('vars', dict())
- self.ipv4_address = data.get('ipv4_address', '')
- self.ipv6_address = data.get('ipv6_address', '')
- self.port = data.get('port')
-
- groups = data.get('groups', [])
- for group_data in groups:
- g = Group()
- g.deserialize(group_data)
- self.groups.append(g)
-
- def __init__(self, name=None, port=None):
-
- self.name = name
- self.vars = {}
- self.groups = []
-
- self.ipv4_address = name
- self.ipv6_address = name
-
- if port and port != C.DEFAULT_REMOTE_PORT:
- self.port = int(port)
- else:
- self.port = C.DEFAULT_REMOTE_PORT
-
- self._gathered_facts = False
-
- def __repr__(self):
- return self.get_name()
-
- def get_name(self):
- return self.name
-
- @property
- def gathered_facts(self):
- return self._gathered_facts
-
- def set_gathered_facts(self, gathered):
- self._gathered_facts = gathered
-
- def add_group(self, group):
-
- self.groups.append(group)
-
- def set_variable(self, key, value):
-
- self.vars[key]=value
-
- def get_groups(self):
-
- groups = {}
- for g in self.groups:
- groups[g.name] = g
- ancestors = g.get_ancestors()
- for a in ancestors:
- groups[a.name] = a
- return groups.values()
-
- def get_vars(self):
-
- results = {}
- groups = self.get_groups()
- for group in sorted(groups, key=lambda g: g.depth):
- results = combine_vars(results, group.get_vars())
- results = combine_vars(results, self.vars)
- results['inventory_hostname'] = self.name
- results['inventory_hostname_short'] = self.name.split('.')[0]
- results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
- return results
-
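A short sketch of the variable merging this class provided (it assumes the v2 Group class from the import above, with an analogous name/vars API):

    from ansible.inventory.group import Group

    web = Group(name='webservers')          # assumed constructor signature
    web.set_variable('http_port', 80)

    h = Host(name='node1.example.com')
    h.add_group(web)
    h.set_variable('http_port', 8080)

    hostvars = h.get_vars()
    # Group vars are merged in depth order, host vars win, and the magic
    # inventory_hostname / inventory_hostname_short / group_names keys are added:
    # hostvars['http_port']                -> 8080
    # hostvars['inventory_hostname_short'] -> 'node1'
    # hostvars['group_names']              -> ['webservers']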
diff --git a/v2/ansible/module_utils/cloudstack.py b/v2/ansible/module_utils/cloudstack.py
deleted file mode 100644
index 2c891434bde..00000000000
--- a/v2/ansible/module_utils/cloudstack.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# (c) 2015, René Moser
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-try:
- from cs import CloudStack, CloudStackException, read_config
- has_lib_cs = True
-except ImportError:
- has_lib_cs = False
-
-
-class AnsibleCloudStack:
-
- def __init__(self, module):
- if not has_lib_cs:
- module.fail_json(msg="python library cs required: pip install cs")
-
- self.module = module
- self._connect()
-
- self.project_id = None
- self.ip_address_id = None
- self.zone_id = None
- self.vm_id = None
- self.os_type_id = None
- self.hypervisor = None
-
-
- def _connect(self):
- api_key = self.module.params.get('api_key')
- api_secret = self.module.params.get('secret_key')
- api_url = self.module.params.get('api_url')
- api_http_method = self.module.params.get('api_http_method')
-
- if api_key and api_secret and api_url:
- self.cs = CloudStack(
- endpoint=api_url,
- key=api_key,
- secret=api_secret,
- method=api_http_method
- )
- else:
- self.cs = CloudStack(**read_config())
-
-
- def get_project_id(self):
- if self.project_id:
- return self.project_id
-
- project = self.module.params.get('project')
- if not project:
- return None
-
- projects = self.cs.listProjects()
- if projects:
- for p in projects['project']:
- if project in [ p['name'], p['displaytext'], p['id'] ]:
- self.project_id = p['id']
- return self.project_id
- self.module.fail_json(msg="project '%s' not found" % project)
-
-
- def get_ip_address_id(self):
- if self.ip_address_id:
- return self.ip_address_id
-
- ip_address = self.module.params.get('ip_address')
- if not ip_address:
- self.module.fail_json(msg="IP address param 'ip_address' is required")
-
- args = {}
- args['ipaddress'] = ip_address
- args['projectid'] = self.get_project_id()
- ip_addresses = self.cs.listPublicIpAddresses(**args)
-
- if not ip_addresses:
- self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
-
- self.ip_address_id = ip_addresses['publicipaddress'][0]['id']
- return self.ip_address_id
-
-
- def get_vm_id(self):
- if self.vm_id:
- return self.vm_id
-
- vm = self.module.params.get('vm')
- if not vm:
- self.module.fail_json(msg="Virtual machine param 'vm' is required")
-
- args = {}
- args['projectid'] = self.get_project_id()
- vms = self.cs.listVirtualMachines(**args)
- if vms:
- for v in vms['virtualmachine']:
- if vm in [ v['displayname'], v['name'], v['id'] ]:
- self.vm_id = v['id']
- return self.vm_id
- self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
-
-
- def get_zone_id(self):
- if self.zone_id:
- return self.zone_id
-
- zone = self.module.params.get('zone')
- zones = self.cs.listZones()
-
- # use the first zone if no zone param given
- if not zone:
- self.zone_id = zones['zone'][0]['id']
- return self.zone_id
-
- if zones:
- for z in zones['zone']:
- if zone in [ z['name'], z['id'] ]:
- self.zone_id = z['id']
- return self.zone_id
- self.module.fail_json(msg="zone '%s' not found" % zone)
-
-
- def get_os_type_id(self):
- if self.os_type_id:
- return self.os_type_id
-
- os_type = self.module.params.get('os_type')
- if not os_type:
- return None
-
- os_types = self.cs.listOsTypes()
- if os_types:
- for o in os_types['ostype']:
- if os_type in [ o['description'], o['id'] ]:
- self.os_type_id = o['id']
- return self.os_type_id
- self.module.fail_json(msg="OS type '%s' not found" % os_type)
-
-
- def get_hypervisor(self):
- if self.hypervisor:
- return self.hypervisor
-
- hypervisor = self.module.params.get('hypervisor')
- hypervisors = self.cs.listHypervisors()
-
- # use the first hypervisor if no hypervisor param given
- if not hypervisor:
- self.hypervisor = hypervisors['hypervisor'][0]['name']
- return self.hypervisor
-
- for h in hypervisors['hypervisor']:
- if hypervisor.lower() == h['name'].lower():
- self.hypervisor = h['name']
- return self.hypervisor
- self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
-
-
- def _poll_job(self, job=None, key=None):
- if 'jobid' in job:
- while True:
- res = self.cs.queryAsyncJobResult(jobid=job['jobid'])
- if res['jobstatus'] != 0 and 'jobresult' in res:
- if 'errortext' in res['jobresult']:
- self.module.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
- if key and key in res['jobresult']:
- job = res['jobresult'][key]
- break
- time.sleep(2)
- return job
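For context, a hedged sketch of how a CloudStack module consumed this helper (module is an AnsibleModule whose params carry api_key/secret_key/api_url plus the usual zone/project/vm options):

    cs_helper = AnsibleCloudStack(module)

    zone_id = cs_helper.get_zone_id()        # falls back to the first zone if no 'zone' param
    project_id = cs_helper.get_project_id()  # None unless a 'project' param was supplied
    hypervisor = cs_helper.get_hypervisor()  # falls back to the first listed hypervisor

    # Asynchronous API calls return a job that is polled until it finishes, e.g.:
    # vm = cs_helper._poll_job(job=result, key='virtualmachine')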
diff --git a/v2/ansible/modules/core b/v2/ansible/modules/core
deleted file mode 160000
index 85c8a892c80..00000000000
--- a/v2/ansible/modules/core
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 85c8a892c80b92730831d95fa654ef6d35b0eca0
diff --git a/v2/ansible/modules/extras b/v2/ansible/modules/extras
deleted file mode 160000
index 70ea0585635..00000000000
--- a/v2/ansible/modules/extras
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 70ea05856356ad36f48b4bb7267d637efc56d292
diff --git a/v2/ansible/playbook/__init__.py b/v2/ansible/playbook/__init__.py
deleted file mode 100644
index 40e6638f239..00000000000
--- a/v2/ansible/playbook/__init__.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-
-from ansible.errors import AnsibleError, AnsibleParserError
-from ansible.parsing import DataLoader
-from ansible.playbook.attribute import Attribute, FieldAttribute
-from ansible.playbook.play import Play
-from ansible.playbook.playbook_include import PlaybookInclude
-from ansible.plugins import push_basedir
-
-
-__all__ = ['Playbook']
-
-
-class Playbook:
-
- def __init__(self, loader):
- # Entries in the datastructure of a playbook may
- # be either a play or an include statement
- self._entries = []
- self._basedir = os.getcwd()
- self._loader = loader
-
- @staticmethod
- def load(file_name, variable_manager=None, loader=None):
- pb = Playbook(loader=loader)
- pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
- return pb
-
- def _load_playbook_data(self, file_name, variable_manager):
-
- if os.path.isabs(file_name):
- self._basedir = os.path.dirname(file_name)
- else:
- self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
-
- # set the loaders basedir
- self._loader.set_basedir(self._basedir)
-
- # also add the basedir to the list of module directories
- push_basedir(self._basedir)
-
- ds = self._loader.load_from_file(os.path.basename(file_name))
- if not isinstance(ds, list):
- raise AnsibleParserError("playbooks must be a list of plays", obj=ds)
-
- # Parse the playbook entries. For plays, we simply parse them
- # using the Play() object, and includes are parsed using the
- # PlaybookInclude() object
- for entry in ds:
- if not isinstance(entry, dict):
- raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
-
- if 'include' in entry:
- pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
- self._entries.extend(pb._entries)
- else:
- entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)
- self._entries.append(entry_obj)
-
- def get_loader(self):
- return self._loader
-
- def get_plays(self):
- return self._entries[:]
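Usage of the deleted class was straightforward; a sketch, assuming the DataLoader from ansible.parsing imported above:

    from ansible.parsing import DataLoader

    loader = DataLoader()
    pb = Playbook.load('site.yml', variable_manager=None, loader=loader)

    # Top-level 'include:' entries are expanded in place, so get_plays()
    # returns a flat list of Play objects.
    for play in pb.get_plays():
        print(play.get_name())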
diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py
deleted file mode 100644
index b99c01fdf74..00000000000
--- a/v2/ansible/playbook/play.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.errors import AnsibleError, AnsibleParserError
-
-from ansible.playbook.attribute import Attribute, FieldAttribute
-from ansible.playbook.base import Base
-from ansible.playbook.become import Become
-from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
-from ansible.playbook.role import Role
-from ansible.playbook.taggable import Taggable
-from ansible.playbook.block import Block
-
-from ansible.utils.vars import combine_vars
-
-
-__all__ = ['Play']
-
-
-class Play(Base, Taggable, Become):
-
- """
- A play is a language feature that represents a list of roles and/or
- task/handler blocks to execute on a given set of hosts.
-
- Usage:
-
- Play.load(datastructure) -> Play
- Play.something(...)
- """
-
- # =================================================================================
- # Connection-Related Attributes
-
- # TODO: generalize connection
- _accelerate = FieldAttribute(isa='bool', default=False)
- _accelerate_ipv6 = FieldAttribute(isa='bool', default=False)
- _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port
-
- # Connection
- _gather_facts = FieldAttribute(isa='string', default='smart')
- _hosts = FieldAttribute(isa='list', default=[], required=True)
- _name = FieldAttribute(isa='string', default='')
-
- # Variable Attributes
- _vars_files = FieldAttribute(isa='list', default=[])
- _vars_prompt = FieldAttribute(isa='dict', default=dict())
- _vault_password = FieldAttribute(isa='string')
-
- # Block (Task) Lists Attributes
- _handlers = FieldAttribute(isa='list', default=[])
- _pre_tasks = FieldAttribute(isa='list', default=[])
- _post_tasks = FieldAttribute(isa='list', default=[])
- _tasks = FieldAttribute(isa='list', default=[])
-
- # Role Attributes
- _roles = FieldAttribute(isa='list', default=[])
-
- # Flag/Setting Attributes
- _any_errors_fatal = FieldAttribute(isa='bool', default=False)
- _max_fail_percentage = FieldAttribute(isa='string', default='0')
- _serial = FieldAttribute(isa='int', default=0)
- _strategy = FieldAttribute(isa='string', default='linear')
-
- # =================================================================================
-
- def __init__(self):
- super(Play, self).__init__()
-
- def __repr__(self):
- return self.get_name()
-
- def get_name(self):
- ''' return the name of the Play '''
- return "PLAY: %s" % self._attributes.get('name')
-
- @staticmethod
- def load(data, variable_manager=None, loader=None):
- p = Play()
- return p.load_data(data, variable_manager=variable_manager, loader=loader)
-
- def preprocess_data(self, ds):
- '''
- Adjusts play datastructure to cleanup old/legacy items
- '''
-
- assert isinstance(ds, dict)
-
- # The use of 'user' in the Play datastructure was deprecated to
- # line up with the same change for Tasks, due to the fact that
- # 'user' conflicted with the user module.
- if 'user' in ds:
- # this should never happen, but error out with a helpful message
- # to the user if it does...
- if 'remote_user' in ds:
- raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds)
-
- ds['remote_user'] = ds['user']
- del ds['user']
-
- return super(Play, self).preprocess_data(ds)
-
- def _load_vars(self, attr, ds):
- '''
- Vars in a play can be specified either as a dictionary directly, or
- as a list of dictionaries. If the later, this method will turn the
- list into a single dictionary.
- '''
-
- try:
- if isinstance(ds, dict):
- return ds
- elif isinstance(ds, list):
- all_vars = dict()
- for item in ds:
- if not isinstance(item, dict):
- raise ValueError
- all_vars = combine_vars(all_vars, item)
- return all_vars
- else:
- raise ValueError
- except ValueError:
- raise AnsibleParserError("Vars in a playbook must be specified as a dictionary, or a list of dictionaries", obj=ds)
-
- def _load_tasks(self, attr, ds):
- '''
- Loads a list of blocks from a list which may be mixed tasks/blocks.
- Bare tasks outside of a block are given an implicit block.
- '''
- return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
-
- def _load_pre_tasks(self, attr, ds):
- '''
- Loads a list of blocks from a list which may be mixed tasks/blocks.
- Bare tasks outside of a block are given an implicit block.
- '''
- return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
-
- def _load_post_tasks(self, attr, ds):
- '''
- Loads a list of blocks from a list which may be mixed tasks/blocks.
- Bare tasks outside of a block are given an implicit block.
- '''
- return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
-
- def _load_handlers(self, attr, ds):
- '''
- Loads a list of blocks from a list which may be mixed handlers/blocks.
- Bare handlers outside of a block are given an implicit block.
- '''
- return load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader)
-
- def _load_roles(self, attr, ds):
- '''
- Loads and returns a list of RoleInclude objects from the datastructure
- list of role definitions and creates the Role from those objects
- '''
-
- role_includes = load_list_of_roles(ds, variable_manager=self._variable_manager, loader=self._loader)
-
- roles = []
- for ri in role_includes:
- roles.append(Role.load(ri))
- return roles
-
- # FIXME: post_validation needs to ensure that become/su/sudo have only 1 set
-
- def _compile_roles(self):
- '''
- Handles the role compilation step, returning a flat list of tasks
- with the lowest level dependencies first. For example, if a role R
- has a dependency D1, which also has a dependency D2, the tasks from
- D2 are merged first, followed by D1, and lastly by the tasks from
- the parent role R last. This is done for all roles in the Play.
- '''
-
- block_list = []
-
- if len(self.roles) > 0:
- for r in self.roles:
- block_list.extend(r.compile(play=self))
-
- return block_list
-
- def compile(self):
- '''
- Compiles and returns the task list for this play, compiled from the
- roles (which are themselves compiled recursively) and/or the list of
- tasks specified in the play.
- '''
-
- block_list = []
-
- block_list.extend(self.pre_tasks)
- block_list.extend(self._compile_roles())
- block_list.extend(self.tasks)
- block_list.extend(self.post_tasks)
-
- return block_list
-
- def get_vars(self):
- return self.vars.copy()
-
- def get_vars_files(self):
- return self.vars_files
-
- def get_handlers(self):
- return self.handlers[:]
-
- def get_roles(self):
- return self.roles[:]
-
- def get_tasks(self):
- tasklist = []
- for task in self.pre_tasks + self.tasks + self.post_tasks:
- if isinstance(task, Block):
- tasklist.append(task.block + task.rescue + task.always)
- else:
- tasklist.append(task)
- return tasklist
-
- def serialize(self):
- data = super(Play, self).serialize()
-
- roles = []
- for role in self.get_roles():
- roles.append(role.serialize())
- data['roles'] = roles
-
- return data
-
- def deserialize(self, data):
- super(Play, self).deserialize(data)
-
- if 'roles' in data:
- role_data = data.get('roles', [])
- roles = []
- for role in role_data:
- r = Role()
- r.deserialize(role)
- roles.append(r)
-
- setattr(self, 'roles', roles)
- del data['roles']
-
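A minimal sketch of the Play API being removed here (the datastructure mirrors what a playbook parser would hand over; loader and variable_manager are left at their defaults for brevity):

    ds = {
        'name': 'demo play',
        'hosts': 'webservers',
        'remote_user': 'deploy',
        'tasks': [
            {'name': 'ping the host', 'ping': ''},
        ],
    }

    p = Play.load(ds)
    p.get_name()        # -> 'PLAY: demo play'

    # compile() flattens pre_tasks, role tasks (deepest dependencies first),
    # tasks and post_tasks into one list of blocks, in that order.
    blocks = p.compile()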
diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py
deleted file mode 100644
index 06060257985..00000000000
--- a/v2/ansible/playbook/task.py
+++ /dev/null
@@ -1,310 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.errors import AnsibleError
-
-from ansible.parsing.mod_args import ModuleArgsParser
-from ansible.parsing.splitter import parse_kv
-from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
-
-from ansible.plugins import module_loader, lookup_loader
-
-from ansible.playbook.attribute import Attribute, FieldAttribute
-from ansible.playbook.base import Base
-from ansible.playbook.become import Become
-from ansible.playbook.block import Block
-from ansible.playbook.conditional import Conditional
-from ansible.playbook.role import Role
-from ansible.playbook.taggable import Taggable
-
-__all__ = ['Task']
-
-class Task(Base, Conditional, Taggable, Become):
-
- """
- A task is a language feature that represents a call to a module, with given arguments and other parameters.
- A handler is a subclass of a task.
-
- Usage:
-
- Task.load(datastructure) -> Task
- Task.something(...)
- """
-
- # =================================================================================
- # ATTRIBUTES
- # load_<attribute_name> and
- # validate_<attribute_name>
- # will be used if defined
- # might be possible to define others
-
- _args = FieldAttribute(isa='dict', default=dict())
- _action = FieldAttribute(isa='string')
-
- _always_run = FieldAttribute(isa='bool')
- _any_errors_fatal = FieldAttribute(isa='bool')
- _async = FieldAttribute(isa='int', default=0)
- _changed_when = FieldAttribute(isa='string')
- _delay = FieldAttribute(isa='int', default=5)
- _delegate_to = FieldAttribute(isa='string')
- _failed_when = FieldAttribute(isa='string')
- _first_available_file = FieldAttribute(isa='list')
- _ignore_errors = FieldAttribute(isa='bool')
-
- _loop = FieldAttribute(isa='string', private=True)
- _loop_args = FieldAttribute(isa='list', private=True)
- _local_action = FieldAttribute(isa='string')
-
- # FIXME: this should not be a Task
- _meta = FieldAttribute(isa='string')
-
- _name = FieldAttribute(isa='string', default='')
-
- _notify = FieldAttribute(isa='list')
- _poll = FieldAttribute(isa='int')
- _register = FieldAttribute(isa='string')
- _retries = FieldAttribute(isa='int', default=1)
- _run_once = FieldAttribute(isa='bool')
- _until = FieldAttribute(isa='list') # ?
-
- def __init__(self, block=None, role=None, task_include=None):
- ''' constructors a task, without the Task.load classmethod, it will be pretty blank '''
-
- self._block = block
- self._role = role
- self._task_include = task_include
-
- super(Task, self).__init__()
-
- def get_name(self):
- ''' return the name of the task '''
-
- if self._role and self.name:
- return "%s : %s" % (self._role.get_name(), self.name)
- elif self.name:
- return self.name
- else:
- flattened_args = self._merge_kv(self.args)
- if self._role:
- return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args)
- else:
- return "%s %s" % (self.action, flattened_args)
-
- def _merge_kv(self, ds):
- if ds is None:
- return ""
- elif isinstance(ds, basestring):
- return ds
- elif isinstance(ds, dict):
- buf = ""
- for (k,v) in ds.iteritems():
- if k.startswith('_'):
- continue
- buf = buf + "%s=%s " % (k,v)
- buf = buf.strip()
- return buf
-
- @staticmethod
- def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
- t = Task(block=block, role=role, task_include=task_include)
- return t.load_data(data, variable_manager=variable_manager, loader=loader)
-
- def __repr__(self):
- ''' returns a human readable representation of the task '''
- return "TASK: %s" % self.get_name()
-
- def _preprocess_loop(self, ds, new_ds, k, v):
- ''' take a lookup plugin name and store it correctly '''
-
- loop_name = k.replace("with_", "")
- if new_ds.get('loop') is not None:
- raise AnsibleError("duplicate loop in task: %s" % loop_name)
- new_ds['loop'] = loop_name
- new_ds['loop_args'] = v
-
- def preprocess_data(self, ds):
- '''
- tasks are especially complex arguments so need pre-processing.
- keep it short.
- '''
-
- assert isinstance(ds, dict)
-
- # the new, cleaned datastructure, which will have legacy
- # items reduced to a standard structure suitable for the
- # attributes of the task class
- new_ds = AnsibleMapping()
- if isinstance(ds, AnsibleBaseYAMLObject):
- new_ds.ansible_pos = ds.ansible_pos
-
- # use the args parsing class to determine the action, args,
- # and the delegate_to value from the various possible forms
- # supported as legacy
- args_parser = ModuleArgsParser(task_ds=ds)
- (action, args, delegate_to) = args_parser.parse()
-
- new_ds['action'] = action
- new_ds['args'] = args
- new_ds['delegate_to'] = delegate_to
-
- for (k,v) in ds.iteritems():
- if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
- # we don't want to re-assign these values, which were
- # determined by the ModuleArgsParser() above
- continue
- elif k.replace("with_", "") in lookup_loader:
- self._preprocess_loop(ds, new_ds, k, v)
- else:
- new_ds[k] = v
-
- return super(Task, self).preprocess_data(new_ds)
-
- def post_validate(self, templar):
- '''
- Override of base class post_validate, to also do final validation on
- the block and task include (if any) to which this task belongs.
- '''
-
- if self._block:
- self._block.post_validate(templar)
- if self._task_include:
- self._task_include.post_validate(templar)
-
- super(Task, self).post_validate(templar)
-
- def get_vars(self):
- all_vars = self.vars.copy()
- if self._block:
- all_vars.update(self._block.get_vars())
- if self._task_include:
- all_vars.update(self._task_include.get_vars())
-
- all_vars.update(self.serialize())
-
- if 'tags' in all_vars:
- del all_vars['tags']
- if 'when' in all_vars:
- del all_vars['when']
- return all_vars
-
- def copy(self, exclude_block=False):
- new_me = super(Task, self).copy()
-
- new_me._block = None
- if self._block and not exclude_block:
- new_me._block = self._block.copy()
-
- new_me._role = None
- if self._role:
- new_me._role = self._role
-
- new_me._task_include = None
- if self._task_include:
- new_me._task_include = self._task_include.copy()
-
- return new_me
-
- def serialize(self):
- data = super(Task, self).serialize()
-
- if self._block:
- data['block'] = self._block.serialize()
-
- if self._role:
- data['role'] = self._role.serialize()
-
- if self._task_include:
- data['task_include'] = self._task_include.serialize()
-
- return data
-
- def deserialize(self, data):
-
- # import is here to avoid import loops
- #from ansible.playbook.task_include import TaskInclude
-
- block_data = data.get('block')
-
- if block_data:
- b = Block()
- b.deserialize(block_data)
- self._block = b
- del data['block']
-
- role_data = data.get('role')
- if role_data:
- r = Role()
- r.deserialize(role_data)
- self._role = r
- del data['role']
-
- ti_data = data.get('task_include')
- if ti_data:
- #ti = TaskInclude()
- ti = Task()
- ti.deserialize(ti_data)
- self._task_include = ti
- del data['task_include']
-
- super(Task, self).deserialize(data)
-
- def evaluate_conditional(self, all_vars):
- if self._block is not None:
- if not self._block.evaluate_conditional(all_vars):
- return False
- if self._task_include is not None:
- if not self._task_include.evaluate_conditional(all_vars):
- return False
- return super(Task, self).evaluate_conditional(all_vars)
-
- def set_loader(self, loader):
- '''
- Sets the loader on this object and recursively on parent, child objects.
- This is used primarily after the Task has been serialized/deserialized, which
- does not preserve the loader.
- '''
-
- self._loader = loader
-
- if self._block:
- self._block.set_loader(loader)
- if self._task_include:
- self._task_include.set_loader(loader)
-
- def _get_parent_attribute(self, attr, extend=False):
- '''
- Generic logic to get the attribute or parent attribute for a task value.
- '''
- value = self._attributes[attr]
- if self._block and (not value or extend):
- parent_value = getattr(self._block, attr)
- if extend:
- value = self._extend_value(value, parent_value)
- else:
- value = parent_value
- if self._task_include and (not value or extend):
- parent_value = getattr(self._task_include, attr)
- if extend:
- value = self._extend_value(value, parent_value)
- else:
- value = parent_value
- return value
-
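The _get_parent_attribute() logic removed above is how a task falls back to its parent block (and task include) for any attribute it does not set itself, optionally merging list values. A minimal standalone sketch of that fallback, using stand-in Block/Task classes rather than the real Ansible objects:

# Illustrative sketch only: stand-ins for the deleted Task/Block objects,
# showing the attribute fallback that _get_parent_attribute() implements.
class Block:
    def __init__(self, become=None, tags=None):
        self.become = become
        self.tags = tags or []

class Task:
    def __init__(self, block=None, become=None, tags=None):
        self._block = block
        self._attributes = {'become': become, 'tags': tags or []}

    def _extend_value(self, value, parent_value):
        # list-valued attributes are merged with the parent's value
        return (value or []) + (parent_value or [])

    def _get_parent_attribute(self, attr, extend=False):
        value = self._attributes[attr]
        if self._block and (not value or extend):
            parent_value = getattr(self._block, attr)
            if extend:
                value = self._extend_value(value, parent_value)
            else:
                value = parent_value
        return value

block = Block(become=True, tags=['web'])
task = Task(block=block, tags=['deploy'])
print(task._get_parent_attribute('become'))             # True, inherited from the block
print(task._get_parent_attribute('tags', extend=True))  # ['deploy', 'web']
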
diff --git a/v2/ansible/plugins/action/synchronize.py b/v2/ansible/plugins/action/synchronize.py
deleted file mode 100644
index 1bc64ff4d5b..00000000000
--- a/v2/ansible/plugins/action/synchronize.py
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2012-2013, Timothy Appnel
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os.path
-
-from ansible.plugins.action import ActionBase
-from ansible.utils.boolean import boolean
-
-class ActionModule(ActionBase):
-
- def _get_absolute_path(self, path):
- if self._task._role is not None:
- original_path = path
- path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path)
- if original_path and original_path[-1] == '/' and path[-1] != '/':
- # make sure the dwim'd path ends in a trailing "/"
- # if the original path did
- path += '/'
-
- return path
-
- def _process_origin(self, host, path, user):
-
- if not host in ['127.0.0.1', 'localhost']:
- if user:
- return '%s@%s:%s' % (user, host, path)
- else:
- return '%s:%s' % (host, path)
- else:
- if not ':' in path:
- if not path.startswith('/'):
- path = self._get_absolute_path(path=path)
- return path
-
- def _process_remote(self, host, task, path, user):
- transport = self._connection_info.connection
- return_data = None
- if not host in ['127.0.0.1', 'localhost'] or transport != "local":
- if user:
- return_data = '%s@%s:%s' % (user, host, path)
- else:
- return_data = '%s:%s' % (host, path)
- else:
- return_data = path
-
- if not ':' in return_data:
- if not return_data.startswith('/'):
- return_data = self._get_absolute_path(path=return_data)
-
- return return_data
-
- def run(self, tmp=None, task_vars=dict()):
- ''' generates params and passes them on to the rsync module '''
-
- original_transport = task_vars.get('ansible_connection', self._connection_info.connection)
- transport_overridden = False
- if task_vars.get('delegate_to') is None:
- task_vars['delegate_to'] = '127.0.0.1'
- # IF original transport is not local, override transport and disable sudo.
- if original_transport != 'local':
- task_vars['ansible_connection'] = 'local'
- transport_overridden = True
- self.runner.sudo = False
-
- src = self._task.args.get('src', None)
- dest = self._task.args.get('dest', None)
-
- # FIXME: this doesn't appear to be used anywhere?
- local_rsync_path = task_vars.get('ansible_rsync_path')
-
- # from the perspective of the rsync call the delegate is the localhost
- src_host = '127.0.0.1'
- dest_host = task_vars.get('ansible_ssh_host', task_vars.get('inventory_hostname'))
-
- # allow ansible_ssh_host to be templated
- dest_is_local = dest_host in ['127.0.0.1', 'localhost']
-
- # CHECK FOR NON-DEFAULT SSH PORT
- dest_port = self._task.args.get('dest_port')
- inv_port = task_vars.get('ansible_ssh_port', task_vars.get('inventory_hostname'))
- if inv_port != dest_port and inv_port != task_vars.get('inventory_hostname'):
- dest_port = inv_port
-
- # edge case: explicit delegate and dest_host are the same
- if dest_host == task_vars.get('delegate_to'):
- dest_host = '127.0.0.1'
-
- # SWITCH SRC AND DEST PER MODE
- if self._task.args.get('mode', 'push') == 'pull':
- (dest_host, src_host) = (src_host, dest_host)
-
- # CHECK DELEGATE HOST INFO
- use_delegate = False
- # FIXME: not sure if this is in connection info yet or not...
- #if conn.delegate != conn.host:
- # if 'hostvars' in task_vars:
- # if conn.delegate in task_vars['hostvars'] and original_transport != 'local':
- # # use a delegate host instead of localhost
- # use_delegate = True
-
- # COMPARE DELEGATE, HOST AND TRANSPORT
- process_args = False
- if not dest_host is src_host and original_transport != 'local':
-            # interpret and insert remote host info into src or dest
- process_args = True
-
- # MUNGE SRC AND DEST PER REMOTE_HOST INFO
- if process_args or use_delegate:
-
- user = None
- if boolean(task_vars.get('set_remote_user', 'yes')):
- if use_delegate:
- user = task_vars['hostvars'][conn.delegate].get('ansible_ssh_user')
-
- if not use_delegate or not user:
- user = task_vars.get('ansible_ssh_user', self.runner.remote_user)
-
- if use_delegate:
- # FIXME
- private_key = task_vars.get('ansible_ssh_private_key_file', self.runner.private_key_file)
- else:
- private_key = task_vars.get('ansible_ssh_private_key_file', self.runner.private_key_file)
-
- if private_key is not None:
- private_key = os.path.expanduser(private_key)
-
- # use the mode to define src and dest's url
- if self._task.args.get('mode', 'push') == 'pull':
-            # src is a remote path: <user>@<host>, dest is a local path
- src = self._process_remote(src_host, src, user)
- dest = self._process_origin(dest_host, dest, user)
- else:
-            # src is a local path, dest is a remote path: <user>@<host>
- src = self._process_origin(src_host, src, user)
- dest = self._process_remote(dest_host, dest, user)
-
- # Allow custom rsync path argument.
- rsync_path = self._task.args.get('rsync_path', None)
-
- # If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument.
- if not rsync_path and transport_overridden and self._connection_info.become and self._connection_info.become_method == 'sudo' and not dest_is_local:
- rsync_path = 'sudo rsync'
-
- # make sure rsync path is quoted.
- if rsync_path:
- self._task.args['rsync_path'] = '"%s"' % rsync_path
-
- # run the module and store the result
- result = self._execute_module('synchronize')
-
- return result
-
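Most of the removed synchronize action is concerned with turning src/dest into rsync endpoint strings: remote paths become user@host:path while localhost paths stay bare. A standalone sketch of that formatting rule (the function name and hosts are illustrative, not part of the plugin's API):

# Illustrative sketch of the endpoint formatting done by the deleted
# _process_origin()/_process_remote() helpers; not the real plugin code.
def rsync_endpoint(host, path, user=None):
    """Return 'user@host:path' for remote hosts, a bare path for localhost."""
    if host in ('127.0.0.1', 'localhost'):
        return path
    if user:
        return '%s@%s:%s' % (user, host, path)
    return '%s:%s' % (host, path)

print(rsync_endpoint('web01.example.com', '/var/www', user='deploy'))  # deploy@web01.example.com:/var/www
print(rsync_endpoint('localhost', '/var/www'))                         # /var/www
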
diff --git a/v2/ansible/plugins/action/template.py b/v2/ansible/plugins/action/template.py
deleted file mode 100644
index a234ef2eee9..00000000000
--- a/v2/ansible/plugins/action/template.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# (c) 2015, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import base64
-import os
-
-from ansible.plugins.action import ActionBase
-from ansible.template import Templar
-from ansible.utils.hashing import checksum_s
-
-class ActionModule(ActionBase):
-
- TRANSFERS_FILES = True
-
- def get_checksum(self, tmp, dest, try_directory=False, source=None):
- remote_checksum = self._remote_checksum(tmp, dest)
-
- if remote_checksum in ('0', '2', '3', '4'):
- # Note: 1 means the file is not present which is fine; template
- # will create it. 3 means directory was specified instead of file
- if try_directory and remote_checksum == '3' and source:
- base = os.path.basename(source)
- dest = os.path.join(dest, base)
- remote_checksum = self.get_checksum(tmp, dest, try_directory=False)
- if remote_checksum not in ('0', '2', '3', '4'):
- return remote_checksum
-
- result = dict(failed=True, msg="failed to checksum remote file."
- " Checksum error code: %s" % remote_checksum)
- return result
-
- return remote_checksum
-
- def run(self, tmp=None, task_vars=dict()):
- ''' handler for template operations '''
-
- source = self._task.args.get('src', None)
- dest = self._task.args.get('dest', None)
-
- if (source is None and 'first_available_file' not in task_vars) or dest is None:
- return dict(failed=True, msg="src and dest are required")
-
- if tmp is None:
- tmp = self._make_tmp_path()
-
- ##################################################################################################
- # FIXME: this all needs to be sorted out
- ##################################################################################################
- # if we have first_available_file in our vars
- # look up the files and use the first one we find as src
- #if 'first_available_file' in task_vars:
- # found = False
- # for fn in task_vars.get('first_available_file'):
- # fn_orig = fn
- # fnt = template.template(self.runner.basedir, fn, task_vars)
- # fnd = utils.path_dwim(self.runner.basedir, fnt)
- # if not os.path.exists(fnd) and '_original_file' in task_vars:
- # fnd = utils.path_dwim_relative(task_vars['_original_file'], 'templates', fnt, self.runner.basedir, check=False)
- # if os.path.exists(fnd):
- # source = fnd
- # found = True
- # break
- # if not found:
- # result = dict(failed=True, msg="could not find src in first_available_file list")
- # return ReturnData(conn=conn, comm_ok=False, result=result)
- #else:
- if 1:
- if self._task._role is not None:
- source = self._loader.path_dwim_relative(self._task._role._role_path, 'templates', source)
- else:
- source = self._loader.path_dwim(source)
- ##################################################################################################
- # END FIXME
- ##################################################################################################
-
- # Expand any user home dir specification
- dest = self._remote_expand_user(dest, tmp)
-
- directory_prepended = False
- if dest.endswith(os.sep):
- directory_prepended = True
- base = os.path.basename(source)
- dest = os.path.join(dest, base)
-
- # template the source data locally & get ready to transfer
- templar = Templar(loader=self._loader, variables=task_vars)
- try:
- with open(source, 'r') as f:
- template_data = f.read()
- resultant = templar.template(template_data, preserve_trailing_newlines=True)
- except Exception as e:
- return dict(failed=True, msg=type(e).__name__ + ": " + str(e))
-
- local_checksum = checksum_s(resultant)
- remote_checksum = self.get_checksum(tmp, dest, not directory_prepended, source=source)
- if isinstance(remote_checksum, dict):
- # Error from remote_checksum is a dict. Valid return is a str
- return remote_checksum
-
- if local_checksum != remote_checksum:
- # if showing diffs, we need to get the remote value
- dest_contents = ''
-
- # FIXME: still need to implement diff mechanism
- #if self.runner.diff:
- # # using persist_files to keep the temp directory around to avoid needing to grab another
- # dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, task_vars=task_vars, persist_files=True)
- # if 'content' in dest_result.result:
- # dest_contents = dest_result.result['content']
- # if dest_result.result['encoding'] == 'base64':
- # dest_contents = base64.b64decode(dest_contents)
- # else:
- # raise Exception("unknown encoding, failed: %s" % dest_result.result)
-
- xfered = self._transfer_data(self._shell.join_path(tmp, 'source'), resultant)
-
- # fix file permissions when the copy is done as a different user
- if self._connection_info.become and self._connection_info.become_user != 'root':
- self._remote_chmod('a+r', xfered, tmp)
-
- # run the copy module
- new_module_args = self._task.args.copy()
- new_module_args.update(
- dict(
- src=xfered,
- dest=dest,
- original_basename=os.path.basename(source),
- follow=True,
- ),
- )
-
- # FIXME: noop stuff needs to be sorted out
- #if self.runner.noop_on_check(task_vars):
- # return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant))
- #else:
- # res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, task_vars=task_vars, complex_args=complex_args)
- # if res.result.get('changed', False):
- # res.diff = dict(before=dest_contents, after=resultant)
- # return res
-
- result = self._execute_module(module_name='copy', module_args=new_module_args)
- if result.get('changed', False):
- result['diff'] = dict(before=dest_contents, after=resultant)
- return result
-
- else:
- # when running the file module based on the template data, we do
- # not want the source filename (the name of the template) to be used,
- # since this would mess up links, so we clear the src param and tell
- # the module to follow links. When doing that, we have to set
- # original_basename to the template just in case the dest is
- # a directory.
- new_module_args = self._task.args.copy()
- new_module_args.update(
- dict(
- src=None,
- original_basename=os.path.basename(source),
- follow=True,
- ),
- )
-
- # FIXME: this may not be required anymore, as the checkmod params
- # should be in the regular module args?
- # be sure to task_vars the check mode param into the module args and
- # rely on the file module to report its changed status
- #if self.runner.noop_on_check(task_vars):
- # new_module_args['CHECKMODE'] = True
-
- return self._execute_module(module_name='file', module_args=new_module_args)
-
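The removed template action renders the template locally, checksums the result, and only pushes a copy when the checksum differs from the remote file's. A rough sketch of that flow, assuming Jinja2 is available (as it is wherever Ansible runs) and using a hard-coded stand-in for the remote checksum:

# Illustrative sketch of the deleted template action's core flow:
# render locally, checksum, and only transfer when the content differs.
import hashlib
from jinja2 import Template

def render_and_compare(template_text, variables, remote_checksum):
    resultant = Template(template_text).render(**variables)
    local_checksum = hashlib.sha1(resultant.encode('utf-8')).hexdigest()
    changed = (local_checksum != remote_checksum)
    return changed, resultant

# '0' * 40 stands in for the checksum the plugin would fetch from the remote host.
changed, text = render_and_compare("port={{ http_port }}\n", {'http_port': 8080}, remote_checksum='0' * 40)
print(changed)  # True: the rendered content does not match what is on the "remote"
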
diff --git a/v2/ansible/plugins/callback/__init__.py b/v2/ansible/plugins/callback/__init__.py
deleted file mode 100644
index 2c2e7e74c65..00000000000
--- a/v2/ansible/plugins/callback/__init__.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-#from ansible.utils.display import Display
-
-__all__ = ["CallbackBase"]
-
-class CallbackBase:
-
- '''
- This is a base ansible callback class that does nothing. New callbacks should
-    use this class as a base and override any callback methods for which they wish
-    to execute custom actions.
- '''
-
- # FIXME: the list of functions here needs to be updated once we have
- # finalized the list of callback methods used in the default callback
-
- def __init__(self, display):
- self._display = display
-
- def set_connection_info(self, conn_info):
- # FIXME: this is a temporary hack, as the connection info object
- # should be created early and passed down through objects
- self._display._verbosity = conn_info.verbosity
-
- def on_any(self, *args, **kwargs):
- pass
-
- def runner_on_failed(self, host, res, ignore_errors=False):
- pass
-
- def runner_on_ok(self, host, res):
- pass
-
- def runner_on_skipped(self, host, item=None):
- pass
-
- def runner_on_unreachable(self, host, res):
- pass
-
- def runner_on_no_hosts(self):
- pass
-
- def runner_on_async_poll(self, host, res, jid, clock):
- pass
-
- def runner_on_async_ok(self, host, res, jid):
- pass
-
- def runner_on_async_failed(self, host, res, jid):
- pass
-
- def playbook_on_start(self):
- pass
-
- def playbook_on_notify(self, host, handler):
- pass
-
- def playbook_on_no_hosts_matched(self):
- pass
-
- def playbook_on_no_hosts_remaining(self):
- pass
-
- def playbook_on_task_start(self, name, is_conditional):
- pass
-
- def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
- pass
-
- def playbook_on_setup(self):
- pass
-
- def playbook_on_import_for_host(self, host, imported_file):
- pass
-
- def playbook_on_not_import_for_host(self, host, missing_file):
- pass
-
- def playbook_on_play_start(self, name):
- pass
-
- def playbook_on_stats(self, stats):
- pass
-
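The removed CallbackBase is a no-op base class; plugins subclass it and override only the events they care about. A self-contained sketch of that pattern, with stub classes standing in for the real base and display objects:

# Illustrative sketch: StubDisplay/StubCallbackBase stand in for the real
# Ansible objects so the override-only-what-you-need pattern is runnable here.
import datetime

class StubDisplay:
    def display(self, msg, color=None):
        print(msg)

class StubCallbackBase:
    def __init__(self, display):
        self._display = display
    def runner_on_ok(self, host, res):
        pass
    def runner_on_failed(self, host, res, ignore_errors=False):
        pass

class TimestampCallback(StubCallbackBase):
    def runner_on_failed(self, host, res, ignore_errors=False):
        stamp = datetime.datetime.now().isoformat()
        self._display.display("%s FAILED on %s: %r" % (stamp, host, res), color='red')

cb = TimestampCallback(StubDisplay())
cb.runner_on_ok('web01', {'changed': False})   # inherited no-op
cb.runner_on_failed('db01', {'msg': 'boom'})   # overridden: prints a timestamped line
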
diff --git a/v2/ansible/plugins/callback/default.py b/v2/ansible/plugins/callback/default.py
deleted file mode 100644
index de6548ef188..00000000000
--- a/v2/ansible/plugins/callback/default.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-
-from ansible.plugins.callback import CallbackBase
-
-class CallbackModule(CallbackBase):
-
- '''
- This is the default callback interface, which simply prints messages
- to stdout when new callback events are received.
- '''
-
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'stdout'
-
- def v2_on_any(self, *args, **kwargs):
- pass
-
- def v2_runner_on_failed(self, result, ignore_errors=False):
- if 'exception' in result._result and self._display.verbosity < 3:
- del result._result['exception']
- self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), json.dumps(result._result, ensure_ascii=False)), color='red')
-
- def v2_runner_on_ok(self, result):
-
- if result._task.action == 'include':
- msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
- color = 'cyan'
- elif result._result.get('changed', False):
- msg = "changed: [%s]" % result._host.get_name()
- color = 'yellow'
- else:
- msg = "ok: [%s]" % result._host.get_name()
- color = 'green'
-
- if (self._display._verbosity > 0 or 'verbose_always' in result._result) and result._task.action not in ('setup', 'include'):
- indent = None
- if 'verbose_always' in result._result:
- indent = 4
- del result._result['verbose_always']
- msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False)
- self._display.display(msg, color=color)
-
- def v2_runner_on_skipped(self, result):
- msg = "skipping: [%s]" % result._host.get_name()
- if self._display._verbosity > 0 or 'verbose_always' in result._result:
- indent = None
- if 'verbose_always' in result._result:
- indent = 4
- del result._result['verbose_always']
- msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False)
- self._display.display(msg, color='cyan')
-
- def v2_runner_on_unreachable(self, result):
- self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), result._result), color='red')
-
- def v2_runner_on_no_hosts(self, task):
- pass
-
- def v2_runner_on_async_poll(self, result):
- pass
-
- def v2_runner_on_async_ok(self, result):
- pass
-
- def v2_runner_on_async_failed(self, result):
- pass
-
- def v2_runner_on_file_diff(self, result, diff):
- pass
-
- def v2_playbook_on_start(self):
- pass
-
- def v2_playbook_on_notify(self, result, handler):
- pass
-
- def v2_playbook_on_no_hosts_matched(self):
- self._display.display("skipping: no hosts matched", color='cyan')
-
- def v2_playbook_on_no_hosts_remaining(self):
- self._display.banner("NO MORE HOSTS LEFT")
-
- def v2_playbook_on_task_start(self, task, is_conditional):
- self._display.banner("TASK [%s]" % task.get_name().strip())
-
- def v2_playbook_on_cleanup_task_start(self, task):
- self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
-
- def v2_playbook_on_handler_task_start(self, task):
- self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
-
- def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
- pass
-
- def v2_playbook_on_setup(self):
- pass
-
- def v2_playbook_on_import_for_host(self, result, imported_file):
- pass
-
- def v2_playbook_on_not_import_for_host(self, result, missing_file):
- pass
-
- def v2_playbook_on_play_start(self, play):
- name = play.get_name().strip()
- if not name:
- msg = "PLAY"
- else:
- msg = "PLAY [%s]" % name
-
- self._display.banner(name)
-
- def v2_playbook_on_stats(self, stats):
- pass
-
diff --git a/v2/ansible/plugins/callback/minimal.py b/v2/ansible/plugins/callback/minimal.py
deleted file mode 100644
index c6b2282e62f..00000000000
--- a/v2/ansible/plugins/callback/minimal.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-
-from ansible.plugins.callback import CallbackBase
-
-
-class CallbackModule(CallbackBase):
-
- '''
-    This is a minimal callback interface, which simply prints messages
- to stdout when new callback events are received.
- '''
-
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'stdout'
-
- def v2_on_any(self, *args, **kwargs):
- pass
-
- def v2_runner_on_failed(self, result, ignore_errors=False):
- if 'exception' in result._result and self._display.verbosity < 3:
- del result._result['exception']
- self._display.display("%s | FAILED! => %s" % (result._host.get_name(), result._result), color='red')
-
- def v2_runner_on_ok(self, result):
- self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), json.dumps(result._result, indent=4)), color='green')
-
- def v2_runner_on_skipped(self, result):
- pass
-
- def v2_runner_on_unreachable(self, result):
- self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow')
-
- def v2_runner_on_no_hosts(self, task):
- pass
-
- def v2_runner_on_async_poll(self, host, res, jid, clock):
- pass
-
- def v2_runner_on_async_ok(self, host, res, jid):
- pass
-
- def v2_runner_on_async_failed(self, host, res, jid):
- pass
-
- def v2_playbook_on_start(self):
- pass
-
- def v2_playbook_on_notify(self, host, handler):
- pass
-
- def v2_playbook_on_no_hosts_matched(self):
- pass
-
- def v2_playbook_on_no_hosts_remaining(self):
- pass
-
- def v2_playbook_on_task_start(self, task, is_conditional):
- pass
-
- def v2_playbook_on_cleanup_task_start(self, task):
- pass
-
- def v2_playbook_on_handler_task_start(self, task):
- pass
-
- def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
- pass
-
- def v2_playbook_on_setup(self):
- pass
-
- def v2_playbook_on_import_for_host(self, result, imported_file):
- pass
-
- def v2_playbook_on_not_import_for_host(self, result, missing_file):
- pass
-
- def v2_playbook_on_play_start(self, play):
- pass
-
- def v2_playbook_on_stats(self, stats):
- pass
-
diff --git a/v2/ansible/plugins/connections/__init__.py b/v2/ansible/plugins/connections/__init__.py
deleted file mode 100644
index d11f3651827..00000000000
--- a/v2/ansible/plugins/connections/__init__.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-# (c) 2015 Toshio Kuratomi
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from abc import ABCMeta, abstractmethod, abstractproperty
-
-from six import add_metaclass
-
-from ansible import constants as C
-from ansible.errors import AnsibleError
-
-# FIXME: this object should be created upfront and passed through
-# the entire chain of calls to here, as there are other things
-# which may want to output display/logs too
-from ansible.utils.display import Display
-
-__all__ = ['ConnectionBase']
-
-@add_metaclass(ABCMeta)
-class ConnectionBase:
- '''
- A base class for connections to contain common code.
- '''
-
- has_pipelining = False
- become_methods = C.BECOME_METHODS
-
- def __init__(self, connection_info, new_stdin, *args, **kwargs):
- # All these hasattrs allow subclasses to override these parameters
- if not hasattr(self, '_connection_info'):
- self._connection_info = connection_info
- if not hasattr(self, '_new_stdin'):
- self._new_stdin = new_stdin
- if not hasattr(self, '_display'):
- self._display = Display(verbosity=connection_info.verbosity)
- if not hasattr(self, '_connected'):
- self._connected = False
-
- self._connect()
-
- def _become_method_supported(self, become_method):
- ''' Checks if the current class supports this privilege escalation method '''
-
- if become_method in self.__class__.become_methods:
- return True
-
- raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % become_method)
-
- @abstractproperty
- def transport(self):
- """String used to identify this Connection class from other classes"""
- pass
-
- @abstractmethod
- def _connect(self):
- """Connect to the host we've been initialized with"""
- pass
-
- @abstractmethod
- def exec_command(self, cmd, tmp_path, executable=None, in_data=None):
- """Run a command on the remote host"""
- pass
-
- @abstractmethod
- def put_file(self, in_path, out_path):
- """Transfer a file from local to remote"""
- pass
-
- @abstractmethod
- def fetch_file(self, in_path, out_path):
- """Fetch a file from remote to local"""
- pass
-
- @abstractmethod
- def close(self):
- """Terminate the connection"""
- pass
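The removed ConnectionBase defines the contract every connection plugin must satisfy: a transport name plus exec_command/put_file/fetch_file/close. A toy illustration of that contract using only the standard library (DummyConnection is not one of Ansible's real plugins):

# Illustrative sketch of the abstract contract the deleted ConnectionBase
# expresses; a trimmed set of abstract methods and a throwaway local subclass.
from abc import ABC, abstractmethod
import shutil
import subprocess

class ConnectionSketch(ABC):
    @property
    @abstractmethod
    def transport(self):
        """String identifying this connection type."""

    @abstractmethod
    def exec_command(self, cmd):
        """Run a command on the target and return (rc, stdout, stderr)."""

    @abstractmethod
    def put_file(self, in_path, out_path):
        """Copy a file to the target."""

class DummyConnection(ConnectionSketch):
    @property
    def transport(self):
        return 'dummy-local'

    def exec_command(self, cmd):
        p = subprocess.run(cmd, shell=True, capture_output=True, text=True)
        return p.returncode, p.stdout, p.stderr

    def put_file(self, in_path, out_path):
        shutil.copyfile(in_path, out_path)

conn = DummyConnection()
print(conn.transport, conn.exec_command('echo hello'))
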
diff --git a/v2/ansible/plugins/connections/local.py b/v2/ansible/plugins/connections/local.py
deleted file mode 100644
index 1dc6076b0db..00000000000
--- a/v2/ansible/plugins/connections/local.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# (c) 2012, Michael DeHaan
-# (c) 2015 Toshio Kuratomi
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import traceback
-import os
-import shutil
-import subprocess
-#import select
-#import fcntl
-
-from ansible.errors import AnsibleError, AnsibleFileNotFound
-from ansible.plugins.connections import ConnectionBase
-
-from ansible.utils.debug import debug
-
-class Connection(ConnectionBase):
- ''' Local based connections '''
-
- @property
- def transport(self):
- ''' used to identify this connection object '''
- return 'local'
-
- def _connect(self, port=None):
- ''' connect to the local host; nothing to do here '''
-
- if not self._connected:
- self._display.vvv("ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._connection_info.remote_user, host=self._connection_info.remote_addr))
- self._connected = True
- return self
-
- def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
- ''' run a command on the local host '''
-
- debug("in local.exec_command()")
-        # su requires being run from a terminal, and therefore isn't supported here (yet?)
- #if self._connection_info.su:
- # raise AnsibleError("Internal Error: this module does not support running commands via su")
-
- if in_data:
- raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- executable = executable.split()[0] if executable else None
-
- self._display.vvv("{0} EXEC {1}".format(self._connection_info.remote_addr, cmd))
- # FIXME: cwd= needs to be set to the basedir of the playbook
- debug("opening command with Popen()")
- p = subprocess.Popen(
- cmd,
- shell=isinstance(cmd, basestring),
- executable=executable, #cwd=...
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- )
- debug("done running command with Popen()")
-
- # FIXME: more su/sudo stuff
- #if self.runner.sudo and sudoable and self.runner.sudo_pass:
- # fcntl.fcntl(p.stdout, fcntl.F_SETFL,
- # fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
- # fcntl.fcntl(p.stderr, fcntl.F_SETFL,
- # fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
- # sudo_output = ''
- # while not sudo_output.endswith(prompt) and success_key not in sudo_output:
- # rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
- # [p.stdout, p.stderr], self.runner.timeout)
- # if p.stdout in rfd:
- # chunk = p.stdout.read()
- # elif p.stderr in rfd:
- # chunk = p.stderr.read()
- # else:
- # stdout, stderr = p.communicate()
- # raise AnsibleError('timeout waiting for sudo password prompt:\n' + sudo_output)
- # if not chunk:
- # stdout, stderr = p.communicate()
- # raise AnsibleError('sudo output closed while waiting for password prompt:\n' + sudo_output)
- # sudo_output += chunk
- # if success_key not in sudo_output:
- # p.stdin.write(self.runner.sudo_pass + '\n')
- # fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- # fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
-
- debug("getting output with communicate()")
- stdout, stderr = p.communicate()
- debug("done communicating")
-
- debug("done with local.exec_command()")
- return (p.returncode, '', stdout, stderr)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to local '''
-
- #vvv("PUT {0} TO {1}".format(in_path, out_path), host=self.host)
- self._display.vvv("{0} PUT {1} TO {2}".format(self._connection_info.remote_addr, in_path, out_path))
- if not os.path.exists(in_path):
- raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path))
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise AnsibleError("failed to copy: {0} and {1} are the same".format(in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise AnsibleError("failed to transfer file to {0}".format(out_path))
-
- def fetch_file(self, in_path, out_path):
-        ''' fetch a file from local to local -- for compatibility '''
- #vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
- self._display.vvv("{0} FETCH {1} TO {2}".format(self._connection_info.remote_addr, in_path, out_path))
- self.put_file(in_path, out_path)
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- self._connected = False
diff --git a/v2/ansible/plugins/filter b/v2/ansible/plugins/filter
deleted file mode 120000
index fa1d5885700..00000000000
--- a/v2/ansible/plugins/filter
+++ /dev/null
@@ -1 +0,0 @@
-../../../lib/ansible/runner/filter_plugins
\ No newline at end of file
diff --git a/v2/ansible/plugins/lookup/subelements.py b/v2/ansible/plugins/lookup/subelements.py
deleted file mode 100644
index 09a2ca306a1..00000000000
--- a/v2/ansible/plugins/lookup/subelements.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# (c) 2013, Serge van Ginderachter
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.errors import *
-from ansible.plugins.lookup import LookupBase
-from ansible.utils.listify import listify_lookup_plugin_terms
-
-class LookupModule(LookupBase):
-
- def run(self, terms, variables, **kwargs):
-
- terms[0] = listify_lookup_plugin_terms(terms[0], variables, loader=self._loader)
-
- if not isinstance(terms, list) or not len(terms) == 2:
- raise AnsibleError("subelements lookup expects a list of two items, first a dict or a list, and second a string")
-
- if isinstance(terms[0], dict): # convert to list:
- if terms[0].get('skipped',False) != False:
- # the registered result was completely skipped
- return []
- elementlist = []
- for key in terms[0].iterkeys():
- elementlist.append(terms[0][key])
- else:
- elementlist = terms[0]
-
- subelement = terms[1]
-
- ret = []
- for item0 in elementlist:
- if not isinstance(item0, dict):
- raise AnsibleError("subelements lookup expects a dictionary, got '%s'" %item0)
- if item0.get('skipped', False) != False:
- # this particular item is to be skipped
- continue
- if not subelement in item0:
- raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subelement, item0))
- if not isinstance(item0[subelement], list):
- raise AnsibleError("the key %s should point to a list, got '%s'" % (subelement, item0[subelement]))
- sublist = item0.pop(subelement, [])
- for item1 in sublist:
- ret.append((item0, item1))
-
- return ret
-
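The removed subelements lookup pairs each outer item with every entry of one of its list-valued keys. The core of that computation, sketched with made-up sample data:

# Illustrative sketch of the pairing the deleted subelements lookup performs;
# the users list below is invented demonstration data.
def subelement_pairs(items, key):
    pairs = []
    for item in items:
        for sub in item.get(key, []):
            pairs.append((item, sub))
    return pairs

users = [
    {'name': 'alice', 'authorized': ['key-a1', 'key-a2']},
    {'name': 'bob',   'authorized': ['key-b1']},
]
for user, key in subelement_pairs(users, 'authorized'):
    print(user['name'], key)   # alice key-a1 / alice key-a2 / bob key-b1
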
diff --git a/v2/ansible/plugins/strategies/__init__.py b/v2/ansible/plugins/strategies/__init__.py
deleted file mode 100644
index f6103343712..00000000000
--- a/v2/ansible/plugins/strategies/__init__.py
+++ /dev/null
@@ -1,432 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from six.moves import queue as Queue
-import time
-
-from ansible.errors import *
-
-from ansible.inventory.host import Host
-from ansible.inventory.group import Group
-
-from ansible.playbook.handler import Handler
-from ansible.playbook.helpers import load_list_of_blocks
-from ansible.playbook.role import ROLE_CACHE, hash_params
-from ansible.plugins import filter_loader, lookup_loader, module_loader
-from ansible.utils.debug import debug
-
-
-__all__ = ['StrategyBase']
-
-# FIXME: this should probably be in the plugins/__init__.py, with
-# a smarter mechanism to set all of the attributes based on
-# the loaders created there
-class SharedPluginLoaderObj:
- '''
-    A simple object to make passing the various plugin loaders to
- the forked processes over the queue easier
- '''
- def __init__(self):
- self.filter_loader = filter_loader
- self.lookup_loader = lookup_loader
- self.module_loader = module_loader
-
-class StrategyBase:
-
- '''
- This is the base class for strategy plugins, which contains some common
- code useful to all strategies like running handlers, cleanup actions, etc.
- '''
-
- def __init__(self, tqm):
- self._tqm = tqm
- self._inventory = tqm.get_inventory()
- self._workers = tqm.get_workers()
- self._notified_handlers = tqm.get_notified_handlers()
- #self._callback = tqm.get_callback()
- self._variable_manager = tqm.get_variable_manager()
- self._loader = tqm.get_loader()
- self._final_q = tqm._final_q
-
- # internal counters
- self._pending_results = 0
- self._cur_worker = 0
-
- # this dictionary is used to keep track of hosts that have
- # outstanding tasks still in queue
- self._blocked_hosts = dict()
-
- def run(self, iterator, connection_info, result=True):
- # save the counts on failed/unreachable hosts, as the cleanup/handler
- # methods will clear that information during their runs
- num_failed = len(self._tqm._failed_hosts)
- num_unreachable = len(self._tqm._unreachable_hosts)
-
- #debug("running the cleanup portion of the play")
- #result &= self.cleanup(iterator, connection_info)
- debug("running handlers")
- result &= self.run_handlers(iterator, connection_info)
-
- # send the stats callback
- self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
-
- if not result:
- if num_unreachable > 0:
- return 3
- elif num_failed > 0:
- return 2
- else:
- return 1
- else:
- return 0
-
- def get_hosts_remaining(self, play):
- return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]
-
- def get_failed_hosts(self, play):
- return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
-
- def _queue_task(self, host, task, task_vars, connection_info):
- ''' handles queueing the task up to be sent to a worker '''
-
- debug("entering _queue_task() for %s/%s" % (host, task))
-
- # and then queue the new task
- debug("%s - putting task (%s) in queue" % (host, task))
- try:
- debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
-
- (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
- self._cur_worker += 1
- if self._cur_worker >= len(self._workers):
- self._cur_worker = 0
-
- self._pending_results += 1
-
- # create a dummy object with plugin loaders set as an easier
- # way to share them with the forked processes
- shared_loader_obj = SharedPluginLoaderObj()
-
- main_q.put((host, task, self._loader.get_basedir(), task_vars, connection_info, shared_loader_obj), block=False)
- except (EOFError, IOError, AssertionError) as e:
- # most likely an abort
- debug("got an error while queuing: %s" % e)
- return
- debug("exiting _queue_task() for %s/%s" % (host, task))
-
- def _process_pending_results(self, iterator):
- '''
- Reads results off the final queue and takes appropriate action
- based on the result (executing callbacks, updating state, etc.).
- '''
-
- ret_results = []
-
- while not self._final_q.empty() and not self._tqm._terminated:
- try:
- result = self._final_q.get(block=False)
- debug("got result from result worker: %s" % (result,))
-
- # all host status messages contain 2 entries: (msg, task_result)
- if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
- task_result = result[1]
- host = task_result._host
- task = task_result._task
- if result[0] == 'host_task_failed':
- if not task.ignore_errors:
- debug("marking %s as failed" % host.name)
- iterator.mark_host_failed(host)
- self._tqm._failed_hosts[host.name] = True
- self._tqm._stats.increment('failures', host.name)
- self._tqm.send_callback('v2_runner_on_failed', task_result)
- elif result[0] == 'host_unreachable':
- self._tqm._unreachable_hosts[host.name] = True
- self._tqm._stats.increment('dark', host.name)
- self._tqm.send_callback('v2_runner_on_unreachable', task_result)
- elif result[0] == 'host_task_skipped':
- self._tqm._stats.increment('skipped', host.name)
- self._tqm.send_callback('v2_runner_on_skipped', task_result)
- elif result[0] == 'host_task_ok':
- self._tqm._stats.increment('ok', host.name)
- if 'changed' in task_result._result and task_result._result['changed']:
- self._tqm._stats.increment('changed', host.name)
- self._tqm.send_callback('v2_runner_on_ok', task_result)
-
- self._pending_results -= 1
- if host.name in self._blocked_hosts:
- del self._blocked_hosts[host.name]
-
- # If this is a role task, mark the parent role as being run (if
- # the task was ok or failed, but not skipped or unreachable)
- if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
- # lookup the role in the ROLE_CACHE to make sure we're dealing
- # with the correct object and mark it as executed
- for (entry, role_obj) in ROLE_CACHE[task_result._task._role._role_name].iteritems():
- hashed_entry = hash_params(task_result._task._role._role_params)
- if entry == hashed_entry :
- role_obj._had_task_run = True
-
- ret_results.append(task_result)
-
- elif result[0] == 'add_host':
- task_result = result[1]
- new_host_info = task_result.get('add_host', dict())
-
- self._add_host(new_host_info)
-
- elif result[0] == 'add_group':
- host = result[1]
- task_result = result[2]
- group_name = task_result.get('add_group')
-
- self._add_group(host, group_name)
-
- elif result[0] == 'notify_handler':
- host = result[1]
- handler_name = result[2]
-
- if handler_name not in self._notified_handlers:
- self._notified_handlers[handler_name] = []
-
- if host not in self._notified_handlers[handler_name]:
- self._notified_handlers[handler_name].append(host)
-
- elif result[0] == 'set_host_var':
- host = result[1]
- var_name = result[2]
- var_value = result[3]
- self._variable_manager.set_host_variable(host, var_name, var_value)
-
- elif result[0] == 'set_host_facts':
- host = result[1]
- facts = result[2]
- self._variable_manager.set_host_facts(host, facts)
-
- else:
- raise AnsibleError("unknown result message received: %s" % result[0])
- except Queue.Empty:
- pass
-
- return ret_results
-
- def _wait_on_pending_results(self, iterator):
- '''
- Wait for the shared counter to drop to zero, using a short sleep
- between checks to ensure we don't spin lock
- '''
-
- ret_results = []
-
- while self._pending_results > 0 and not self._tqm._terminated:
- debug("waiting for pending results (%d left)" % self._pending_results)
- results = self._process_pending_results(iterator)
- ret_results.extend(results)
- if self._tqm._terminated:
- break
- time.sleep(0.01)
-
- return ret_results
-
- def _add_host(self, host_info):
- '''
- Helper function to add a new host to inventory based on a task result.
- '''
-
- host_name = host_info.get('host_name')
-
- # Check if host in cache, add if not
- if host_name in self._inventory._hosts_cache:
- new_host = self._inventory._hosts_cache[host_name]
- else:
- new_host = Host(host_name)
- self._inventory._hosts_cache[host_name] = new_host
-
- allgroup = self._inventory.get_group('all')
- allgroup.add_host(new_host)
-
- # Set/update the vars for this host
- # FIXME: probably should have a set vars method for the host?
- new_vars = host_info.get('host_vars', dict())
- new_host.vars.update(new_vars)
-
- new_groups = host_info.get('groups', [])
- for group_name in new_groups:
- if not self._inventory.get_group(group_name):
- new_group = Group(group_name)
- self._inventory.add_group(new_group)
- new_group.vars = self._inventory.get_group_variables(group_name)
- else:
- new_group = self._inventory.get_group(group_name)
-
- new_group.add_host(new_host)
-
- # add this host to the group cache
- if self._inventory._groups_list is not None:
- if group_name in self._inventory._groups_list:
- if new_host.name not in self._inventory._groups_list[group_name]:
- self._inventory._groups_list[group_name].append(new_host.name)
-
- # clear pattern caching completely since it's unpredictable what
- # patterns may have referenced the group
- # FIXME: is this still required?
- self._inventory.clear_pattern_cache()
-
- def _add_group(self, host, group_name):
- '''
- Helper function to add a group (if it does not exist), and to assign the
- specified host to that group.
- '''
-
- new_group = self._inventory.get_group(group_name)
- if not new_group:
- # create the new group and add it to inventory
- new_group = Group(group_name)
- self._inventory.add_group(new_group)
-
- # and add the group to the proper hierarchy
- allgroup = self._inventory.get_group('all')
- allgroup.add_child_group(new_group)
-
- # the host here is from the executor side, which means it was a
- # serialized/cloned copy and we'll need to look up the proper
- # host object from the master inventory
- actual_host = self._inventory.get_host(host.name)
-
- # and add the host to the group
- new_group.add_host(actual_host)
-
- def _load_included_file(self, included_file):
- '''
- Loads an included YAML file of tasks, applying the optional set of variables.
- '''
-
- data = self._loader.load_from_file(included_file._filename)
- if not isinstance(data, list):
- raise AnsibleParserError("included task files must contain a list of tasks", obj=included_file._task._ds)
-
- is_handler = isinstance(included_file._task, Handler)
- block_list = load_list_of_blocks(
- data,
- play=included_file._task._block._play,
- parent_block=included_file._task._block,
- task_include=included_file._task,
- role=included_file._task._role,
- use_handlers=is_handler,
- loader=self._loader
- )
-
- # set the vars for this task from those specified as params to the include
- for b in block_list:
- b._vars = included_file._args.copy()
-
- return block_list
-
- def cleanup(self, iterator, connection_info):
- '''
- Iterates through failed hosts and runs any outstanding rescue/always blocks
- and handlers which may still need to be run after a failure.
- '''
-
- debug("in cleanup")
- result = True
-
- debug("getting failed hosts")
- failed_hosts = self.get_failed_hosts(iterator._play)
- if len(failed_hosts) == 0:
- debug("there are no failed hosts")
- return result
-
- debug("marking hosts failed in the iterator")
- # mark the host as failed in the iterator so it will take
- # any required rescue paths which may be outstanding
- for host in failed_hosts:
- iterator.mark_host_failed(host)
-
- debug("clearing the failed hosts list")
- # clear the failed hosts dictionary now while also
- for entry in self._tqm._failed_hosts.keys():
- del self._tqm._failed_hosts[entry]
-
- work_to_do = True
- while work_to_do:
- work_to_do = False
- for host in failed_hosts:
- host_name = host.name
-
- if host_name in self._tqm._failed_hosts:
- iterator.mark_host_failed(host)
- del self._tqm._failed_hosts[host_name]
-
- if host_name in self._blocked_hosts:
- work_to_do = True
- continue
- elif iterator.get_next_task_for_host(host, peek=True) and host_name not in self._tqm._unreachable_hosts:
- work_to_do = True
-
- # pop the task, mark the host blocked, and queue it
- self._blocked_hosts[host_name] = True
- task = iterator.get_next_task_for_host(host)
- task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
- self._tqm.send_callback('v2_playbook_on_cleanup_task_start', task)
- self._queue_task(host, task, task_vars, connection_info)
-
- self._process_pending_results(iterator)
- time.sleep(0.01)
-
- # no more work, wait until the queue is drained
- self._wait_on_pending_results(iterator)
-
- return result
-
- def run_handlers(self, iterator, connection_info):
- '''
- Runs handlers on those hosts which have been notified.
- '''
-
- result = True
-
- # FIXME: getting the handlers from the iterators play should be
- # a method on the iterator, which may also filter the list
- # of handlers based on the notified list
-
- for handler_block in iterator._play.handlers:
- # FIXME: handlers need to support the rescue/always portions of blocks too,
- # but this may take some work in the iterator and gets tricky when
- # we consider the ability of meta tasks to flush handlers
- for handler in handler_block.block:
- handler_name = handler.get_name()
- if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
- if not len(self.get_hosts_remaining(iterator._play)):
- self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
- result = False
- break
- self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
- for host in self._notified_handlers[handler_name]:
- if not handler.has_triggered(host):
- task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
- self._queue_task(host, handler, task_vars, connection_info)
- handler.flag_for_host(host)
- self._process_pending_results(iterator)
- self._wait_on_pending_results(iterator)
- # wipe the notification list
- self._notified_handlers[handler_name] = []
- debug("done running handlers, result is: %s" % result)
- return result
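The removed _process_pending_results() drains the shared results queue without blocking, classifying each message by its leading tag. A simplified, standard-library-only sketch of that draining pattern (the message tuples are stand-ins for Ansible's internal result messages):

# Illustrative sketch of the non-blocking queue drain used by the deleted
# strategy base; not the real worker/result plumbing.
import queue

def drain(final_q):
    """Pull every pending result off the queue without blocking."""
    results = []
    while True:
        try:
            msg, payload = final_q.get(block=False)
        except queue.Empty:
            break
        results.append((msg, payload))
    return results

q = queue.Queue()
q.put(('host_task_ok', {'host': 'web01', 'changed': True}))
q.put(('host_task_failed', {'host': 'db01', 'msg': 'boom'}))
for msg, payload in drain(q):
    print(msg, payload)
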
diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py
deleted file mode 100644
index d5b6ad71a93..00000000000
--- a/v2/ansible/utils/display.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# (c) 2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see