Making the switch to v2

pull/10943/head
James Cammarata 10 years ago
parent 8cf4452d48
commit ce3ef7f4c1

16
.gitmodules vendored

@ -1,16 +0,0 @@
[submodule "lib/ansible/modules/core"]
path = lib/ansible/modules/core
url = https://github.com/ansible/ansible-modules-core.git
branch = devel
[submodule "lib/ansible/modules/extras"]
path = lib/ansible/modules/extras
url = https://github.com/ansible/ansible-modules-extras.git
branch = devel
[submodule "v2/ansible/modules/core"]
path = v2/ansible/modules/core
url = https://github.com/ansible/ansible-modules-core.git
branch = devel
[submodule "v2/ansible/modules/extras"]
path = v2/ansible/modules/extras
url = https://github.com/ansible/ansible-modules-extras.git
branch = devel

@ -18,6 +18,8 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################## ########################################################
from __future__ import (absolute_import)
__metaclass__ = type
__requires__ = ['ansible'] __requires__ = ['ansible']
try: try:
@ -33,175 +35,45 @@ except Exception:
import os import os
import sys import sys
from ansible.runner import Runner from ansible.errors import AnsibleError, AnsibleOptionsError
import ansible.constants as C from ansible.utils.display import Display
from ansible import utils
from ansible import errors
from ansible import callbacks
from ansible import inventory
########################################################
class Cli(object):
''' code behind bin/ansible '''
# ----------------------------------------------
def __init__(self):
self.stats = callbacks.AggregateStats()
self.callbacks = callbacks.CliRunnerCallbacks()
if C.DEFAULT_LOAD_CALLBACK_PLUGINS:
callbacks.load_callback_plugins()
# ----------------------------------------------
def parse(self):
''' create an options parser for bin/ansible '''
parser = utils.base_parser(
constants=C,
runas_opts=True,
subset_opts=True,
async_opts=True,
output_opts=True,
connect_opts=True,
check_opts=True,
diff_opts=False,
usage='%prog <host-pattern> [options]'
)
parser.add_option('-a', '--args', dest='module_args',
help="module arguments", default=C.DEFAULT_MODULE_ARGS)
parser.add_option('-m', '--module-name', dest='module_name',
help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
default=C.DEFAULT_MODULE_NAME)
options, args = parser.parse_args()
self.callbacks.options = options
if len(args) == 0 or len(args) > 1:
parser.print_help()
sys.exit(1)
# privlege escalation command line arguments need to be mutually exclusive
utils.check_mutually_exclusive_privilege(options, parser)
if (options.ask_vault_pass and options.vault_password_file):
parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
return (options, args)
# ----------------------------------------------
def run(self, options, args):
''' use Runner lib to do SSH things '''
pattern = args[0]
sshpass = becomepass = vault_pass = become_method = None
# Never ask for an SSH password when we run with local connection
if options.connection == "local":
options.ask_pass = False
else:
options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
# become
utils.normalize_become_options(options)
prompt_method = utils.choose_pass_prompt(options)
(sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, become_ask_pass=options.become_ask_pass, ask_vault_pass=options.ask_vault_pass, become_method=prompt_method)
# read vault_pass from a file
if not options.ask_vault_pass and options.vault_password_file:
vault_pass = utils.read_vault_file(options.vault_password_file)
extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)
inventory_manager = inventory.Inventory(options.inventory, vault_password=vault_pass)
if options.subset:
inventory_manager.subset(options.subset)
hosts = inventory_manager.list_hosts(pattern)
if len(hosts) == 0:
callbacks.display("No hosts matched", stderr=True)
sys.exit(0)
if options.listhosts:
for host in hosts:
callbacks.display(' %s' % host)
sys.exit(0)
if options.module_name in ['command','shell'] and not options.module_args:
callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True)
sys.exit(1)
if options.tree:
utils.prepare_writeable_dir(options.tree)
runner = Runner(
module_name=options.module_name,
module_path=options.module_path,
module_args=options.module_args,
remote_user=options.remote_user,
remote_pass=sshpass,
inventory=inventory_manager,
timeout=options.timeout,
private_key_file=options.private_key_file,
forks=options.forks,
pattern=pattern,
callbacks=self.callbacks,
transport=options.connection,
subset=options.subset,
check=options.check,
diff=options.check,
vault_pass=vault_pass,
become=options.become,
become_method=options.become_method,
become_pass=becomepass,
become_user=options.become_user,
extra_vars=extra_vars,
)
if options.seconds:
callbacks.display("background launch...\n\n", color='cyan')
results, poller = runner.run_async(options.seconds)
results = self.poll_while_needed(poller, options)
else:
results = runner.run()
return (runner, results)
# ----------------------------------------------
def poll_while_needed(self, poller, options):
''' summarize results from Runner '''
# BACKGROUND POLL LOGIC when -B and -P are specified
if options.seconds and options.poll_interval > 0:
poller.wait(options.seconds, options.poll_interval)
return poller.results
######################################################## ########################################################
if __name__ == '__main__': if __name__ == '__main__':
callbacks.display("", log_only=True)
callbacks.display(" ".join(sys.argv), log_only=True)
callbacks.display("", log_only=True)
cli = Cli() cli = None
(options, args) = cli.parse() display = Display()
me = os.path.basename(__file__)
try: try:
(runner, results) = cli.run(options, args) if me == 'ansible-playbook':
for result in results['contacted'].values(): from ansible.cli.playbook import PlaybookCLI as mycli
if 'failed' in result or result.get('rc', 0) != 0: elif me == 'ansible':
sys.exit(2) from ansible.cli.adhoc import AdHocCLI as mycli
if results['dark']: elif me == 'ansible-pull':
sys.exit(3) from ansible.cli.pull import PullCLI as mycli
except errors.AnsibleError, e: elif me == 'ansible-doc':
# Generic handler for ansible specific errors from ansible.cli.doc import DocCLI as mycli
callbacks.display("ERROR: %s" % str(e), stderr=True, color='red') elif me == 'ansible-vault':
sys.exit(1) from ansible.cli.vault import VaultCLI as mycli
elif me == 'ansible-galaxy':
from ansible.cli.galaxy import GalaxyCLI as mycli
cli = mycli(sys.argv, display=display)
if cli:
cli.parse()
sys.exit(cli.run())
else:
raise AnsibleError("Program not implemented: %s" % me)
except AnsibleOptionsError as e:
cli.parser.print_help()
display.display(str(e), stderr=True, color='red')
sys.exit(1)
except AnsibleError as e:
display.display(str(e), stderr=True, color='red')
sys.exit(2)
except KeyboardInterrupt:
display.error("interrupted")
sys.exit(4)

@ -1,337 +0,0 @@
#!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import textwrap
import re
import optparse
import datetime
import subprocess
import fcntl
import termios
import struct
from ansible import utils
from ansible.utils import module_docs
import ansible.constants as C
from ansible.utils import version
import traceback
MODULEDIR = C.DEFAULT_MODULE_PATH
BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm')
IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"]
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
def pager_print(text):
    ''' just print text '''
    # print(text) is valid on both Python 2 (parenthesized expression) and
    # Python 3 (function call); the original `print text` is Py2-only syntax.
    print(text)
def pager_pipe(text, cmd):
    ''' pipe text through a pager '''
    # Default less options (quit-if-one-screen, raw ansi, chop, no termcap init)
    # unless the user already configured LESS themselves.
    os.environ.setdefault('LESS', LESS_OPTS)
    try:
        proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
        proc.communicate(input=text)
    except (IOError, KeyboardInterrupt):
        # broken pipe / user quit the pager early: nothing to do
        pass
def pager(text):
    ''' find reasonable way to display text '''
    # this is a much simpler form of what is in pydoc.py
    if not sys.stdout.isatty():
        pager_print(text)
        return
    if 'PAGER' in os.environ:
        # honor the user's pager, except on win32 where piping is unreliable
        if sys.platform == 'win32':
            pager_print(text)
        else:
            pager_pipe(text, os.environ['PAGER'])
        return
    if subprocess.call('(less --version) 2> /dev/null', shell=True) == 0:
        pager_pipe(text, 'less')
    else:
        pager_print(text)
def tty_ify(text):
    ''' convert ansible doc markup into plain tty-friendly text '''
    substitutions = (
        (_ITALIC, "`" + r"\1" + "'"),   # I(word) => `word'
        (_BOLD,   "*" + r"\1" + "*"),   # B(word) => *word*
        (_MODULE, "[" + r"\1" + "]"),   # M(word) => [word]
        (_URL,    r"\1"),               # U(word) => word
        (_CONST,  "`" + r"\1" + "'"),   # C(word) => `word'
    )
    result = text
    for pattern, replacement in substitutions:
        result = pattern.sub(replacement, result)
    return result
def get_man_text(doc):
    ''' Render a parsed module DOCUMENTATION dict as man-page-style text. '''
    # indent used for wrapped continuation lines of options/notes/requirements
    opt_indent=" "
    text = []
    text.append("> %s\n" % doc['module'].upper())
    desc = " ".join(doc['description'])
    text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=" ", subsequent_indent=" "))
    if 'option_keys' in doc and len(doc['option_keys']) > 0:
        text.append("Options (= is mandatory):\n")
        for o in sorted(doc['option_keys']):
            opt = doc['options'][o]
            # '=' marks a required option, '-' an optional one
            if opt.get('required', False):
                opt_leadin = "="
            else:
                opt_leadin = "-"
            text.append("%s %s" % (opt_leadin, o))
            desc = " ".join(opt['description'])
            # append choices/default information to the description line
            if 'choices' in opt:
                choices = ", ".join(str(i) for i in opt['choices'])
                desc = desc + " (Choices: " + choices + ")"
            if 'default' in opt:
                default = str(opt['default'])
                desc = desc + " [Default: " + default + "]"
            text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=opt_indent,
                subsequent_indent=opt_indent))
    if 'notes' in doc and len(doc['notes']) > 0:
        notes = " ".join(doc['notes'])
        text.append("Notes:%s\n" % textwrap.fill(tty_ify(notes), initial_indent=" ",
            subsequent_indent=opt_indent))
    if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
        req = ", ".join(doc['requirements'])
        text.append("Requirements:%s\n" % textwrap.fill(tty_ify(req), initial_indent=" ",
            subsequent_indent=opt_indent))
    if 'examples' in doc and len(doc['examples']) > 0:
        # pluralize the header only when there are multiple examples
        text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
        for ex in doc['examples']:
            text.append("%s\n" % (ex['code']))
    if 'plainexamples' in doc and doc['plainexamples'] is not None:
        text.append("EXAMPLES:")
        text.append(doc['plainexamples'])
    if 'returndocs' in doc and doc['returndocs'] is not None:
        text.append("RETURN VALUES:")
        text.append(doc['returndocs'])
    text.append('')
    return "\n".join(text)
def get_snippet_text(doc):
    ''' Render a copy/paste-ready playbook task snippet for a module. '''
    lines = []
    summary = tty_ify(" ".join(doc['short_description']))
    lines.append("- name: %s" % (summary))
    lines.append(" action: %s" % (doc['module']))
    for opt_name in sorted(doc['options'].keys()):
        option = doc['options'][opt_name]
        opt_desc = tty_ify(" ".join(option['description']))
        # required options get a trailing '=' to prompt for a value
        if option.get('required', False):
            entry = opt_name + "="
        else:
            entry = opt_name
        lines.append(" %-20s # %s" % (entry, opt_desc))
    lines.append('')
    return "\n".join(lines)
def get_module_list_text(module_list):
    ''' Render a two-column listing (module name, short description). '''
    tty_size = 0
    if os.isatty(0):
        # query the terminal width via TIOCGWINSZ so columns fit the tty
        tty_size = struct.unpack('HHHH',
            fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1]
    columns = max(60, tty_size)
    # widest module name determines the first column width
    displace = max(len(x) for x in module_list)
    linelimit = columns - displace - 5
    text = []
    deprecated = []
    for module in sorted(set(module_list)):
        if module in module_docs.BLACKLIST_MODULES:
            continue
        filename = utils.plugins.module_finder.find_plugin(module)
        # skip modules with no resolvable file, PowerShell modules, and dirs
        if filename is None:
            continue
        if filename.endswith(".ps1"):
            continue
        if os.path.isdir(filename):
            continue
        try:
            doc, plainexamples, returndocs = module_docs.get_docstring(filename)
            desc = tty_ify(doc.get('short_description', '?')).strip()
            if len(desc) > linelimit:
                desc = desc[:linelimit] + '...'
            if module.startswith('_'): # Handle deprecated
                deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
            else:
                text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
        except:
            # keep listing other modules even if one has broken docs
            traceback.print_exc()
            sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module)
    if len(deprecated) > 0:
        text.append("\nDEPRECATED:")
        text.extend(deprecated)
    return "\n".join(text)
def find_modules(path, module_list):
    ''' Recursively collect module names (sans extension) under *path*
    into *module_list*, skipping hidden files, blacklisted extensions,
    dunder files, docs/license files, and deprecated-alias symlinks. '''
    if not os.path.isdir(path):
        return
    for module in os.listdir(path):
        if module.startswith('.'):
            continue
        fullpath = '/'.join([path, module])
        # BUG FIX: the original tested os.path.isdir(module) and recursed on
        # the bare entry name, which resolved relative to the CWD instead of
        # *path*, so subdirectories were effectively never descended into.
        if os.path.isdir(fullpath):
            find_modules(fullpath, module_list)
            continue
        if any(module.endswith(x) for x in BLACKLIST_EXTS):
            continue
        if module.startswith('__'):
            continue
        if module in IGNORE_FILES:
            continue
        if module.startswith('_'):
            if os.path.islink(fullpath):  # avoids aliases
                continue
        module = os.path.splitext(module)[0]  # removes the extension
        module_list.append(module)
def main():
    ''' CLI entry point for ansible-doc: list modules or render their docs. '''
    p = optparse.OptionParser(
        version=version("%prog"),
        usage='usage: %prog [options] [module...]',
        description='Show Ansible module documentation',
    )
    p.add_option("-M", "--module-path",
        action="store",
        dest="module_path",
        default=MODULEDIR,
        help="Ansible modules/ directory")
    p.add_option("-l", "--list",
        action="store_true",
        default=False,
        dest='list_dir',
        help='List available modules')
    p.add_option("-s", "--snippet",
        action="store_true",
        default=False,
        dest='show_snippet',
        help='Show playbook snippet for specified module(s)')
    p.add_option('-v', action='version', help='Show version number and exit')
    (options, args) = p.parse_args()
    # register any extra module search paths with the plugin finder
    if options.module_path is not None:
        for i in options.module_path.split(os.pathsep):
            utils.plugins.module_finder.add_directory(i)
    if options.list_dir:
        # list modules
        paths = utils.plugins.module_finder._get_paths()
        module_list = []
        for path in paths:
            find_modules(path, module_list)
        pager(get_module_list_text(module_list))
        sys.exit()
    if len(args) == 0:
        p.print_help()
    def print_paths(finder):
        ''' Returns a string suitable for printing of the search path '''
        # Uses a list to get the order right
        ret = []
        for i in finder._get_paths():
            if i not in ret:
                ret.append(i)
        return os.pathsep.join(ret)
    text = ''
    for module in args:
        filename = utils.plugins.module_finder.find_plugin(module)
        if filename is None:
            sys.stderr.write("module %s not found in %s\n" % (module, print_paths(utils.plugins.module_finder)))
            continue
        if any(filename.endswith(x) for x in BLACKLIST_EXTS):
            continue
        try:
            doc, plainexamples, returndocs = module_docs.get_docstring(filename)
        except:
            traceback.print_exc()
            sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module)
            continue
        if doc is not None:
            # precompute derived fields consumed by the text/snippet renderers
            all_keys = []
            for (k,v) in doc['options'].iteritems():
                all_keys.append(k)
            all_keys = sorted(all_keys)
            doc['option_keys'] = all_keys
            doc['filename'] = filename
            doc['docuri'] = doc['module'].replace('_', '-')
            doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
            doc['plainexamples'] = plainexamples
            doc['returndocs'] = returndocs
            if options.show_snippet:
                text += get_snippet_text(doc)
            else:
                text += get_man_text(doc)
        else:
            # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
            # probably a quoting issue.
            sys.stderr.write("ERROR: module %s missing documentation (or could not parse documentation)\n" % module)
    pager(text)
# script entry point: parse command-line args and render the requested docs
if __name__ == '__main__':
    main()

@ -0,0 +1 @@
ansible

@ -1,957 +0,0 @@
#!/usr/bin/env python
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
import datetime
import json
import os
import os.path
import shutil
import subprocess
import sys
import tarfile
import tempfile
import urllib
import urllib2
import yaml
from collections import defaultdict
from distutils.version import LooseVersion
from jinja2 import Environment
from optparse import OptionParser
import ansible.constants as C
import ansible.utils
from ansible.errors import AnsibleError
default_meta_template = """---
galaxy_info:
author: {{ author }}
description: {{description}}
company: {{ company }}
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: {{ issue_tracker_url }}
# Some suggested licenses:
# - BSD (default)
# - MIT
# - GPLv2
# - GPLv3
# - Apache
# - CC-BY
license: {{ license }}
min_ansible_version: {{ min_ansible_version }}
#
# Below are all platforms currently available. Just uncomment
# the ones that apply to your role. If you don't see your
# platform on this list, let us know and we'll get it added!
#
#platforms:
{%- for platform,versions in platforms.iteritems() %}
#- name: {{ platform }}
# versions:
# - all
{%- for version in versions %}
# - {{ version }}
{%- endfor %}
{%- endfor %}
#
# Below are all categories currently available. Just as with
# the platforms above, uncomment those that apply to your role.
#
#categories:
{%- for category in categories %}
#- {{ category.name }}
{%- endfor %}
dependencies: []
# List your role dependencies here, one per line.
# Be sure to remove the '[]' above if you add dependencies
# to this list.
{% for dependency in dependencies %}
#- {{ dependency }}
{% endfor %}
"""
default_readme_template = """Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).
"""
#-------------------------------------------------------------------------------------
# Utility functions for parsing actions/options
#-------------------------------------------------------------------------------------
VALID_ACTIONS = ("init", "info", "install", "list", "remove")
SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
def get_action(args):
    """
    Return the first recognized galaxy action found in *args*,
    removing it from the list in place; None when no valid
    action is present.
    """
    for idx, candidate in enumerate(args):
        if candidate in VALID_ACTIONS:
            del args[idx]
            return candidate
    return None
def build_option_parser(action):
    """
    Builds an option parser object based on the action
    the user wants to execute.
    """
    usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(VALID_ACTIONS)
    epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
    # monkey-patch format_epilog so optparse does not re-wrap the epilog text
    OptionParser.format_epilog = lambda self, formatter: self.epilog
    parser = OptionParser(usage=usage, epilog=epilog)
    # no action given: show general help and exit
    if not action:
        parser.print_help()
        sys.exit()
    # options for all actions
    # - none yet
    # options specific to actions
    if action == "info":
        parser.set_usage("usage: %prog info [options] role_name[,version]")
    elif action == "init":
        parser.set_usage("usage: %prog init [options] role_name")
        parser.add_option(
            '-p', '--init-path', dest='init_path', default="./",
            help='The path in which the skeleton role will be created. '
            'The default is the current working directory.')
        parser.add_option(
            '--offline', dest='offline', default=False, action='store_true',
            help="Don't query the galaxy API when creating roles")
    elif action == "install":
        parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
        parser.add_option(
            '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
            help='Ignore errors and continue with the next specified role.')
        parser.add_option(
            '-n', '--no-deps', dest='no_deps', action='store_true', default=False,
            help='Don\'t download roles listed as dependencies')
        parser.add_option(
            '-r', '--role-file', dest='role_file',
            help='A file containing a list of roles to be imported')
    elif action == "remove":
        parser.set_usage("usage: %prog remove role1 role2 ...")
    elif action == "list":
        parser.set_usage("usage: %prog list [role_name]")
    # options that apply to more than one action
    if action != "init":
        parser.add_option(
            '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
            help='The path to the directory containing your roles. '
            'The default is the roles_path configured in your '
            'ansible.cfg file (/etc/ansible/roles if not configured)')
    if action in ("info","init","install"):
        parser.add_option(
            '-s', '--server', dest='api_server', default="galaxy.ansible.com",
            help='The API server destination')
    if action in ("init","install"):
        parser.add_option(
            '-f', '--force', dest='force', action='store_true', default=False,
            help='Force overwriting an existing role')
    # done, return the parser
    return parser
def get_opt(options, k, defval=""):
    """
    Returns an option from an Optparse values instance.

    Falls back to *defval* when the attribute is missing.  For the
    'roles_path' key, only the first entry of an os.pathsep-separated
    list is returned.
    """
    # getattr with a default replaces the original bare try/except, which
    # silently swallowed every exception type (not just AttributeError).
    data = getattr(options, k, defval)
    if k == "roles_path":
        if os.pathsep in data:
            data = data.split(os.pathsep)[0]
    return data
def exit_without_ignore(options, rc=1):
    """
    Exits with the specified return code unless the
    option --ignore-errors was specified
    """
    if not get_opt(options, "ignore_errors", False):
        # print(...) with a single argument is valid on Python 2 and 3;
        # the original bare print statement is Py2-only syntax.
        print('- you can use --ignore-errors to skip failed roles.')
        sys.exit(rc)
#-------------------------------------------------------------------------------------
# Galaxy API functions
#-------------------------------------------------------------------------------------
def api_get_config(api_server):
    """
    Fetches the Galaxy API current version to ensure
    the API server is up and reachable.
    """
    try:
        url = 'https://%s/api/' % api_server
        data = json.load(urllib2.urlopen(url))
        # a healthy server always reports a current_version field
        if not data.get("current_version",None):
            return None
        else:
            return data
    except:
        # NOTE(review): bare except hides network/JSON errors; the caller
        # only sees None either way
        return None
def api_lookup_role_by_name(api_server, role_name, notify=True):
    """
    Uses the Galaxy API to do a lookup on the role owner/name.
    """
    role_name = urllib.quote(role_name)
    try:
        # split "username.rolename" into owner and role parts
        parts = role_name.split(".")
        user_name = ".".join(parts[0:-1])
        role_name = parts[-1]
        if notify:
            print "- downloading role '%s', owned by %s" % (role_name, user_name)
    except:
        # NOTE(review): `parser` is not defined in this function's scope --
        # reaching this branch would raise a NameError rather than print help
        parser.print_help()
        print "- invalid role name (%s). Specify role as format: username.rolename" % role_name
        sys.exit(1)
    url = 'https://%s/api/v1/roles/?owner__username=%s&name=%s' % (api_server,user_name,role_name)
    try:
        data = json.load(urllib2.urlopen(url))
        # the API returns a list of matches; take the first (if any)
        if len(data["results"]) == 0:
            return None
        else:
            return data["results"][0]
    except:
        return None
def api_fetch_role_related(api_server, related, role_id):
    """
    Uses the Galaxy API to fetch the list of related items for
    the given role. The url comes from the 'related' field of
    the role.
    """
    try:
        url = 'https://%s/api/v1/roles/%d/%s/?page_size=50' % (api_server, int(role_id), related)
        data = json.load(urllib2.urlopen(url))
        results = data['results']
        # follow pagination until there is no 'next' page
        done = (data.get('next', None) == None)
        while not done:
            url = 'https://%s%s' % (api_server, data['next'])
            print url
            data = json.load(urllib2.urlopen(url))
            results += data['results']
            done = (data.get('next', None) == None)
        return results
    except:
        # any failure (network, JSON, missing keys) is reported as None
        return None
def api_get_list(api_server, what):
    """
    Uses the Galaxy API to fetch the list of items specified.
    """
    try:
        url = 'https://%s/api/v1/%s/?page_size' % (api_server, what)
        data = json.load(urllib2.urlopen(url))
        # paginated responses wrap the payload in a 'results' key
        if "results" in data:
            results = data['results']
        else:
            results = data
        done = True
        if "next" in data:
            done = (data.get('next', None) == None)
        # follow pagination until 'next' is exhausted
        while not done:
            url = 'https://%s%s' % (api_server, data['next'])
            print url
            data = json.load(urllib2.urlopen(url))
            results += data['results']
            done = (data.get('next', None) == None)
        return results
    except:
        print "- failed to download the %s list" % what
        return None
#-------------------------------------------------------------------------------------
# scm repo utility functions
#-------------------------------------------------------------------------------------
def scm_archive_role(scm, role_url, role_version, role_name):
    ''' Clone a role from git/hg and pack it into a temporary tar archive.
    Returns the archive filename on success, False on failure. '''
    if scm not in ['hg', 'git']:
        print "- scm %s is not currently supported" % scm
        return False
    tempdir = tempfile.mkdtemp()
    clone_cmd = [scm, 'clone', role_url, role_name]
    # clone quietly; stdout/stderr go to /dev/null
    with open('/dev/null', 'w') as devnull:
        try:
            print "- executing: %s" % " ".join(clone_cmd)
            popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
        except:
            raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
        rc = popen.wait()
        if rc != 0:
            print "- command %s failed" % ' '.join(clone_cmd)
            print " in directory %s" % tempdir
            return False
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
    # build the scm-specific archive command (hg vs git)
    if scm == 'hg':
        archive_cmd = ['hg', 'archive', '--prefix', "%s/" % role_name]
        if role_version:
            archive_cmd.extend(['-r', role_version])
        archive_cmd.append(temp_file.name)
    if scm == 'git':
        archive_cmd = ['git', 'archive', '--prefix=%s/' % role_name, '--output=%s' % temp_file.name]
        if role_version:
            archive_cmd.append(role_version)
        else:
            archive_cmd.append('HEAD')
    with open('/dev/null', 'w') as devnull:
        print "- executing: %s" % " ".join(archive_cmd)
        popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, role_name),
            stderr=devnull, stdout=devnull)
        rc = popen.wait()
        if rc != 0:
            print "- command %s failed" % ' '.join(archive_cmd)
            print " in directory %s" % tempdir
            return False
    # clone dir is no longer needed once the archive exists
    shutil.rmtree(tempdir, ignore_errors=True)
    return temp_file.name
#-------------------------------------------------------------------------------------
# Role utility functions
#-------------------------------------------------------------------------------------
def get_role_path(role_name, options):
    """
    Returns the role path based on the roles_path option
    and the role name.
    """
    base = get_opt(options, 'roles_path')
    return os.path.expanduser(os.path.join(base, role_name))
def get_role_metadata(role_name, options):
    """
    Returns the metadata as YAML, if the file 'meta/main.yml'
    exists in the specified role_path
    """
    role_path = os.path.join(get_role_path(role_name, options), 'meta/main.yml')
    try:
        if not os.path.isfile(role_path):
            return None
        f = open(role_path, 'r')
        try:
            return yaml.safe_load(f)
        finally:
            # guarantee the handle is closed even if parsing fails
            f.close()
    except:
        return None
def get_galaxy_install_info(role_name, options):
    """
    Returns the YAML data contained in 'meta/.galaxy_install_info',
    if it exists.
    """
    try:
        info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
        if not os.path.isfile(info_path):
            return None
        f = open(info_path, 'r')
        try:
            return yaml.safe_load(f)
        finally:
            # guarantee the handle is closed even if parsing fails
            f.close()
    except:
        return None
def write_galaxy_install_info(role_name, role_version, options):
    """
    Writes a YAML-formatted file to the role's meta/ directory
    (named .galaxy_install_info) which contains some information
    we can use later for commands like 'list' and 'info'.
    """
    info = dict(
        version = role_version,
        install_date = datetime.datetime.utcnow().strftime("%c"),
    )
    try:
        info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
        f = open(info_path, 'w+')
        try:
            yaml.safe_dump(info, f)
        finally:
            f.close()
    except:
        # any I/O or serialization failure is reported as False
        return False
    return True
def remove_role(role_name, options):
    """
    Removes the specified role from the roles path. There is a
    sanity check to make sure there's a meta/main.yml file at this
    path so the user doesn't blow away random directories
    """
    # guard clause: refuse to delete anything that is not a real role
    if not get_role_metadata(role_name, options):
        return False
    shutil.rmtree(get_role_path(role_name, options))
    return True
def fetch_role(role_name, target, role_data, options):
    """
    Downloads the archived role from github to a temp location, extracts
    it, and then copies the extracted role to the role library path.
    """
    # first grab the file and save it to a temp location
    if '://' in role_name:
        # role_name is already a direct URL to an archive
        archive_url = role_name
    else:
        archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target)
    print "- downloading role from %s" % archive_url
    try:
        url_file = urllib2.urlopen(archive_url)
        temp_file = tempfile.NamedTemporaryFile(delete=False)
        # stream the download to disk chunk by chunk
        data = url_file.read()
        while data:
            temp_file.write(data)
            data = url_file.read()
        temp_file.close()
        return temp_file.name
    except Exception, e:
        # TODO: better urllib2 error handling for error
        # messages that are more exact
        print "- error: failed to download the file."
        return False
def install_role(role_name, role_version, role_filename, options):
    ''' Extract a downloaded role tarball into the roles path.
    Returns the parsed meta/main.yml data on success, False on failure. '''
    # the file is a tar, so open it that way and extract it
    # to the specified (or default) roles directory
    if not tarfile.is_tarfile(role_filename):
        print "- error: the file downloaded was not a tar.gz"
        return False
    else:
        if role_filename.endswith('.gz'):
            role_tar_file = tarfile.open(role_filename, "r:gz")
        else:
            role_tar_file = tarfile.open(role_filename, "r")
        # verify the role's meta file
        meta_file = None
        members = role_tar_file.getmembers()
        # next find the metadata file
        for member in members:
            if "/meta/main.yml" in member.name:
                meta_file = member
                break
        if not meta_file:
            print "- error: this role does not appear to have a meta/main.yml file."
            return False
        else:
            try:
                meta_file_data = yaml.safe_load(role_tar_file.extractfile(meta_file))
            except:
                print "- error: this role does not appear to have a valid meta/main.yml file."
                return False
        # we strip off the top-level directory for all of the files contained within
        # the tar file here, since the default is 'github_repo-target', and change it
        # to the specified role's name
        role_path = os.path.join(get_opt(options, 'roles_path'), role_name)
        role_path = os.path.expanduser(role_path)
        print "- extracting %s to %s" % (role_name, role_path)
        try:
            if os.path.exists(role_path):
                if not os.path.isdir(role_path):
                    print "- error: the specified roles path exists and is not a directory."
                    return False
                elif not get_opt(options, "force", False):
                    print "- error: the specified role %s appears to already exist. Use --force to replace it." % role_name
                    return False
                else:
                    # using --force, remove the old path
                    if not remove_role(role_name, options):
                        print "- error: %s doesn't appear to contain a role." % role_path
                        print " please remove this directory manually if you really want to put the role here."
                        return False
            else:
                os.makedirs(role_path)
            # now we do the actual extraction to the role_path
            for member in members:
                # we only extract files, and remove any relative path
                # bits that might be in the file for security purposes
                # and drop the leading directory, as mentioned above
                if member.isreg() or member.issym():
                    parts = member.name.split("/")[1:]
                    final_parts = []
                    for part in parts:
                        if part != '..' and '~' not in part and '$' not in part:
                            final_parts.append(part)
                    member.name = os.path.join(*final_parts)
                    role_tar_file.extract(member, role_path)
            # write out the install info file for later use
            write_galaxy_install_info(role_name, role_version, options)
        except OSError, e:
            print "- error: you do not have permission to modify files in %s" % role_path
            return False
        # return the parsed yaml metadata
        print "- %s was installed successfully" % role_name
        return meta_file_data
#-------------------------------------------------------------------------------------
# Action functions
#-------------------------------------------------------------------------------------
def execute_init(args, options, parser):
"""
Executes the init action, which creates the skeleton framework
of a role that complies with the galaxy metadata format.
"""
init_path = get_opt(options, 'init_path', './')
api_server = get_opt(options, "api_server", "galaxy.ansible.com")
force = get_opt(options, 'force', False)
offline = get_opt(options, 'offline', False)
if not offline:
api_config = api_get_config(api_server)
if not api_config:
print "- the API server (%s) is not responding, please try again later." % api_server
sys.exit(1)
try:
role_name = args.pop(0).strip()
if role_name == "":
raise Exception("")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
print "- the path %s already exists, but is a file - aborting" % role_path
sys.exit(1)
elif not force:
print "- the directory %s already exists." % role_path
print " you can use --force to re-initialize this directory,\n" + \
" however it will reset any main.yml files that may have\n" + \
" been modified there already."
sys.exit(1)
except Exception, e:
parser.print_help()
print "- no role name specified for init"
sys.exit(1)
ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')
# create the default README.md
if not os.path.exists(role_path):
os.makedirs(role_path)
readme_path = os.path.join(role_path, "README.md")
f = open(readme_path, "wb")
f.write(default_readme_template)
f.close
for dir in ROLE_DIRS:
dir_path = os.path.join(init_path, role_name, dir)
main_yml_path = os.path.join(dir_path, 'main.yml')
# create the directory if it doesn't exist already
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# now create the main.yml file for that directory
if dir == "meta":
# create a skeleton meta/main.yml with a valid galaxy_info
# datastructure in place, plus with all of the available
# tags/platforms included (but commented out) and the
# dependencies section
platforms = []
if not offline:
platforms = api_get_list(api_server, "platforms") or []
categories = []
if not offline:
categories = api_get_list(api_server, "categories") or []
# group the list of platforms from the api based
# on their names, with the release field being
# appended to a list of versions
platform_groups = defaultdict(list)
for platform in platforms:
platform_groups[platform['name']].append(platform['release'])
platform_groups[platform['name']].sort()
inject = dict(
author = 'your name',
company = 'your company (optional)',
license = 'license (GPLv2, CC-BY, etc)',
issue_tracker_url = 'http://example.com/issue/tracker',
min_ansible_version = '1.2',
platforms = platform_groups,
categories = categories,
)
rendered_meta = Environment().from_string(default_meta_template).render(inject)
f = open(main_yml_path, 'w')
f.write(rendered_meta)
f.close()
pass
elif dir not in ('files','templates'):
# just write a (mostly) empty YAML file for main.yml
f = open(main_yml_path, 'w')
f.write('---\n# %s file for %s\n' % (dir,role_name))
f.close()
print "- %s was created successfully" % role_name
def execute_info(args, options, parser):
"""
Executes the info action. This action prints out detailed
information about an installed role as well as info available
from the galaxy API.
"""
if len(args) == 0:
# the user needs to specify a role
parser.print_help()
print "- you must specify a user/role name"
sys.exit(1)
api_server = get_opt(options, "api_server", "galaxy.ansible.com")
api_config = api_get_config(api_server)
roles_path = get_opt(options, "roles_path")
for role in args:
role_info = {}
install_info = get_galaxy_install_info(role, options)
if install_info:
if 'version' in install_info:
install_info['intalled_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = api_lookup_role_by_name(api_server, role, False)
if remote_data:
role_info.update(remote_data)
metadata = get_role_metadata(role, options)
if metadata:
role_info.update(metadata)
role_spec = ansible.utils.role_spec_parse(role)
if role_spec:
role_info.update(role_spec)
if role_info:
print "- %s:" % (role)
for k in sorted(role_info.keys()):
if k in SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
print "\t%s: " % (k)
for key in sorted(role_info[k].keys()):
if key in SKIP_INFO_KEYS:
continue
print "\t\t%s: %s" % (key, role_info[k][key])
else:
print "\t%s: %s" % (k, role_info[k])
else:
print "- the role %s was not found" % role
def execute_install(args, options, parser):
"""
Executes the installation action. The args list contains the
roles to be installed, unless -f was specified. The list of roles
can be a name (which will be downloaded via the galaxy API and github),
or it can be a local .tar.gz file.
"""
role_file = get_opt(options, "role_file", None)
if len(args) == 0 and role_file is None:
# the user needs to specify one of either --role-file
# or specify a single user/role name
parser.print_help()
print "- you must specify a user/role name or a roles file"
sys.exit()
elif len(args) == 1 and not role_file is None:
# using a role file is mutually exclusive of specifying
# the role name on the command line
parser.print_help()
print "- please specify a user/role name, or a roles file, but not both"
sys.exit(1)
api_server = get_opt(options, "api_server", "galaxy.ansible.com")
no_deps = get_opt(options, "no_deps", False)
roles_path = get_opt(options, "roles_path")
roles_done = []
if role_file:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f))
else:
# roles listed in a file, one per line
roles_left = map(ansible.utils.role_spec_parse, f.readlines())
f.close()
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
roles_left = map(ansible.utils.role_spec_parse, args)
while len(roles_left) > 0:
# query the galaxy API for the role data
role_data = None
role = roles_left.pop(0)
role_src = role.get("src")
role_scm = role.get("scm")
role_path = role.get("path")
if role_path:
options.roles_path = role_path
else:
options.roles_path = roles_path
if os.path.isfile(role_src):
# installing a local tar.gz
tmp_file = role_src
else:
if role_scm:
# create tar file from scm url
tmp_file = scm_archive_role(role_scm, role_src, role.get("version"), role.get("name"))
elif '://' in role_src:
# just download a URL - version will probably be in the URL
tmp_file = fetch_role(role_src, None, None, options)
else:
# installing from galaxy
api_config = api_get_config(api_server)
if not api_config:
print "- the API server (%s) is not responding, please try again later." % api_server
sys.exit(1)
role_data = api_lookup_role_by_name(api_server, role_src)
if not role_data:
print "- sorry, %s was not found on %s." % (role_src, api_server)
exit_without_ignore(options)
continue
role_versions = api_fetch_role_related(api_server, 'versions', role_data['id'])
if "version" not in role or role['version'] == '':
# convert the version names to LooseVersion objects
# and sort them to get the latest version. If there
# are no versions in the list, we'll grab the head
# of the master branch
if len(role_versions) > 0:
loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
loose_versions.sort()
role["version"] = str(loose_versions[-1])
else:
role["version"] = 'master'
elif role['version'] != 'master':
if role_versions and role["version"] not in [a.get('name', None) for a in role_versions]:
print 'role is %s' % role
print "- the specified version (%s) was not found in the list of available versions (%s)." % (role['version'], role_versions)
exit_without_ignore(options)
continue
# download the role. if --no-deps was specified, we stop here,
# otherwise we recursively grab roles and all of their deps.
tmp_file = fetch_role(role_src, role["version"], role_data, options)
installed = False
if tmp_file:
installed = install_role(role.get("name"), role.get("version"), tmp_file, options)
# we're done with the temp file, clean it up
if tmp_file != role_src:
os.unlink(tmp_file)
# install dependencies, if we want them
if not no_deps and installed:
if not role_data:
role_data = get_role_metadata(role.get("name"), options)
role_dependencies = role_data['dependencies']
else:
role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id'])
for dep in role_dependencies:
if isinstance(dep, basestring):
dep = ansible.utils.role_spec_parse(dep)
else:
dep = ansible.utils.role_yaml_parse(dep)
if not get_role_metadata(dep["name"], options):
if dep not in roles_left:
print '- adding dependency: %s' % dep["name"]
roles_left.append(dep)
else:
print '- dependency %s already pending installation.' % dep["name"]
else:
print '- dependency %s is already installed, skipping.' % dep["name"]
if not tmp_file or not installed:
print "- %s was NOT installed successfully." % role.get("name")
exit_without_ignore(options)
sys.exit(0)
def execute_remove(args, options, parser):
"""
Executes the remove action. The args list contains the list
of roles to be removed. This list can contain more than one role.
"""
if len(args) == 0:
parser.print_help()
print '- you must specify at least one role to remove.'
sys.exit()
for role in args:
if get_role_metadata(role, options):
if remove_role(role, options):
print '- successfully removed %s' % role
else:
print "- failed to remove role: %s" % role
else:
print '- %s is not installed, skipping.' % role
sys.exit(0)
def execute_list(args, options, parser):
"""
Executes the list action. The args list can contain zero
or one role. If one is specified, only that role will be
shown, otherwise all roles in the specified directory will
be shown.
"""
if len(args) > 1:
print "- please specify only one role to list, or specify no roles to see a full list"
sys.exit(1)
if len(args) == 1:
# show only the request role, if it exists
role_name = args[0]
metadata = get_role_metadata(role_name, options)
if metadata:
install_info = get_galaxy_install_info(role_name, options)
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
# show some more info about single roles here
print "- %s, %s" % (role_name, version)
else:
print "- the role %s was not found" % role_name
else:
# show all valid roles in the roles_path directory
roles_path = get_opt(options, 'roles_path')
roles_path = os.path.expanduser(roles_path)
if not os.path.exists(roles_path):
parser.print_help()
print "- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path
sys.exit(1)
elif not os.path.isdir(roles_path):
print "- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path
parser.print_help()
sys.exit(1)
path_files = os.listdir(roles_path)
for path_file in path_files:
if get_role_metadata(path_file, options):
install_info = get_galaxy_install_info(path_file, options)
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
print "- %s, %s" % (path_file, version)
sys.exit(0)
#-------------------------------------------------------------------------------------
# The main entry point
#-------------------------------------------------------------------------------------
def main():
# parse the CLI options
action = get_action(sys.argv)
parser = build_option_parser(action)
(options, args) = parser.parse_args()
# execute the desired action
if 1: #try:
fn = globals()["execute_%s" % action]
fn(args, options, parser)
#except KeyError, e:
# print "- error: %s is not a valid action. Valid actions are: %s" % (action, ", ".join(VALID_ACTIONS))
# sys.exit(1)
# script entry point: dispatch to the action named on the command line
if __name__ == "__main__":
    main()

@ -1,330 +0,0 @@
#!/usr/bin/env python
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#######################################################
__requires__ = ['ansible']
try:
import pkg_resources
except Exception:
# Use pkg_resources to find the correct versions of libraries and set
# sys.path appropriately when there are multiversion installs. But we
# have code that better expresses the errors in the places where the code
# is actually used (the deps are optional for many code paths) so we don't
# want to fail here.
pass
import sys
import os
import stat
# Augment PYTHONPATH to find Python modules relative to this file path
# This is so that we can find the modules when running from a local checkout
# installed as editable with `pip install -e ...` or `python setup.py develop`
local_module_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'lib')
)
sys.path.append(local_module_path)
import ansible.playbook
import ansible.constants as C
import ansible.utils.template
from ansible import errors
from ansible import callbacks
from ansible import utils
from ansible.color import ANSIBLE_COLOR, stringc
from ansible.callbacks import display
def colorize(lead, num, color):
    """ Print 'lead' = 'num' in 'color' """
    # plain form when the count is zero, color is disabled, or no color given
    if num == 0 or not ANSIBLE_COLOR or color is None:
        return "%s=%-4s" % (lead, str(num))
    return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color))
def hostcolor(host, stats, color=True):
    """Return *host* padded for the recap line, colored by its per-host stats
    (red = failed/unreachable, yellow = changed, green = ok)."""
    if not (ANSIBLE_COLOR and color):
        return "%-26s" % host
    if stats['failures'] != 0 or stats['unreachable'] != 0:
        shade = 'red'
    elif stats['changed'] != 0:
        shade = 'yellow'
    else:
        shade = 'green'
    return "%-37s" % stringc(host, shade)
def main(args):
    ''' run ansible-playbook operations '''

    # create parser for CLI options
    parser = utils.base_parser(
        constants=C,
        usage = "%prog playbook.yml",
        connect_opts=True,
        runas_opts=True,
        subset_opts=True,
        check_opts=True,
        diff_opts=True
    )
    #parser.add_option('--vault-password', dest="vault_password",
    #                  help="password for vault encrypted files")
    parser.add_option('-t', '--tags', dest='tags', default='all',
                      help="only run plays and tasks tagged with these values")
    parser.add_option('--skip-tags', dest='skip_tags',
                      help="only run plays and tasks whose tags do not match these values")
    parser.add_option('--syntax-check', dest='syntax', action='store_true',
                      help="perform a syntax check on the playbook, but do not execute it")
    parser.add_option('--list-tasks', dest='listtasks', action='store_true',
                      help="list all tasks that would be executed")
    parser.add_option('--list-tags', dest='listtags', action='store_true',
                      help="list all available tags")
    parser.add_option('--step', dest='step', action='store_true',
                      help="one-step-at-a-time: confirm each task before running")
    parser.add_option('--start-at-task', dest='start_at',
                      help="start the playbook at the task matching this name")
    parser.add_option('--force-handlers', dest='force_handlers',
                      default=C.DEFAULT_FORCE_HANDLERS, action='store_true',
                      help="run handlers even if a task fails")
    parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
                      help="clear the fact cache")

    options, args = parser.parse_args(args)

    # at least one playbook path is required
    if len(args) == 0:
        parser.print_help(file=sys.stderr)
        return 1

    # privilege escalation command line arguments need to be mutually exclusive
    utils.check_mutually_exclusive_privilege(options, parser)

    if (options.ask_vault_pass and options.vault_password_file):
        parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")

    sshpass = None
    becomepass = None
    vault_pass = None

    options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS

    if options.listhosts or options.syntax or options.listtasks or options.listtags:
        # read-only operations: at most the vault password is needed
        (_, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass)
    else:
        options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
        # Never ask for an SSH password when we run with local connection
        if options.connection == "local":
            options.ask_pass = False

        # set pe options
        utils.normalize_become_options(options)
        prompt_method = utils.choose_pass_prompt(options)
        (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass,
                                                               become_ask_pass=options.become_ask_pass,
                                                               ask_vault_pass=options.ask_vault_pass,
                                                               become_method=prompt_method)

    # read vault_pass from a file
    if not options.ask_vault_pass and options.vault_password_file:
        vault_pass = utils.read_vault_file(options.vault_password_file)

    extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)

    only_tags = options.tags.split(",")
    skip_tags = options.skip_tags
    if options.skip_tags is not None:
        skip_tags = options.skip_tags.split(",")

    # validate every playbook path up front, before running any of them
    for playbook in args:
        if not os.path.exists(playbook):
            raise errors.AnsibleError("the playbook: %s could not be found" % playbook)
        if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
            raise errors.AnsibleError("the playbook: %s does not appear to be a file" % playbook)

    inventory = ansible.inventory.Inventory(options.inventory, vault_password=vault_pass)

    # Note: slightly wrong, this is written so that implicit localhost
    # (which is not returned in list_hosts()) is taken into account for
    # warning if inventory is empty. But it can't be taken into account for
    # checking if limit doesn't match any hosts. Instead we don't worry about
    # limit if only implicit localhost was in inventory to start with.
    #
    # Fix this in v2
    no_hosts = False
    if len(inventory.list_hosts()) == 0:
        # Empty inventory
        utils.warning("provided hosts list is empty, only localhost is available")
        no_hosts = True
    inventory.subset(options.subset)
    if len(inventory.list_hosts()) == 0 and no_hosts is False:
        # Invalid limit
        raise errors.AnsibleError("Specified --limit does not match any hosts")

    # run all playbooks specified on the command line
    for playbook in args:

        stats = callbacks.AggregateStats()
        playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
        if options.step:
            playbook_cb.step = options.step
        if options.start_at:
            playbook_cb.start_at = options.start_at
        runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)

        pb = ansible.playbook.PlayBook(
            playbook=playbook,
            module_path=options.module_path,
            inventory=inventory,
            forks=options.forks,
            remote_user=options.remote_user,
            remote_pass=sshpass,
            callbacks=playbook_cb,
            runner_callbacks=runner_cb,
            stats=stats,
            timeout=options.timeout,
            transport=options.connection,
            become=options.become,
            become_method=options.become_method,
            become_user=options.become_user,
            become_pass=becomepass,
            extra_vars=extra_vars,
            private_key_file=options.private_key_file,
            only_tags=only_tags,
            skip_tags=skip_tags,
            check=options.check,
            diff=options.diff,
            vault_password=vault_pass,
            force_handlers=options.force_handlers,
        )

        if options.flush_cache:
            display(callbacks.banner("FLUSHING FACT CACHE"))
            pb.SETUP_CACHE.flush()

        # listing modes print information instead of executing the playbook
        if options.listhosts or options.listtasks or options.syntax or options.listtags:
            print ''
            print 'playbook: %s' % playbook
            print ''
            playnum = 0
            for (play_ds, play_basedir) in zip(pb.playbook, pb.play_basedirs):
                playnum += 1
                play = ansible.playbook.Play(pb, play_ds, play_basedir,
                                             vault_password=pb.vault_password)
                label = play.name
                hosts = pb.inventory.list_hosts(play.hosts)

                if options.listhosts:
                    print ' play #%d (%s): host count=%d' % (playnum, label, len(hosts))
                    for host in hosts:
                        print ' %s' % host

                if options.listtags or options.listtasks:
                    print ' play #%d (%s):\tTAGS: [%s]' % (playnum, label,','.join(sorted(set(play.tags))))

                    if options.listtags:
                        tags = []
                        for task in pb.tasks_to_run_in_play(play):
                            tags.extend(task.tags)
                        print ' TASK TAGS: [%s]' % (', '.join(sorted(set(tags).difference(['untagged']))))

                    if options.listtasks:
                        for task in pb.tasks_to_run_in_play(play):
                            if getattr(task, 'name', None) is not None:
                                # meta tasks have no names
                                print ' %s\tTAGS: [%s]' % (task.name, ', '.join(sorted(set(task.tags).difference(['untagged']))))

            if options.listhosts or options.listtasks or options.listtags:
                print ''
                continue

        if options.syntax:
            # if we've not exited by now then we are fine.
            print 'Playbook Syntax is fine'
            return 0

        failed_hosts = []
        unreachable_hosts = []

        try:
            pb.run()

            hosts = sorted(pb.stats.processed.keys())
            display(callbacks.banner("PLAY RECAP"))
            playbook_cb.on_stats(pb.stats)

            for h in hosts:
                t = pb.stats.summarize(h)
                if t['failures'] > 0:
                    failed_hosts.append(h)
                if t['unreachable'] > 0:
                    unreachable_hosts.append(h)

            retries = failed_hosts + unreachable_hosts

            # write a retry inventory so a failed run can be resumed with --limit
            if C.RETRY_FILES_ENABLED and len(retries) > 0:
                filename = pb.generate_retry_inventory(retries)
                if filename:
                    display(" to retry, use: --limit @%s\n" % filename)

            # per-host recap: one colorized line to the screen, one plain to the log
            for h in hosts:
                t = pb.stats.summarize(h)

                display("%s : %s %s %s %s" % (
                    hostcolor(h, t),
                    colorize('ok', t['ok'], 'green'),
                    colorize('changed', t['changed'], 'yellow'),
                    colorize('unreachable', t['unreachable'], 'red'),
                    colorize('failed', t['failures'], 'red')),
                    screen_only=True
                )

                display("%s : %s %s %s %s" % (
                    hostcolor(h, t, False),
                    colorize('ok', t['ok'], None),
                    colorize('changed', t['changed'], None),
                    colorize('unreachable', t['unreachable'], None),
                    colorize('failed', t['failures'], None)),
                    log_only=True
                )

            print ""
            # exit codes: 2 = task failures, 3 = unreachable hosts
            if len(failed_hosts) > 0:
                return 2
            if len(unreachable_hosts) > 0:
                return 3

        except errors.AnsibleError, e:
            display("ERROR: %s" % e, color='red')
            return 1

    return 0
if __name__ == "__main__":
    # record the exact invocation in the log file only, before running
    display(" ", log_only=True)
    display(" ".join(sys.argv), log_only=True)
    display(" ", log_only=True)
    try:
        sys.exit(main(sys.argv[1:]))
    except errors.AnsibleError, e:
        # known ansible failures are reported cleanly instead of as tracebacks
        display("ERROR: %s" % e, color='red', stderr=True)
        sys.exit(1)
    except KeyboardInterrupt, ke:
        display("ERROR: interrupted", color='red', stderr=True)
        sys.exit(1)

@ -1,257 +0,0 @@
#!/usr/bin/env python
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-pull is a script that runs ansible in local mode
# after checking out a playbooks directory from source repo. There is an
# example playbook to bootstrap this script in the examples/ dir which
# installs ansible and sets it up to run on cron.
# usage:
# ansible-pull -d /var/lib/ansible \
# -U http://example.net/content.git [-C production] \
# [path/playbook.yml]
#
# the -d and -U arguments are required; the -C argument is optional.
#
# ansible-pull accepts an optional argument to specify a playbook
# location underneath the workdir and then searches the source repo
# for playbooks in the following order, stopping at the first match:
#
# 1. $workdir/path/playbook.yml, if specified
# 2. $workdir/$fqdn.yml
# 3. $workdir/$hostname.yml
# 4. $workdir/local.yml
#
# the source repo must contain at least one of these playbooks.
import os
import shutil
import sys
import datetime
import socket
import random
import time
from ansible import utils
from ansible.utils import cmd_functions
from ansible import errors
from ansible import inventory
# repository module handed to `ansible -m` when -m/--module-name is not given
DEFAULT_REPO_TYPE = 'git'
# playbook filename of last resort inside the checked-out repository
DEFAULT_PLAYBOOK = 'local.yml'
# maps try_playbook() return codes to human-readable error text
PLAYBOOK_ERRORS = {1: 'File does not exist',
                   2: 'File is not readable'}
# module-level verbosity counter, incremented by the -v optparse callback
VERBOSITY=0
def increment_debug(option, opt, value, parser):
    """optparse callback: raise the module-level verbosity one level per -v."""
    global VERBOSITY
    VERBOSITY = VERBOSITY + 1
def try_playbook(path):
    """Check a candidate playbook path: 0 = usable, 1 = missing, 2 = unreadable
    (codes match the PLAYBOOK_ERRORS table)."""
    if not os.path.exists(path):
        return 1
    return 0 if os.access(path, os.R_OK) else 2
def select_playbook(path, args):
    """
    Pick the playbook to run: the explicit path argument when one is given,
    otherwise the first readable of $fqdn.yml, $shorthost.yml, local.yml
    under *path*. Reports problems on stderr and returns None when nothing
    is usable.
    """
    if args and args[0] is not None:
        # the caller named a playbook explicitly; it must exist and be readable
        candidate = "%s/%s" % (path, args[0])
        status = try_playbook(candidate)
        if status != 0:
            sys.stderr.write("%s: %s\n" % (candidate, PLAYBOOK_ERRORS[status]))
            return None
        return candidate

    fqdn = socket.getfqdn()
    search_order = [
        "%s/%s.yml" % (path, fqdn),
        "%s/%s.yml" % (path, fqdn.split('.')[0]),
        "%s/%s" % (path, DEFAULT_PLAYBOOK),
    ]
    problems = []
    for candidate in search_order:
        status = try_playbook(candidate)
        if status == 0:
            return candidate
        problems.append("%s: %s" % (candidate, PLAYBOOK_ERRORS[status]))
    # nothing usable: explain why each candidate was rejected
    sys.stderr.write("\n".join(problems) + "\n")
    return None
def main(args):
    """ Set up and run a local playbook """
    usage = "%prog [options] [playbook.yml]"
    parser = utils.SortedOptParser(usage=usage)
    parser.add_option('--purge', default=False, action='store_true',
                      help='purge checkout after playbook run')
    parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
                      help='only run the playbook if the repository has been updated')
    parser.add_option('-s', '--sleep', dest='sleep', default=None,
                      help='sleep for random interval (between 0 and n number of seconds) before starting. this is a useful way to disperse git requests')
    parser.add_option('-f', '--force', dest='force', default=False,
                      action='store_true',
                      help='run the playbook even if the repository could '
                           'not be updated')
    parser.add_option('-d', '--directory', dest='dest', default=None,
                      help='directory to checkout repository to')
    #parser.add_option('-l', '--live', default=True, action='store_live',
    #                  help='Print the ansible-playbook output while running')
    parser.add_option('-U', '--url', dest='url', default=None,
                      help='URL of the playbook repository')
    parser.add_option('-C', '--checkout', dest='checkout',
                      help='branch/tag/commit to checkout. '
                           'Defaults to behavior of repository module.')
    parser.add_option('-i', '--inventory-file', dest='inventory',
                      help="location of the inventory host file")
    parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
                      help="set additional variables as key=value or YAML/JSON", default=[])
    parser.add_option('-v', '--verbose', default=False, action="callback",
                      callback=increment_debug,
                      help='Pass -vvvv to ansible-playbook')
    parser.add_option('-m', '--module-name', dest='module_name',
                      default=DEFAULT_REPO_TYPE,
                      help='Module name used to check out repository. '
                           'Default is %s.' % DEFAULT_REPO_TYPE)
    parser.add_option('--vault-password-file', dest='vault_password_file',
                      help="vault password file")
    parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
                      help='ask for sudo password')
    parser.add_option('-t', '--tags', dest='tags', default=False,
                      help='only run plays and tasks tagged with these values')
    parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
                      help='adds the hostkey for the repo url if not already added')
    parser.add_option('--key-file', dest='key_file',
                      help="Pass '-i <key_file>' to the SSH arguments used by git.")
    options, args = parser.parse_args(args)

    hostname = socket.getfqdn()
    if not options.dest:
        # use a hostname dependent directory, in case of $HOME on nfs
        options.dest = utils.prepare_writeable_dir('~/.ansible/pull/%s' % hostname)
    options.dest = os.path.abspath(options.dest)

    if not options.url:
        parser.error("URL for repository not specified, use -h for help")
        return 1

    now = datetime.datetime.now()
    print now.strftime("Starting ansible-pull at %F %T")

    # Attempt to use the inventory passed in as an argument
    # It might not yet have been downloaded so use localhost if not
    if not options.inventory or not os.path.exists(options.inventory):
        inv_opts = 'localhost,'
    else:
        inv_opts = options.inventory
    limit_opts = 'localhost:%s:127.0.0.1' % hostname
    repo_opts = "name=%s dest=%s" % (options.url, options.dest)

    # common flags shared by the checkout and playbook commands
    if VERBOSITY == 0:
        base_opts = '-c local --limit "%s"' % limit_opts
    elif VERBOSITY > 0:
        debug_level = ''.join([ "v" for x in range(0, VERBOSITY) ])
        base_opts = '-%s -c local --limit "%s"' % (debug_level, limit_opts)

    if options.checkout:
        repo_opts += ' version=%s' % options.checkout

    # Only git module is supported
    if options.module_name == DEFAULT_REPO_TYPE:
        if options.accept_host_key:
            repo_opts += ' accept_hostkey=yes'

        if options.key_file:
            repo_opts += ' key_file=%s' % options.key_file

    path = utils.plugins.module_finder.find_plugin(options.module_name)
    if path is None:
        sys.stderr.write("module '%s' not found.\n" % options.module_name)
        return 1

    # build the `ansible` command that checks out / updates the repository
    bin_path = os.path.dirname(os.path.abspath(__file__))
    cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % (
        bin_path, inv_opts, base_opts, options.module_name, repo_opts
    )

    for ev in options.extra_vars:
        cmd += ' -e "%s"' % ev

    if options.sleep:
        try:
            secs = random.randint(0,int(options.sleep));
        except ValueError:
            parser.error("%s is not a number." % options.sleep)
            return 1

        print >>sys.stderr, "Sleeping for %d seconds..." % secs
        time.sleep(secs);

    # RUN THE CHECKOUT COMMAND
    rc, out, err = cmd_functions.run_cmd(cmd, live=True)

    if rc != 0:
        if options.force:
            print >>sys.stderr, "Unable to update repository. Continuing with (forced) run of playbook."
        else:
            return rc
    elif options.ifchanged and '"changed": true' not in out:
        print "Repository has not changed, quitting."
        return 0

    playbook = select_playbook(options.dest, args)

    if playbook is None:
        print >>sys.stderr, "Could not find a playbook to run."
        return 1

    # build the `ansible-playbook` command for the selected playbook
    cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
    if options.vault_password_file:
        cmd += " --vault-password-file=%s" % options.vault_password_file
    if options.inventory:
        cmd += ' -i "%s"' % options.inventory
    for ev in options.extra_vars:
        cmd += ' -e "%s"' % ev
    if options.ask_sudo_pass:
        cmd += ' -K'
    if options.tags:
        cmd += ' -t "%s"' % options.tags

    os.chdir(options.dest)

    # RUN THE PLAYBOOK COMMAND
    rc, out, err = cmd_functions.run_cmd(cmd, live=True)

    if options.purge:
        # leave the checkout dir before deleting it
        os.chdir('/')
        try:
            shutil.rmtree(options.dest)
        except Exception, e:
            print >>sys.stderr, "Failed to remove %s: %s" % (options.dest, str(e))

    return rc
if __name__ == '__main__':
    try:
        sys.exit(main(sys.argv[1:]))
    except KeyboardInterrupt, e:
        # Ctrl-C: exit non-zero without a traceback
        print >>sys.stderr, "Exit on user request.\n"
        sys.exit(1)

@ -0,0 +1 @@
ansible

@ -1,241 +0,0 @@
#!/usr/bin/env python
# (c) 2014, James Tanner <tanner.jc@gmail.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
__requires__ = ['ansible']
try:
import pkg_resources
except Exception:
# Use pkg_resources to find the correct versions of libraries and set
# sys.path appropriately when there are multiversion installs. But we
# have code that better expresses the errors in the places where the code
# is actually used (the deps are optional for many code paths) so we don't
# want to fail here.
pass
import os
import sys
import traceback
import ansible.constants as C
from ansible import utils
from ansible import errors
from ansible.utils.vault import VaultEditor
from optparse import OptionParser
#-------------------------------------------------------------------------------------
# Utility functions for parsing actions/options
#-------------------------------------------------------------------------------------

# the complete set of sub-commands ansible-vault understands
VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
def build_option_parser(action):
    """
    Builds an option parser object based on the action
    the user wants to execute.

    When no action is given, prints the generic help text and exits.
    """
    usage = "usage: %%prog [%s] [--help] [options] file_name" % "|".join(VALID_ACTIONS)
    epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
    # optparse re-wraps the epilog by default; patch it to emit verbatim
    OptionParser.format_epilog = lambda self, formatter: self.epilog
    parser = OptionParser(usage=usage, epilog=epilog)

    if not action:
        parser.print_help()
        sys.exit()

    # options for all actions
    #parser.add_option('-c', '--cipher', dest='cipher', default="AES256", help="cipher to use")
    parser.add_option('--debug', dest='debug', action="store_true", help="debug")
    parser.add_option('--vault-password-file', dest='password_file',
                    help="vault password file", default=C.DEFAULT_VAULT_PASSWORD_FILE)

    # every action shares the same usage shape, so one formatted string
    # replaces the previous six-way elif chain
    if action in VALID_ACTIONS:
        parser.set_usage("usage: %%prog %s [options] file_name" % action)

    # done, return the parser
    return parser
def get_action(args):
    """
    Get the action the user wants to execute from the
    sys argv list.

    Removes the matched action from *args* as a side effect;
    returns None when no known action is present.
    """
    for idx, candidate in enumerate(args):
        if candidate in VALID_ACTIONS:
            del args[idx]
            return candidate
    return None
def get_opt(options, k, defval=""):
    """
    Returns an option from an Optparse values instance.

    Falls back to ``defval`` when the attribute is missing.  For
    ``roles_path`` only the first ``os.pathsep``-separated entry
    is returned.
    """
    # three-arg getattr replaces the old bare try/except, which
    # silently swallowed every exception type, not just AttributeError
    data = getattr(options, k, defval)
    if k == "roles_path":
        if os.pathsep in data:
            data = data.split(os.pathsep)[0]
    return data
#-------------------------------------------------------------------------------------
# Command functions
#-------------------------------------------------------------------------------------
def execute_create(args, options, parser):
    """Create a new vault file; at most one filename is accepted."""
    if len(args) > 1:
        raise errors.AnsibleError("'create' does not accept more than one filename")

    # password comes from the password file when given, otherwise prompt twice
    if options.password_file:
        password = utils.read_vault_file(options.password_file)
    else:
        password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)

    cipher = getattr(options, 'cipher', 'AES256')
    VaultEditor(cipher, password, args[0]).create_file()
def execute_decrypt(args, options, parser):
    """Decrypt each file named in args in place, then report success."""
    # password comes from the password file when given, otherwise prompt
    if not options.password_file:
        password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
    else:
        password = utils.read_vault_file(options.password_file)

    cipher = 'AES256'
    if hasattr(options, 'cipher'):
        cipher = options.cipher

    for f in args:
        this_editor = VaultEditor(cipher, password, f)
        this_editor.decrypt_file()

    # print() with a single argument emits the same line under py2 and py3;
    # the old bare print statement was py2-only syntax
    print("Decryption successful")
def execute_edit(args, options, parser):
    """Open a single vault file for interactive editing."""
    if len(args) > 1:
        raise errors.AnsibleError("edit does not accept more than one filename")

    # password comes from the password file when given, otherwise prompt
    if options.password_file:
        password = utils.read_vault_file(options.password_file)
    else:
        password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)

    # cipher is detected from the existing file, so none is passed here
    for f in args:
        VaultEditor(None, password, f).edit_file()
def execute_view(args, options, parser):
    """View the decrypted contents of a single vault file."""
    if len(args) > 1:
        raise errors.AnsibleError("view does not accept more than one filename")

    # password comes from the password file when given, otherwise prompt
    if options.password_file:
        password = utils.read_vault_file(options.password_file)
    else:
        password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)

    # cipher is detected from the existing file, so none is passed here
    for f in args:
        VaultEditor(None, password, f).view_file()
def execute_encrypt(args, options, parser):
    """Encrypt each file named in args in place, then report success."""
    # password comes from the password file when given, otherwise prompt twice
    if not options.password_file:
        password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)
    else:
        password = utils.read_vault_file(options.password_file)

    cipher = 'AES256'
    if hasattr(options, 'cipher'):
        cipher = options.cipher

    for f in args:
        this_editor = VaultEditor(cipher, password, f)
        this_editor.encrypt_file()

    # print() with a single argument emits the same line under py2 and py3;
    # the old bare print statement was py2-only syntax
    print("Encryption successful")
def execute_rekey(args, options, parser):
    """Re-encrypt the given files under a newly prompted password."""
    # current password: from the password file when given, otherwise prompt
    if not options.password_file:
        password, __ = utils.ask_vault_passwords(ask_vault_pass=True)
    else:
        password = utils.read_vault_file(options.password_file)

    # new password is always prompted for (with confirmation)
    __, new_password = utils.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)

    # cipher is detected from the existing file, so none is passed here
    cipher = None
    for f in args:
        this_editor = VaultEditor(cipher, password, f)
        this_editor.rekey_file(new_password)

    # print() with a single argument emits the same line under py2 and py3;
    # the old bare print statement was py2-only syntax
    print("Rekey successful")
#-------------------------------------------------------------------------------------
# MAIN
#-------------------------------------------------------------------------------------
def main():
    """
    Entry point: pick the action off argv, parse remaining options,
    and dispatch to the matching execute_<action>() function.
    """
    action = get_action(sys.argv)
    parser = build_option_parser(action)  # exits with help when action is None
    (options, args) = parser.parse_args()

    if not args:
        raise errors.AnsibleError(
            "The '%s' command requires a filename as the first argument" % action
        )

    # execute the desired action
    try:
        fn = globals()["execute_%s" % action]
        fn(args, options, parser)
    except Exception as err:  # was py2-only "except Exception, err" syntax
        if options.debug:
            print(traceback.format_exc())
        # .format keeps the "ERROR: <msg>" output identical on py2 and py3
        # (print("ERROR:", err) would render as a tuple under py2)
        print("ERROR: {0}".format(err))
        sys.exit(1)
# Script entry point; errors are reported and turned into exit code 1 by main().
if __name__ == "__main__":
    main()

@ -14,5 +14,9 @@
# #
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__version__ = '2.0.0'
__author__ = 'Michael DeHaan' # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
__version__ = '2.0'

@ -15,10 +15,15 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os import os
import pwd import pwd
import sys import sys
import ConfigParser
from six.moves import configparser
from string import ascii_letters, digits from string import ascii_letters, digits
# copied from utils, avoid circular reference fun :) # copied from utils, avoid circular reference fun :)
@ -35,13 +40,15 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False,
''' return a configuration variable with casting ''' ''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default) value = _get_config(p, section, key, env_var, default)
if boolean: if boolean:
return mk_boolean(value) value = mk_boolean(value)
if value and integer: if value:
return int(value) if integer:
if value and floating: value = int(value)
return float(value) elif floating:
if value and islist: value = float(value)
return [x.strip() for x in value.split(',')] elif islist:
if isinstance(value, basestring):
value = [x.strip() for x in value.split(',')]
return value return value
def _get_config(p, section, key, env_var, default): def _get_config(p, section, key, env_var, default):
@ -60,7 +67,7 @@ def _get_config(p, section, key, env_var, default):
def load_config_file(): def load_config_file():
''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible ''' ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
p = ConfigParser.ConfigParser() p = configparser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None) path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None: if path0 is not None:
@ -73,8 +80,8 @@ def load_config_file():
if path is not None and os.path.exists(path): if path is not None and os.path.exists(path):
try: try:
p.read(path) p.read(path)
except ConfigParser.Error as e: except configparser.Error as e:
print "Error reading config file: \n%s" % e print("Error reading config file: \n{0}".format(e))
sys.exit(1) sys.exit(1)
return p return p
return None return None
@ -98,7 +105,8 @@ YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
DEFAULTS='defaults' DEFAULTS='defaults'
# configurable things # configurable things
DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts'))) DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts')))
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None) DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None)
DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles')) DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles'))
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
@ -112,6 +120,7 @@ DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user) DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True) DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None)) DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True) DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True) DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True) DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
@ -122,7 +131,6 @@ DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None,
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER') DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True) DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True) DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo') DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H') DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace') DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
@ -141,7 +149,7 @@ BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas']
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''} BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''}
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True) DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',default=None) DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True) DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
# need to rethink impementing these 2 # need to rethink impementing these 2
DEFAULT_BECOME_EXE = None DEFAULT_BECOME_EXE = None
@ -156,6 +164,7 @@ DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', '
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins') DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins') DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins') DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins')
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
@ -173,8 +182,8 @@ DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings',
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True) DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True) COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True) RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
@ -196,10 +205,16 @@ ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_fi
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True) ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True) PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
# galaxy related
DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com')
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', ['git','hg'], islist=True)
# characters included in auto-generated passwords # characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
# non-configurable things # non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
DEFAULT_BECOME_PASS = None DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None DEFAULT_REMOTE_PASS = None

@ -16,36 +16,44 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
############################################# #############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch import fnmatch
import os import os
import sys import sys
import re import re
import stat
import subprocess import subprocess
import ansible.constants as C from ansible import constants as C
from ansible.errors import *
from ansible.inventory.ini import InventoryParser from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript from ansible.inventory.script import InventoryScript
from ansible.inventory.dir import InventoryDirectory from ansible.inventory.dir import InventoryDirectory
from ansible.inventory.group import Group from ansible.inventory.group import Group
from ansible.inventory.host import Host from ansible.inventory.host import Host
from ansible import errors from ansible.plugins import vars_loader
from ansible import utils from ansible.utils.path import is_executable
from ansible.utils.vars import combine_vars
class Inventory(object): class Inventory(object):
""" """
Host inventory for ansible. Host inventory for ansible.
""" """
__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', #__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list', # 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
'_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir'] # '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):
# the host file file, or script path, or list of hosts # the host file file, or script path, or list of hosts
# if a list, inventory data will NOT be loaded # if a list, inventory data will NOT be loaded
self.host_list = host_list self.host_list = host_list
self._vault_password=vault_password self._loader = loader
self._variable_manager = variable_manager
# caching to avoid repeated calculations, particularly with # caching to avoid repeated calculations, particularly with
# external inventory scripts. # external inventory scripts.
@ -97,7 +105,7 @@ class Inventory(object):
if os.path.isdir(host_list): if os.path.isdir(host_list):
# Ensure basedir is inside the directory # Ensure basedir is inside the directory
self.host_list = os.path.join(self.host_list, "") self.host_list = os.path.join(self.host_list, "")
self.parser = InventoryDirectory(filename=host_list) self.parser = InventoryDirectory(loader=self._loader, filename=host_list)
self.groups = self.parser.groups.values() self.groups = self.parser.groups.values()
else: else:
# check to see if the specified file starts with a # check to see if the specified file starts with a
@ -113,9 +121,9 @@ class Inventory(object):
except: except:
pass pass
if utils.is_executable(host_list): if is_executable(host_list):
try: try:
self.parser = InventoryScript(filename=host_list) self.parser = InventoryScript(loader=self._loader, filename=host_list)
self.groups = self.parser.groups.values() self.groups = self.parser.groups.values()
except: except:
if not shebang_present: if not shebang_present:
@ -134,19 +142,23 @@ class Inventory(object):
else: else:
raise raise
utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True) vars_loader.add_directory(self.basedir(), with_subdir=True)
else: else:
raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?") raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?")
self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ] self._vars_plugins = [ x for x in vars_loader.all(self) ]
# FIXME: shouldn't be required, since the group/host vars file
# management will be done in VariableManager
# get group vars from group_vars/ files and vars plugins # get group vars from group_vars/ files and vars plugins
for group in self.groups: for group in self.groups:
group.vars = utils.combine_vars(group.vars, self.get_group_variables(group.name, vault_password=self._vault_password)) # FIXME: combine_vars
group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
# get host vars from host_vars/ files and vars plugins # get host vars from host_vars/ files and vars plugins
for host in self.get_hosts(): for host in self.get_hosts():
host.vars = utils.combine_vars(host.vars, self.get_host_variables(host.name, vault_password=self._vault_password)) # FIXME: combine_vars
host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
def _match(self, str, pattern_str): def _match(self, str, pattern_str):
@ -192,9 +204,9 @@ class Inventory(object):
# exclude hosts mentioned in any restriction (ex: failed hosts) # exclude hosts mentioned in any restriction (ex: failed hosts)
if self._restriction is not None: if self._restriction is not None:
hosts = [ h for h in hosts if h.name in self._restriction ] hosts = [ h for h in hosts if h in self._restriction ]
if self._also_restriction is not None: if self._also_restriction is not None:
hosts = [ h for h in hosts if h.name in self._also_restriction ] hosts = [ h for h in hosts if h in self._also_restriction ]
return hosts return hosts
@ -320,6 +332,8 @@ class Inventory(object):
new_host = Host(pattern) new_host = Host(pattern)
new_host.set_variable("ansible_python_interpreter", sys.executable) new_host.set_variable("ansible_python_interpreter", sys.executable)
new_host.set_variable("ansible_connection", "local") new_host.set_variable("ansible_connection", "local")
new_host.ipv4_address = '127.0.0.1'
ungrouped = self.get_group("ungrouped") ungrouped = self.get_group("ungrouped")
if ungrouped is None: if ungrouped is None:
self.add_group(Group('ungrouped')) self.add_group(Group('ungrouped'))
@ -420,7 +434,7 @@ class Inventory(object):
group = self.get_group(groupname) group = self.get_group(groupname)
if group is None: if group is None:
raise errors.AnsibleError("group not found: %s" % groupname) raise Exception("group not found: %s" % groupname)
vars = {} vars = {}
@ -428,19 +442,21 @@ class Inventory(object):
vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')] vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')]
for updated in vars_results: for updated in vars_results:
if updated is not None: if updated is not None:
vars = utils.combine_vars(vars, updated) # FIXME: combine_vars
vars = combine_vars(vars, updated)
# Read group_vars/ files # Read group_vars/ files
vars = utils.combine_vars(vars, self.get_group_vars(group)) # FIXME: combine_vars
vars = combine_vars(vars, self.get_group_vars(group))
return vars return vars
def get_variables(self, hostname, update_cached=False, vault_password=None): def get_vars(self, hostname, update_cached=False, vault_password=None):
host = self.get_host(hostname) host = self.get_host(hostname)
if not host: if not host:
raise errors.AnsibleError("host not found: %s" % hostname) raise Exception("host not found: %s" % hostname)
return host.get_variables() return host.get_vars()
def get_host_variables(self, hostname, update_cached=False, vault_password=None): def get_host_variables(self, hostname, update_cached=False, vault_password=None):
@ -460,22 +476,26 @@ class Inventory(object):
vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')] vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
for updated in vars_results: for updated in vars_results:
if updated is not None: if updated is not None:
vars = utils.combine_vars(vars, updated) # FIXME: combine_vars
vars = combine_vars(vars, updated)
# plugin.get_host_vars retrieves just vars for specific host # plugin.get_host_vars retrieves just vars for specific host
vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')] vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
for updated in vars_results: for updated in vars_results:
if updated is not None: if updated is not None:
vars = utils.combine_vars(vars, updated) # FIXME: combine_vars
vars = combine_vars(vars, updated)
# still need to check InventoryParser per host vars # still need to check InventoryParser per host vars
# which actually means InventoryScript per host, # which actually means InventoryScript per host,
# which is not performant # which is not performant
if self.parser is not None: if self.parser is not None:
vars = utils.combine_vars(vars, self.parser.get_host_variables(host)) # FIXME: combine_vars
vars = combine_vars(vars, self.parser.get_host_variables(host))
# Read host_vars/ files # Read host_vars/ files
vars = utils.combine_vars(vars, self.get_host_vars(host)) # FIXME: combine_vars
vars = combine_vars(vars, self.get_host_vars(host))
return vars return vars
@ -490,7 +510,7 @@ class Inventory(object):
""" return a list of hostnames for a pattern """ """ return a list of hostnames for a pattern """
result = [ h.name for h in self.get_hosts(pattern) ] result = [ h for h in self.get_hosts(pattern) ]
if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]: if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]:
result = [pattern] result = [pattern]
return result return result
@ -498,11 +518,7 @@ class Inventory(object):
def list_groups(self): def list_groups(self):
return sorted([ g.name for g in self.groups ], key=lambda x: x) return sorted([ g.name for g in self.groups ], key=lambda x: x)
# TODO: remove this function def restrict_to_hosts(self, restriction):
def get_restriction(self):
return self._restriction
def restrict_to(self, restriction):
""" """
Restrict list operations to the hosts given in restriction. This is used Restrict list operations to the hosts given in restriction. This is used
to exclude failed hosts in main playbook code, don't use this for other to exclude failed hosts in main playbook code, don't use this for other
@ -544,7 +560,7 @@ class Inventory(object):
results.append(x) results.append(x)
self._subset = results self._subset = results
def lift_restriction(self): def remove_restriction(self):
""" Do not restrict list operations """ """ Do not restrict list operations """
self._restriction = None self._restriction = None
@ -588,10 +604,12 @@ class Inventory(object):
self._playbook_basedir = dir self._playbook_basedir = dir
# get group vars from group_vars/ files # get group vars from group_vars/ files
for group in self.groups: for group in self.groups:
group.vars = utils.combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True)) # FIXME: combine_vars
group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
# get host vars from host_vars/ files # get host vars from host_vars/ files
for host in self.get_hosts(): for host in self.get_hosts():
host.vars = utils.combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True)) # FIXME: combine_vars
host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
# invalidate cache # invalidate cache
self._vars_per_host = {} self._vars_per_host = {}
self._vars_per_group = {} self._vars_per_group = {}
@ -639,15 +657,15 @@ class Inventory(object):
if _basedir == self._playbook_basedir and scan_pass != 1: if _basedir == self._playbook_basedir and scan_pass != 1:
continue continue
# FIXME: these should go to VariableManager
if group and host is None: if group and host is None:
# load vars in dir/group_vars/name_of_group # load vars in dir/group_vars/name_of_group
base_path = os.path.join(basedir, "group_vars/%s" % group.name) base_path = os.path.join(basedir, "group_vars/%s" % group.name)
results = utils.load_vars(base_path, results, vault_password=self._vault_password) self._variable_manager.add_group_vars_file(base_path, self._loader)
elif host and group is None: elif host and group is None:
# same for hostvars in dir/host_vars/name_of_host # same for hostvars in dir/host_vars/name_of_host
base_path = os.path.join(basedir, "host_vars/%s" % host.name) base_path = os.path.join(basedir, "host_vars/%s" % host.name)
results = utils.load_vars(base_path, results, vault_password=self._vault_password) self._variable_manager.add_host_vars_file(base_path, self._loader)
# all done, results is a dictionary of variables for this particular host. # all done, results is a dictionary of variables for this particular host.
return results return results

@ -17,20 +17,25 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
############################################# #############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os import os
import ansible.constants as C
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.host import Host from ansible.inventory.host import Host
from ansible.inventory.group import Group from ansible.inventory.group import Group
from ansible.inventory.ini import InventoryParser from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript from ansible.inventory.script import InventoryScript
from ansible import utils from ansible.utils.path import is_executable
from ansible import errors from ansible.utils.vars import combine_vars
class InventoryDirectory(object): class InventoryDirectory(object):
''' Host inventory parser for ansible using a directory of inventories. ''' ''' Host inventory parser for ansible using a directory of inventories. '''
def __init__(self, filename=C.DEFAULT_HOST_LIST): def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
self.names = os.listdir(filename) self.names = os.listdir(filename)
self.names.sort() self.names.sort()
self.directory = filename self.directory = filename
@ -38,10 +43,12 @@ class InventoryDirectory(object):
self.hosts = {} self.hosts = {}
self.groups = {} self.groups = {}
self._loader = loader
for i in self.names: for i in self.names:
# Skip files that end with certain extensions or characters # Skip files that end with certain extensions or characters
if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")): if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo")):
continue continue
# Skip hidden files # Skip hidden files
if i.startswith('.') and not i.startswith('./'): if i.startswith('.') and not i.startswith('./'):
@ -51,9 +58,9 @@ class InventoryDirectory(object):
continue continue
fullpath = os.path.join(self.directory, i) fullpath = os.path.join(self.directory, i)
if os.path.isdir(fullpath): if os.path.isdir(fullpath):
parser = InventoryDirectory(filename=fullpath) parser = InventoryDirectory(loader=loader, filename=fullpath)
elif utils.is_executable(fullpath): elif is_executable(fullpath):
parser = InventoryScript(filename=fullpath) parser = InventoryScript(loader=loader, filename=fullpath)
else: else:
parser = InventoryParser(filename=fullpath) parser = InventoryParser(filename=fullpath)
self.parsers.append(parser) self.parsers.append(parser)
@ -153,7 +160,7 @@ class InventoryDirectory(object):
# name # name
if group.name != newgroup.name: if group.name != newgroup.name:
raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name)) raise AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
# depth # depth
group.depth = max([group.depth, newgroup.depth]) group.depth = max([group.depth, newgroup.depth])
@ -196,14 +203,14 @@ class InventoryDirectory(object):
self.groups[newparent.name].add_child_group(group) self.groups[newparent.name].add_child_group(group)
# variables # variables
group.vars = utils.combine_vars(group.vars, newgroup.vars) group.vars = combine_vars(group.vars, newgroup.vars)
def _merge_hosts(self,host, newhost): def _merge_hosts(self,host, newhost):
""" Merge all of instance newhost into host """ """ Merge all of instance newhost into host """
# name # name
if host.name != newhost.name: if host.name != newhost.name:
raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name)) raise AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
# group membership relation # group membership relation
for newgroup in newhost.groups: for newgroup in newhost.groups:
@ -218,7 +225,7 @@ class InventoryDirectory(object):
self.groups[newgroup.name].add_host(host) self.groups[newgroup.name].add_host(host)
# variables # variables
host.vars = utils.combine_vars(host.vars, newhost.vars) host.vars = combine_vars(host.vars, newhost.vars)
def get_host_variables(self, host): def get_host_variables(self, host):
""" Gets additional host variables from all inventories """ """ Gets additional host variables from all inventories """

@ -30,6 +30,9 @@ expanded into 001, 002 ...009, 010.
Note that when beg is specified with left zero padding, then the length of Note that when beg is specified with left zero padding, then the length of
end must be the same as that of beg, else an exception is raised. end must be the same as that of beg, else an exception is raised.
''' '''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import string import string
from ansible import errors from ansible import errors

@ -14,11 +14,15 @@
# #
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class Group(object): from ansible.utils.debug import debug
class Group:
''' a group of ansible hosts ''' ''' a group of ansible hosts '''
__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ] #__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
def __init__(self, name=None): def __init__(self, name=None):
@ -29,9 +33,49 @@ class Group(object):
self.child_groups = [] self.child_groups = []
self.parent_groups = [] self.parent_groups = []
self._hosts_cache = None self._hosts_cache = None
#self.clear_hosts_cache() #self.clear_hosts_cache()
if self.name is None: #if self.name is None:
raise Exception("group name is required") # raise Exception("group name is required")
def __repr__(self):
return self.get_name()
def __getstate__(self):
return self.serialize()
def __setstate__(self, data):
return self.deserialize(data)
def serialize(self):
parent_groups = []
for parent in self.parent_groups:
parent_groups.append(parent.serialize())
result = dict(
name=self.name,
vars=self.vars.copy(),
parent_groups=parent_groups,
depth=self.depth,
)
debug("serializing group, result is: %s" % result)
return result
def deserialize(self, data):
debug("deserializing group, data is: %s" % data)
self.__init__()
self.name = data.get('name')
self.vars = data.get('vars', dict())
parent_groups = data.get('parent_groups', [])
for parent_data in parent_groups:
g = Group()
g.deserialize(parent_data)
self.parent_groups.append(g)
def get_name(self):
return self.name
def add_child_group(self, group): def add_child_group(self, group):
@ -100,7 +144,7 @@ class Group(object):
hosts.append(mine) hosts.append(mine)
return hosts return hosts
def get_variables(self): def get_vars(self):
return self.vars.copy() return self.vars.copy()
def _get_ancestors(self): def _get_ancestors(self):

@ -15,24 +15,88 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.constants as C # Make coding more python3-ish
from ansible import utils from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class Host(object): from ansible import constants as C
from ansible.inventory.group import Group
from ansible.utils.vars import combine_vars
__all__ = ['Host']
class Host:
''' a single ansible host ''' ''' a single ansible host '''
__slots__ = [ 'name', 'vars', 'groups' ] #__slots__ = [ 'name', 'vars', 'groups' ]
def __getstate__(self):
return self.serialize()
def __setstate__(self, data):
return self.deserialize(data)
def __eq__(self, other):
return self.name == other.name
def serialize(self):
groups = []
for group in self.groups:
groups.append(group.serialize())
return dict(
name=self.name,
vars=self.vars.copy(),
ipv4_address=self.ipv4_address,
ipv6_address=self.ipv6_address,
port=self.port,
gathered_facts=self._gathered_facts,
groups=groups,
)
def deserialize(self, data):
self.__init__()
self.name = data.get('name')
self.vars = data.get('vars', dict())
self.ipv4_address = data.get('ipv4_address', '')
self.ipv6_address = data.get('ipv6_address', '')
self.port = data.get('port')
groups = data.get('groups', [])
for group_data in groups:
g = Group()
g.deserialize(group_data)
self.groups.append(g)
def __init__(self, name=None, port=None): def __init__(self, name=None, port=None):
self.name = name self.name = name
self.vars = {} self.vars = {}
self.groups = [] self.groups = []
self.ipv4_address = name
self.ipv6_address = name
if port and port != C.DEFAULT_REMOTE_PORT: if port and port != C.DEFAULT_REMOTE_PORT:
self.set_variable('ansible_ssh_port', int(port)) self.port = int(port)
else:
self.port = C.DEFAULT_REMOTE_PORT
self._gathered_facts = False
if self.name is None: def __repr__(self):
raise Exception("host name is required") return self.get_name()
def get_name(self):
return self.name
@property
def gathered_facts(self):
return self._gathered_facts
def set_gathered_facts(self, gathered):
self._gathered_facts = gathered
def add_group(self, group): def add_group(self, group):
@ -52,16 +116,15 @@ class Host(object):
groups[a.name] = a groups[a.name] = a
return groups.values() return groups.values()
def get_variables(self): def get_vars(self):
results = {} results = {}
groups = self.get_groups() groups = self.get_groups()
for group in sorted(groups, key=lambda g: g.depth): for group in sorted(groups, key=lambda g: g.depth):
results = utils.combine_vars(results, group.get_variables()) results = combine_vars(results, group.get_vars())
results = utils.combine_vars(results, self.vars) results = combine_vars(results, self.vars)
results['inventory_hostname'] = self.name results['inventory_hostname'] = self.name
results['inventory_hostname_short'] = self.name.split('.')[0] results['inventory_hostname_short'] = self.name.split('.')[0]
results['group_names'] = sorted([ g.name for g in groups if g.name != 'all']) results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
return results return results

@ -16,17 +16,20 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
############################################# #############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ansible.constants as C import ast
import shlex
import re
from ansible import constants as C
from ansible.errors import *
from ansible.inventory.host import Host from ansible.inventory.host import Host
from ansible.inventory.group import Group from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range from ansible.inventory.expand_hosts import expand_hostname_range
from ansible import errors from ansible.utils.unicode import to_unicode
from ansible import utils
import shlex
import re
import ast
class InventoryParser(object): class InventoryParser(object):
""" """
@ -34,9 +37,8 @@ class InventoryParser(object):
""" """
def __init__(self, filename=C.DEFAULT_HOST_LIST): def __init__(self, filename=C.DEFAULT_HOST_LIST):
self.filename = filename
with open(filename) as fh: with open(filename) as fh:
self.filename = filename
self.lines = fh.readlines() self.lines = fh.readlines()
self.groups = {} self.groups = {}
self.hosts = {} self.hosts = {}
@ -54,10 +56,7 @@ class InventoryParser(object):
def _parse_value(v): def _parse_value(v):
if "#" not in v: if "#" not in v:
try: try:
ret = ast.literal_eval(v) v = ast.literal_eval(v)
if not isinstance(ret, float):
# Do not trim floats. Eg: "1.20" to 1.2
return ret
# Using explicit exceptions. # Using explicit exceptions.
# Likely a string that literal_eval does not like. We wil then just set it. # Likely a string that literal_eval does not like. We wil then just set it.
except ValueError: except ValueError:
@ -66,7 +65,7 @@ class InventoryParser(object):
except SyntaxError: except SyntaxError:
# Is this a hash with an equals at the end? # Is this a hash with an equals at the end?
pass pass
return v return to_unicode(v, nonstring='passthru', errors='strict')
# [webservers] # [webservers]
# alpha # alpha
@ -91,8 +90,8 @@ class InventoryParser(object):
self.groups = dict(all=all, ungrouped=ungrouped) self.groups = dict(all=all, ungrouped=ungrouped)
active_group_name = 'ungrouped' active_group_name = 'ungrouped'
for lineno in range(len(self.lines)): for line in self.lines:
line = utils.before_comment(self.lines[lineno]).strip() line = self._before_comment(line).strip()
if line.startswith("[") and line.endswith("]"): if line.startswith("[") and line.endswith("]"):
active_group_name = line.replace("[","").replace("]","") active_group_name = line.replace("[","").replace("]","")
if ":vars" in line or ":children" in line: if ":vars" in line or ":children" in line:
@ -146,8 +145,11 @@ class InventoryParser(object):
try: try:
(k,v) = t.split("=", 1) (k,v) = t.split("=", 1)
except ValueError, e: except ValueError, e:
raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e))) raise AnsibleError("Invalid ini entry in %s: %s - %s" % (self.filename, t, str(e)))
host.set_variable(k, self._parse_value(v)) if k == 'ansible_ssh_host':
host.ipv4_address = self._parse_value(v)
else:
host.set_variable(k, self._parse_value(v))
self.groups[active_group_name].add_host(host) self.groups[active_group_name].add_host(host)
# [southeast:children] # [southeast:children]
@ -157,8 +159,8 @@ class InventoryParser(object):
def _parse_group_children(self): def _parse_group_children(self):
group = None group = None
for lineno in range(len(self.lines)): for line in self.lines:
line = self.lines[lineno].strip() line = line.strip()
if line is None or line == '': if line is None or line == '':
continue continue
if line.startswith("[") and ":children]" in line: if line.startswith("[") and ":children]" in line:
@ -173,7 +175,7 @@ class InventoryParser(object):
elif group: elif group:
kid_group = self.groups.get(line, None) kid_group = self.groups.get(line, None)
if kid_group is None: if kid_group is None:
raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line)) raise AnsibleError("child group is not defined: (%s)" % line)
else: else:
group.add_child_group(kid_group) group.add_child_group(kid_group)
@ -184,13 +186,13 @@ class InventoryParser(object):
def _parse_group_variables(self): def _parse_group_variables(self):
group = None group = None
for lineno in range(len(self.lines)): for line in self.lines:
line = self.lines[lineno].strip() line = line.strip()
if line.startswith("[") and ":vars]" in line: if line.startswith("[") and ":vars]" in line:
line = line.replace("[","").replace(":vars]","") line = line.replace("[","").replace(":vars]","")
group = self.groups.get(line, None) group = self.groups.get(line, None)
if group is None: if group is None:
raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line)) raise AnsibleError("can't add vars to undefined group: %s" % line)
elif line.startswith("#") or line.startswith(";"): elif line.startswith("#") or line.startswith(";"):
pass pass
elif line.startswith("["): elif line.startswith("["):
@ -199,10 +201,18 @@ class InventoryParser(object):
pass pass
elif group: elif group:
if "=" not in line: if "=" not in line:
raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1)) raise AnsibleError("variables assigned to group must be in key=value form")
else: else:
(k, v) = [e.strip() for e in line.split("=", 1)] (k, v) = [e.strip() for e in line.split("=", 1)]
group.set_variable(k, self._parse_value(v)) group.set_variable(k, self._parse_value(v))
def get_host_variables(self, host): def get_host_variables(self, host):
return {} return {}
def _before_comment(self, msg):
''' what's the part of a string before a comment? '''
msg = msg.replace("\#","**NOT_A_COMMENT**")
msg = msg.split("#")[0]
msg = msg.replace("**NOT_A_COMMENT**","#")
return msg

@ -16,22 +16,26 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
############################################# #############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os import os
import subprocess import subprocess
import ansible.constants as C import sys
from ansible import constants as C
from ansible.errors import *
from ansible.inventory.host import Host from ansible.inventory.host import Host
from ansible.inventory.group import Group from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_bytes_to_unicode from ansible.module_utils.basic import json_dict_bytes_to_unicode
from ansible import utils
from ansible import errors
import sys
class InventoryScript(object): class InventoryScript:
''' Host inventory parser for ansible using external inventory scripts. ''' ''' Host inventory parser for ansible using external inventory scripts. '''
def __init__(self, filename=C.DEFAULT_HOST_LIST): def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
self._loader = loader
# Support inventory scripts that are not prefixed with some # Support inventory scripts that are not prefixed with some
# path information but happen to be in the current working # path information but happen to be in the current working
@ -41,11 +45,11 @@ class InventoryScript(object):
try: try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, e: except OSError, e:
raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(stdout, stderr) = sp.communicate() (stdout, stderr) = sp.communicate()
if sp.returncode != 0: if sp.returncode != 0:
raise errors.AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr)) raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
self.data = stdout self.data = stdout
# see comment about _meta below # see comment about _meta below
@ -58,7 +62,7 @@ class InventoryScript(object):
all_hosts = {} all_hosts = {}
# not passing from_remote because data from CMDB is trusted # not passing from_remote because data from CMDB is trusted
self.raw = utils.parse_json(self.data) self.raw = self._loader.load(self.data)
self.raw = json_dict_bytes_to_unicode(self.raw) self.raw = json_dict_bytes_to_unicode(self.raw)
all = Group('all') all = Group('all')
@ -68,7 +72,7 @@ class InventoryScript(object):
if 'failed' in self.raw: if 'failed' in self.raw:
sys.stderr.write(err + "\n") sys.stderr.write(err + "\n")
raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw) raise AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
for (group_name, data) in self.raw.items(): for (group_name, data) in self.raw.items():
@ -92,12 +96,12 @@ class InventoryScript(object):
if not isinstance(data, dict): if not isinstance(data, dict):
data = {'hosts': data} data = {'hosts': data}
# is not those subkeys, then simplified syntax, host with vars # is not those subkeys, then simplified syntax, host with vars
elif not any(k in data for k in ('hosts','vars','children')): elif not any(k in data for k in ('hosts','vars')):
data = {'hosts': [group_name], 'vars': data} data = {'hosts': [group_name], 'vars': data}
if 'hosts' in data: if 'hosts' in data:
if not isinstance(data['hosts'], list): if not isinstance(data['hosts'], list):
raise errors.AnsibleError("You defined a group \"%s\" with bad " raise AnsibleError("You defined a group \"%s\" with bad "
"data for the host list:\n %s" % (group_name, data)) "data for the host list:\n %s" % (group_name, data))
for hostname in data['hosts']: for hostname in data['hosts']:
@ -108,7 +112,7 @@ class InventoryScript(object):
if 'vars' in data: if 'vars' in data:
if not isinstance(data['vars'], dict): if not isinstance(data['vars'], dict):
raise errors.AnsibleError("You defined a group \"%s\" with bad " raise AnsibleError("You defined a group \"%s\" with bad "
"data for variables:\n %s" % (group_name, data)) "data for variables:\n %s" % (group_name, data))
for k, v in data['vars'].iteritems(): for k, v in data['vars'].iteritems():
@ -143,12 +147,12 @@ class InventoryScript(object):
try: try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, e: except OSError, e:
raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(out, err) = sp.communicate() (out, err) = sp.communicate()
if out.strip() == '': if out.strip() == '':
return dict() return dict()
try: try:
return json_dict_bytes_to_unicode(utils.parse_json(out)) return json_dict_bytes_to_unicode(self._loader.load(out))
except ValueError: except ValueError:
raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out)) raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))

@ -15,6 +15,8 @@
# #
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class VarsModule(object): class VarsModule(object):

@ -43,7 +43,7 @@ BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
# can be inserted in any module source automatically by including # can be inserted in any module source automatically by including
# #<<INCLUDE_ANSIBLE_MODULE_COMMON>> on a blank line by itself inside # #<<INCLUDE_ANSIBLE_MODULE_COMMON>> on a blank line by itself inside
# of an ansible module. The source of this common code lives # of an ansible module. The source of this common code lives
# in lib/ansible/module_common.py # in ansible/executor/module_common.py
import locale import locale
import os import os
@ -65,6 +65,7 @@ import pwd
import platform import platform
import errno import errno
import tempfile import tempfile
from itertools import imap, repeat
try: try:
import json import json
@ -234,7 +235,7 @@ def load_platform_subclass(cls, *args, **kwargs):
return super(cls, subclass).__new__(subclass) return super(cls, subclass).__new__(subclass)
def json_dict_unicode_to_bytes(d): def json_dict_unicode_to_bytes(d, encoding='utf-8'):
''' Recursively convert dict keys and values to byte str ''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples, Specialized for json return because this only handles, lists, tuples,
@ -242,17 +243,17 @@ def json_dict_unicode_to_bytes(d):
''' '''
if isinstance(d, unicode): if isinstance(d, unicode):
return d.encode('utf-8') return d.encode(encoding)
elif isinstance(d, dict): elif isinstance(d, dict):
return dict(map(json_dict_unicode_to_bytes, d.iteritems())) return dict(imap(json_dict_unicode_to_bytes, d.iteritems(), repeat(encoding)))
elif isinstance(d, list): elif isinstance(d, list):
return list(map(json_dict_unicode_to_bytes, d)) return list(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
elif isinstance(d, tuple): elif isinstance(d, tuple):
return tuple(map(json_dict_unicode_to_bytes, d)) return tuple(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
else: else:
return d return d
def json_dict_bytes_to_unicode(d): def json_dict_bytes_to_unicode(d, encoding='utf-8'):
''' Recursively convert dict keys and values to byte str ''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples, Specialized for json return because this only handles, lists, tuples,
@ -260,13 +261,13 @@ def json_dict_bytes_to_unicode(d):
''' '''
if isinstance(d, str): if isinstance(d, str):
return unicode(d, 'utf-8') return unicode(d, encoding)
elif isinstance(d, dict): elif isinstance(d, dict):
return dict(map(json_dict_bytes_to_unicode, d.iteritems())) return dict(imap(json_dict_bytes_to_unicode, d.iteritems(), repeat(encoding)))
elif isinstance(d, list): elif isinstance(d, list):
return list(map(json_dict_bytes_to_unicode, d)) return list(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
elif isinstance(d, tuple): elif isinstance(d, tuple):
return tuple(map(json_dict_bytes_to_unicode, d)) return tuple(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
else: else:
return d return d
@ -359,9 +360,9 @@ class AnsibleModule(object):
# reset to LANG=C if it's an invalid/unavailable locale # reset to LANG=C if it's an invalid/unavailable locale
self._check_locale() self._check_locale()
(self.params, self.args) = self._load_params() self.params = self._load_params()
self._legal_inputs = ['CHECKMODE', 'NO_LOG'] self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log']
self.aliases = self._handle_aliases() self.aliases = self._handle_aliases()
@ -888,7 +889,7 @@ class AnsibleModule(object):
def _check_for_check_mode(self): def _check_for_check_mode(self):
for (k,v) in self.params.iteritems(): for (k,v) in self.params.iteritems():
if k == 'CHECKMODE': if k == '_ansible_check_mode':
if not self.supports_check_mode: if not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module does not support check mode") self.exit_json(skipped=True, msg="remote module does not support check mode")
if self.supports_check_mode: if self.supports_check_mode:
@ -896,13 +897,13 @@ class AnsibleModule(object):
def _check_for_no_log(self): def _check_for_no_log(self):
for (k,v) in self.params.iteritems(): for (k,v) in self.params.iteritems():
if k == 'NO_LOG': if k == '_ansible_no_log':
self.no_log = self.boolean(v) self.no_log = self.boolean(v)
def _check_invalid_arguments(self): def _check_invalid_arguments(self):
for (k,v) in self.params.iteritems(): for (k,v) in self.params.iteritems():
# these should be in legal inputs already # these should be in legal inputs already
#if k in ('CHECKMODE', 'NO_LOG'): #if k in ('_ansible_check_mode', '_ansible_no_log'):
# continue # continue
if k not in self._legal_inputs: if k not in self._legal_inputs:
self.fail_json(msg="unsupported parameter for module: %s" % k) self.fail_json(msg="unsupported parameter for module: %s" % k)
@ -1075,20 +1076,11 @@ class AnsibleModule(object):
def _load_params(self): def _load_params(self):
''' read the input and return a dictionary and the arguments string ''' ''' read the input and return a dictionary and the arguments string '''
args = MODULE_ARGS params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
items = shlex.split(args) if params is None:
params = {} params = dict()
for x in items: return params
try:
(k, v) = x.split("=",1)
except Exception, e:
self.fail_json(msg="this module requires key=value arguments (%s)" % (items))
if k in params:
self.fail_json(msg="duplicate parameter: %s (value=%s)" % (k, v))
params[k] = v
params2 = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
params2.update(params)
return (params2, args)
def _log_invocation(self): def _log_invocation(self):
''' log that ansible ran the module ''' ''' log that ansible ran the module '''
@ -1209,13 +1201,17 @@ class AnsibleModule(object):
self.fail_json(msg='Boolean %s not in either boolean list' % arg) self.fail_json(msg='Boolean %s not in either boolean list' % arg)
def jsonify(self, data): def jsonify(self, data):
for encoding in ("utf-8", "latin-1", "unicode_escape"): for encoding in ("utf-8", "latin-1"):
try: try:
return json.dumps(data, encoding=encoding) return json.dumps(data, encoding=encoding)
# Old systems using simplejson module does not support encoding keyword. # Old systems using old simplejson module does not support encoding keyword.
except TypeError, e: except TypeError:
return json.dumps(data) try:
except UnicodeDecodeError, e: new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data)
except UnicodeDecodeError:
continue continue
self.fail_json(msg='Invalid unicode encoding encountered') self.fail_json(msg='Invalid unicode encoding encountered')
@ -1452,7 +1448,7 @@ class AnsibleModule(object):
msg = None msg = None
st_in = None st_in = None
# Set a temporart env path if a prefix is passed # Set a temporary env path if a prefix is passed
env=os.environ env=os.environ
if path_prefix: if path_prefix:
env['PATH']="%s:%s" % (path_prefix, env['PATH']) env['PATH']="%s:%s" % (path_prefix, env['PATH'])

@ -142,14 +142,14 @@ Function ConvertTo-Bool
return return
} }
# Helper function to calculate a hash of a file in a way which powershell 3 # Helper function to calculate md5 of a file in a way which powershell 3
# and above can handle: # and above can handle:
Function Get-FileChecksum($path) Function Get-FileMd5($path)
{ {
$hash = "" $hash = ""
If (Test-Path -PathType Leaf $path) If (Test-Path -PathType Leaf $path)
{ {
$sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider; $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider;
$fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose(); $fp.Dispose();

@ -0,0 +1,20 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

@ -1 +0,0 @@
Subproject commit 9028e9d4be8a3dbb96c81a799e18f3adf63d9fd0

@ -1 +0,0 @@
Subproject commit dd80fa221ce0adb3abd658fbd1aa09bf7cf8a6dc

@ -15,860 +15,71 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.inventory # Make coding more python3-ish
import ansible.constants as C from __future__ import (absolute_import, division, print_function)
import ansible.runner __metaclass__ = type
from ansible.utils.template import template
from ansible import utils
from ansible import errors
from ansible.module_utils.splitter import split_args, unquote
import ansible.callbacks
import ansible.cache
import os
import shlex
import collections
from play import Play
import StringIO
import pipes
# the setup cache stores all variables about a host
# gathered during the setup step, while the vars cache
# holds all other variables about a host
SETUP_CACHE = ansible.cache.FactCache()
VARS_CACHE = collections.defaultdict(dict)
RESERVED_TAGS = ['all','tagged','untagged','always']
class PlayBook(object):
    '''
    runs an ansible playbook, given as a datastructure or YAML filename.
    A playbook is a deployment, config management, or automation based
    set of commands to run in series.

    multiple plays/tasks do not execute simultaneously, but tasks in each
    pattern do execute in parallel (according to the number of forks
    requested) among the hosts they address
    '''

    # *****************************************************
def __init__(self,
        playbook         = None,
        host_list        = C.DEFAULT_HOST_LIST,
        module_path      = None,
        forks            = C.DEFAULT_FORKS,
        timeout          = C.DEFAULT_TIMEOUT,
        remote_user      = C.DEFAULT_REMOTE_USER,
        remote_pass      = C.DEFAULT_REMOTE_PASS,
        remote_port      = None,
        transport        = C.DEFAULT_TRANSPORT,
        private_key_file = C.DEFAULT_PRIVATE_KEY_FILE,
        callbacks        = None,
        runner_callbacks = None,
        stats            = None,
        extra_vars       = None,
        only_tags        = None,
        skip_tags        = None,
        subset           = C.DEFAULT_SUBSET,
        inventory        = None,
        check            = False,
        diff             = False,
        any_errors_fatal = False,
        vault_password   = False,
        force_handlers   = False,
        # privilege escalation
        become           = C.DEFAULT_BECOME,
        become_method    = C.DEFAULT_BECOME_METHOD,
        become_user      = C.DEFAULT_BECOME_USER,
        become_pass      = None,
    ):
    """
    playbook:         path to a playbook file
    host_list:        path to a file like /etc/ansible/hosts
    module_path:      path to ansible modules, like /usr/share/ansible/
    forks:            desired level of parallelism
    timeout:          connection timeout
    remote_user:      run as this user if not specified in a particular play
    remote_pass:      use this remote password (for all plays) vs using SSH keys
    remote_port:      default remote port to use if not specified with the host or play
    transport:        how to connect to hosts that don't specify a transport (local, paramiko, etc)
    callbacks:        output callbacks for the playbook
    runner_callbacks: more callbacks, this time for the runner API
    stats:            holds aggregrate data about events occurring to each host
    inventory:        can be specified instead of host_list to use a pre-existing inventory object
    check:            don't change anything, just try to detect some potential changes
    any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed
    force_handlers:   continue to notify and run handlers even if a task fails
    """

    # module-level caches are shared by reference so registered vars and
    # gathered facts persist across all plays of this run
    self.SETUP_CACHE = SETUP_CACHE
    self.VARS_CACHE  = VARS_CACHE

    # these four arguments have no usable defaults; collect every missing
    # one so the caller gets a single complete error message
    arguments = []
    if playbook is None:
        arguments.append('playbook')
    if callbacks is None:
        arguments.append('callbacks')
    if runner_callbacks is None:
        arguments.append('runner_callbacks')
    if stats is None:
        arguments.append('stats')
    if arguments:
        raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments))

    if extra_vars is None:
        extra_vars = {}
    if only_tags is None:
        # default selection is every task
        only_tags = [ 'all' ]
    if skip_tags is None:
        skip_tags = []

    self.check            = check
    self.diff             = diff
    self.module_path      = module_path
    self.forks            = forks
    self.timeout          = timeout
    self.remote_user      = remote_user
    self.remote_pass      = remote_pass
    self.remote_port      = remote_port
    self.transport        = transport
    self.callbacks        = callbacks
    self.runner_callbacks = runner_callbacks
    self.stats            = stats
    self.extra_vars       = extra_vars
    self.global_vars      = {}
    self.private_key_file = private_key_file
    self.only_tags        = only_tags
    self.skip_tags        = skip_tags
    self.any_errors_fatal = any_errors_fatal
    self.vault_password   = vault_password
    self.force_handlers   = force_handlers

    self.become           = become
    self.become_method    = become_method
    self.become_user      = become_user
    self.become_pass      = become_pass

    # give the callbacks a back-reference to this playbook
    self.callbacks.playbook = self
    self.runner_callbacks.playbook = self

    if inventory is None:
        self.inventory = ansible.inventory.Inventory(host_list)
        # apply any --limit style subset to the fresh inventory
        self.inventory.subset(subset)
    else:
        self.inventory = inventory

    if self.module_path is not None:
        utils.plugins.module_finder.add_directory(self.module_path)

    self.basedir = os.path.dirname(playbook) or '.'
    utils.plugins.push_basedir(self.basedir)

    # let inventory know the playbook basedir so it can load more vars
    self.inventory.set_playbook_basedir(self.basedir)

    # seed the variable namespace with extra vars plus the standard
    # playbook_dir/inventory_dir/inventory_file "magic" variables
    vars = extra_vars.copy()
    vars['playbook_dir'] = os.path.abspath(self.basedir)
    if self.inventory.basedir() is not None:
        vars['inventory_dir'] = self.inventory.basedir()

    if self.inventory.src() is not None:
        vars['inventory_file'] = self.inventory.src()

    self.filename = playbook
    # parse the playbook (recursively flattening any playbook includes)
    (self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars)
    ansible.callbacks.load_callback_plugins()
    ansible.callbacks.set_playbook(self.callbacks, self)

    self._ansible_version = utils.version_info(gitinfo=True)
# *****************************************************
def _get_playbook_vars(self, play_ds, existing_vars):
'''
Gets the vars specified with the play and blends them
with any existing vars that have already been read in
'''
new_vars = existing_vars.copy()
if 'vars' in play_ds:
if isinstance(play_ds['vars'], dict):
new_vars.update(play_ds['vars'])
elif isinstance(play_ds['vars'], list):
for v in play_ds['vars']:
new_vars.update(v)
return new_vars
# *****************************************************
def _get_include_info(self, play_ds, basedir, existing_vars=None):
    '''
    Gets any key=value pairs specified with the included file
    name and returns the merged vars along with the (unquoted) path.

    Fixes: None default instead of a shared mutable {} default, and the
    Python-2-only `except E, e` syntax replaced by `except E` (the bound
    name was unused anyway).
    '''
    if existing_vars is None:
        existing_vars = {}
    new_vars = existing_vars.copy()
    tokens = split_args(play_ds.get('include', ''))
    # tokens[0] is the file path; everything after it must be k=v pairs
    for t in tokens[1:]:
        try:
            (k,v) = unquote(t).split("=", 1)
            # values may reference variables merged in so far
            new_vars[k] = template(basedir, v, new_vars)
        except ValueError:
            raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t)

    return (new_vars, unquote(tokens[0]))
# *****************************************************
def _get_playbook_vars_files(self, play_ds, existing_vars_files):
    '''
    Return the union of the vars_files already collected and any
    'vars_files' entries declared directly on the play.
    '''
    combined = list(existing_vars_files)
    if 'vars_files' in play_ds:
        combined = utils.list_union(combined, play_ds['vars_files'])
    return combined
# *****************************************************
def _extend_play_vars(self, play, vars={}):
'''
Extends the given play's variables with the additional specified vars.
'''
if 'vars' not in play or not play['vars']:
# someone left out or put an empty "vars:" entry in their playbook
return vars.copy()
play_vars = None
if isinstance(play['vars'], dict):
play_vars = play['vars'].copy()
play_vars.update(vars)
elif isinstance(play['vars'], list):
# nobody should really do this, but handle vars: a=1 b=2
play_vars = play['vars'][:]
play_vars.extend([{k:v} for k,v in vars.iteritems()])
return play_vars
# *****************************************************
def _load_playbook_from_file(self, path, vars=None, vars_files=None):
    '''
    run top level error checking on playbooks and allow them to include
    other playbooks.

    path:       the playbook file to parse
    vars:       variables inherited from an including playbook, if any
    vars_files: vars_files inherited from an including playbook, if any

    Returns (plays, play_basedirs): the flattened list of play
    datastructures, and in parallel the basedir each play came from.

    Fixes: None defaults instead of shared mutable {} / [] defaults;
    isinstance() checks instead of exact type() comparisons.
    '''
    if vars is None:
        vars = {}
    if vars_files is None:
        vars_files = []

    playbook_data = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
    accumulated_plays = []
    play_basedirs = []
    if not isinstance(playbook_data, list):
        raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data))

    basedir = os.path.dirname(path) or '.'
    utils.plugins.push_basedir(basedir)
    for play in playbook_data:
        if not isinstance(play, dict):
            raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play)

        if 'include' in play:
            # a playbook (list of plays) decided to include some other list of plays
            # from another file.  The result is a flat list of plays in the end.

            play_vars = self._get_playbook_vars(play, vars)
            play_vars_files = self._get_playbook_vars_files(play, vars_files)
            inc_vars, inc_path = self._get_include_info(play, basedir, play_vars)
            play_vars.update(inc_vars)

            included_path = utils.path_dwim(basedir, template(basedir, inc_path, play_vars))
            # recurse: the included file may itself contain includes
            (plays, basedirs) = self._load_playbook_from_file(included_path, vars=play_vars, vars_files=play_vars_files)
            for p in plays:
                # support for parameterized play includes works by passing
                # those variables along to the subservient play
                p['vars'] = self._extend_play_vars(p, play_vars)
                # now add in the vars_files
                p['vars_files'] = utils.list_union(p.get('vars_files', []), play_vars_files)

            accumulated_plays.extend(plays)
            play_basedirs.extend(basedirs)
        else:
            # this is a normal (non-included) play
            accumulated_plays.append(play)
            play_basedirs.append(basedir)

    return (accumulated_plays, play_basedirs)
# *****************************************************
def run(self):
    ''' run all patterns in the playbook '''
    plays = []
    matched_tags_all = set()
    unmatched_tags_all = set()

    # loop through all patterns and run them
    self.callbacks.on_start()
    for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
        play = Play(self, play_ds, play_basedir, vault_password=self.vault_password)
        assert play is not None

        # which of this play's tags are selected by --tags, and which not
        matched_tags, unmatched_tags = play.compare_tags(self.only_tags)

        matched_tags_all = matched_tags_all | matched_tags
        unmatched_tags_all = unmatched_tags_all | unmatched_tags

        # Remove tasks we wish to skip
        matched_tags = matched_tags - set(self.skip_tags)

        # if we have matched_tags, the play must be run.
        # if the play contains no tasks, assume we just want to gather facts
        # in this case there are actually 3 meta tasks (handler flushes) not 0
        # tasks, so that's why there's a check against 3
        if (len(matched_tags) > 0 or len(play.tasks()) == 3):
            plays.append(play)

    # if the playbook is invoked with --tags or --skip-tags that don't
    # exist at all in the playbooks then we need to raise an error so that
    # the user can correct the arguments.
    unknown_tags = ((set(self.only_tags) | set(self.skip_tags)) -
                    (matched_tags_all | unmatched_tags_all))
    # reserved tag names ('all', 'tagged', ...) are never "unknown"
    for t in RESERVED_TAGS:
        unknown_tags.discard(t)

    if len(unknown_tags) > 0:
        for t in RESERVED_TAGS:
            unmatched_tags_all.discard(t)
        msg = 'tag(s) not found in playbook: %s. possible values: %s'
        unknown = ','.join(sorted(unknown_tags))
        unmatched = ','.join(sorted(unmatched_tags_all))
        raise errors.AnsibleError(msg % (unknown, unmatched))

    # run the selected plays in order; a play returning False (no hosts
    # remaining) aborts the rest of the playbook
    for play in plays:
        ansible.callbacks.set_play(self.callbacks, play)
        ansible.callbacks.set_play(self.runner_callbacks, play)
        if not self._run_play(play):
            break
        ansible.callbacks.set_play(self.callbacks, None)
        ansible.callbacks.set_play(self.runner_callbacks, None)

    # summarize the results
    results = {}
    for host in self.stats.processed.keys():
        results[host] = self.stats.summarize(host)
    return results
# *****************************************************
def _async_poll(self, poller, async_seconds, async_poll_interval):
    ''' launch an async job, if poll_interval is set, wait for completion '''

    results = poller.wait(async_seconds, async_poll_interval)

    # mark any hosts that are still listed as started as failed
    # since these likely got killed by async_wrapper
    for host in poller.hosts_to_poll:
        reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' }
        # the job id for the callback comes from the runner's vars cache
        self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id'])
        results['contacted'][host] = reason

    return results
# *****************************************************
def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False):
''' returns a list of hosts that haven't failed and aren't dark '''
return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)]
# *****************************************************
def _run_task_internal(self, task, include_failed=False):
    '''
    run a particular module step in a playbook

    Builds a Runner configured from the task/play/playbook settings,
    executes it (synchronously or async), and returns the results dict,
    or None when no hosts were contacted at all.

    Fixes: removes diff-fusion corruption on the Runner construction line
    (a stray " import os" had been fused into the source).
    '''

    # restrict the inventory to hosts that are still alive, optionally
    # keeping failed hosts (used when force-running handlers)
    hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed)
    self.inventory.restrict_to(hosts)

    runner = ansible.runner.Runner(
        pattern=task.play.hosts,
        inventory=self.inventory,
        module_name=task.module_name,
        module_args=task.module_args,
        forks=self.forks,
        remote_pass=self.remote_pass,
        module_path=self.module_path,
        timeout=self.timeout,
        remote_user=task.remote_user,
        remote_port=task.play.remote_port,
        module_vars=task.module_vars,
        play_vars=task.play_vars,
        play_file_vars=task.play_file_vars,
        role_vars=task.role_vars,
        role_params=task.role_params,
        default_vars=task.default_vars,
        extra_vars=self.extra_vars,
        private_key_file=self.private_key_file,
        setup_cache=self.SETUP_CACHE,
        vars_cache=self.VARS_CACHE,
        basedir=task.play.basedir,
        conditional=task.when,
        callbacks=self.runner_callbacks,
        transport=task.transport,
        is_playbook=True,
        check=self.check,
        diff=self.diff,
        environment=task.environment,
        complex_args=task.args,
        accelerate=task.play.accelerate,
        accelerate_port=task.play.accelerate_port,
        accelerate_ipv6=task.play.accelerate_ipv6,
        error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR,
        vault_pass=self.vault_password,
        run_hosts=hosts,
        no_log=task.no_log,
        run_once=task.run_once,
        become=task.become,
        become_method=task.become_method,
        become_user=task.become_user,
        become_pass=task.become_pass,
    )

    # expose the effective host list and ansible version to templates
    runner.module_vars.update({'play_hosts': hosts})
    runner.module_vars.update({'ansible_version': self._ansible_version})

    if task.async_seconds == 0:
        results = runner.run()
    else:
        results, poller = runner.run_async(task.async_seconds)
        self.stats.compute(results)
        if task.async_poll_interval > 0:
            # if not polling, playbook requested fire and forget, so don't poll
            results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
        else:
            for (host, res) in results.get('contacted', {}).iteritems():
                self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id'])

    contacted = results.get('contacted', {})
    dark      = results.get('dark', {})

    self.inventory.lift_restriction()

    # no contacted hosts and no dark hosts means nothing matched at all
    if len(contacted.keys()) == 0 and len(dark.keys()) == 0:
        return None

    return results
# *****************************************************
def _run_task(self, play, task, is_handler):
    ''' run a single task in the playbook and recursively run any subtasks. '''

    # tell the callbacks which task is active so output can be labeled
    ansible.callbacks.set_task(self.callbacks, task)
    ansible.callbacks.set_task(self.runner_callbacks, task)

    if task.role_name:
        name = '%s | %s' % (task.role_name, task.name)
    else:
        name = task.name

    try:
        # v1 HACK: we don't have enough information to template many names
        # at this point.  Rather than making this work for all cases in
        # v1, just make this degrade gracefully.  Will fix in v2
        name = template(play.basedir, name, task.module_vars, lookup_fatal=False, filter_fatal=False)
    except:
        pass

    self.callbacks.on_task_start(name, is_handler)
    # a callback may flag the task to be skipped (e.g. --step interaction)
    if hasattr(self.callbacks, 'skip_task') and self.callbacks.skip_task:
        ansible.callbacks.set_task(self.callbacks, None)
        ansible.callbacks.set_task(self.runner_callbacks, None)
        return True

    # template ignore_errors
    # TODO: Is this needed here?  cond is templated again in
    # check_conditional after some more manipulations.
    # TODO: we don't have enough information here to template cond either
    # (see note on templating name above)
    cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False)
    task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)

    # load up an appropriate ansible runner to run the task in parallel
    include_failed = is_handler and play.force_handlers
    results = self._run_task_internal(task, include_failed=include_failed)

    # if no hosts are matched, carry on
    hosts_remaining = True
    if results is None:
        hosts_remaining = False
        results = {}

    contacted = results.get('contacted', {})
    self.stats.compute(results, ignore_errors=task.ignore_errors)

    def _register_play_vars(host, result):
        # when 'register' is used, persist the result in the vars cache
        # rather than the setup cache - vars should be transient between
        # playbook executions
        if 'stdout' in result and 'stdout_lines' not in result:
            result['stdout_lines'] = result['stdout'].splitlines()
        utils.update_hash(self.VARS_CACHE, host, {task.register: result})

    def _save_play_facts(host, facts):
        # saves play facts in SETUP_CACHE, unless the module executed was
        # set_fact, in which case we add them to the VARS_CACHE
        if task.module_name in ('set_fact', 'include_vars'):
            utils.update_hash(self.VARS_CACHE, host, facts)
        else:
            utils.update_hash(self.SETUP_CACHE, host, facts)

    # add facts to the global setup cache
    for host, result in contacted.iteritems():
        if 'results' in result:
            # task ran with_ lookup plugin, so facts are encapsulated in
            # multiple list items in the results key
            for res in result['results']:
                if type(res) == dict:
                    facts = res.get('ansible_facts', {})
                    _save_play_facts(host, facts)
        else:
            # when facts are returned, persist them in the setup cache
            facts = result.get('ansible_facts', {})
            _save_play_facts(host, facts)

        # if requested, save the result into the registered variable name
        if task.register:
            _register_play_vars(host, result)

    # also have to register some failed, but ignored, tasks
    if task.ignore_errors and task.register:
        failed = results.get('failed', {})
        for host, result in failed.iteritems():
            _register_play_vars(host, result)

    # flag which notify handlers need to be run
    if len(task.notify) > 0:
        for host, results in results.get('contacted',{}).iteritems():
            if results.get('changed', False):
                for handler_name in task.notify:
                    self._flag_handler(play, template(play.basedir, handler_name, task.module_vars), host)

    # clear the active-task marker on the callbacks
    ansible.callbacks.set_task(self.callbacks, None)
    ansible.callbacks.set_task(self.runner_callbacks, None)
    return hosts_remaining
# *****************************************************
def _flag_handler(self, play, handler_name, host):
    '''
    if a task has any notify elements, flag handlers for run
    at end of execution cycle for hosts that have indicated
    changes have been made
    '''
    matched_any = False
    for handler in play.handlers():
        resolved = template(play.basedir, handler.name, handler.module_vars)
        if resolved == handler_name:
            matched_any = True
            self.callbacks.on_notify(host, handler.name)
            handler.notified_by.append(host)
    if not matched_any:
        raise errors.AnsibleError("change handler (%s) is not defined" % handler_name)
# *****************************************************
def _do_setup_step(self, play):
    ''' get facts from the remote system '''

    host_list = self._trim_unavailable_hosts(play._play_hosts)

    if play.gather_facts is None and C.DEFAULT_GATHERING == 'smart':
        # 'smart' gathering: only contact hosts with no cached facts yet
        host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]]
        if len(host_list) == 0:
            return {}
    elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'explicit'):
        # fact gathering disabled for this play
        return {}

    self.callbacks.on_setup()
    self.inventory.restrict_to(host_list)

    ansible.callbacks.set_task(self.callbacks, None)
    ansible.callbacks.set_task(self.runner_callbacks, None)

    # push any variables down to the system
    setup_results = ansible.runner.Runner(
        basedir=self.basedir,
        pattern=play.hosts,
        module_name='setup',
        module_args={},
        inventory=self.inventory,
        forks=self.forks,
        module_path=self.module_path,
        timeout=self.timeout,
        remote_user=play.remote_user,
        remote_pass=self.remote_pass,
        remote_port=play.remote_port,
        private_key_file=self.private_key_file,
        setup_cache=self.SETUP_CACHE,
        vars_cache=self.VARS_CACHE,
        callbacks=self.runner_callbacks,
        become=play.become,
        become_method=play.become_method,
        become_user=play.become_user,
        become_pass=self.become_pass,
        vault_pass=self.vault_password,
        transport=play.transport,
        is_playbook=True,
        module_vars=play.vars,
        play_vars=play.vars,
        play_file_vars=play.vars_file_vars,
        role_vars=play.role_vars,
        default_vars=play.default_vars,
        check=self.check,
        diff=self.diff,
        accelerate=play.accelerate,
        accelerate_port=play.accelerate_port,
    ).run()
    self.stats.compute(setup_results, setup=True)

    self.inventory.lift_restriction()

    # now for each result, load into the setup cache so we can
    # let runner template out future commands
    setup_ok = setup_results.get('contacted', {})
    for (host, result) in setup_ok.iteritems():
        # 'module_setup' marks this host as already gathered for 'smart' mode
        utils.update_hash(self.SETUP_CACHE, host, {'module_setup': True})
        utils.update_hash(self.SETUP_CACHE, host, result.get('ansible_facts', {}))
    return setup_results
# *****************************************************
def generate_retry_inventory(self, replay_hosts):
    '''
    called by /usr/bin/ansible when a playbook run fails. It generates an inventory
    that allows re-running on ONLY the failed hosts.  This may duplicate some
    variable information in group_vars/host_vars but that is ok, and expected.

    Returns the retry file path, or None (after displaying a warning) if
    the file could not be written.

    Fixes: bare `except:` (which also swallowed SystemExit and
    KeyboardInterrupt) narrowed to Exception; file handle now closed
    reliably via a with-statement; redundant StringIO buffer removed.
    '''

    basedir = C.shell_expand_path(C.RETRY_FILES_SAVE_PATH)
    filename = "%s.retry" % os.path.basename(self.filename)
    filename = filename.replace(".yml","")
    filename = os.path.join(basedir, filename)

    try:
        if not os.path.exists(basedir):
            os.makedirs(basedir)

        # one failed host per line; with-statement guarantees the file is
        # closed even if a write fails
        with open(filename, 'w') as fd:
            fd.write(''.join("%s\n" % x for x in replay_hosts))
    except Exception:
        # best-effort: warn and carry on rather than aborting the run
        ansible.callbacks.display(
            "\nERROR: could not create retry file. Check the value of \n"
            + "the configuration variable 'retry_files_save_path' or set \n"
            + "'retry_files_enabled' to False to avoid this message.\n",
            color='red'
        )
        return None

    return filename
# *****************************************************
def tasks_to_run_in_play(self, play):
    '''
    Filter the play's tasks down to those selected by --tags/--skip-tags.

    A task tagged 'always' is selected regardless of --tags, but skip
    rules are applied afterwards and override the run rules.  'tagged'
    and 'untagged' are pseudo-tags matching tasks that do / do not carry
    user tags.

    Fixes: restores the `return tasks` line, which had diff-fusion
    corruption (v2 import lines fused onto it) in the source.
    '''

    tasks = []
    for task in play.tasks():
        # only run the task if the requested tags match or has 'always' tag
        u = set(['untagged'])
        task_set = set(task.tags)

        if 'always' in task.tags:
            should_run = True
        else:
            if 'all' in self.only_tags:
                should_run = True
            else:
                should_run = False
                if 'tagged' in self.only_tags:
                    if task_set != u:
                        should_run = True
                elif 'untagged' in self.only_tags:
                    if task_set == u:
                        should_run = True
                else:
                    if task_set.intersection(self.only_tags):
                        should_run = True

        # Check for tags that we need to skip
        if 'all' in self.skip_tags:
            should_run = False
        else:
            if 'tagged' in self.skip_tags:
                if task_set != u:
                    should_run = False
            elif 'untagged' in self.skip_tags:
                if task_set == u:
                    should_run = False
            else:
                if should_run:
                    if task_set.intersection(self.skip_tags):
                        should_run = False

        if should_run:
            tasks.append(task)

    return tasks
from ansible.parsing import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.play import Play
from ansible.playbook.playbook_include import PlaybookInclude
from ansible.plugins import push_basedir
# *****************************************************
def _run_play(self, play):
''' run a list of tasks for a given pattern, in order '''
self.callbacks.on_play_start(play.name) __all__ = ['Playbook']
# Get the hosts for this play
play._play_hosts = self.inventory.list_hosts(play.hosts)
# if no hosts matches this play, drop out
if not play._play_hosts:
self.callbacks.on_no_hosts_matched()
return True
# get facts from system
self._do_setup_step(play)
# now with that data, handle contentional variable file imports! class Playbook:
all_hosts = self._trim_unavailable_hosts(play._play_hosts)
play.update_vars_files(all_hosts, vault_password=self.vault_password)
hosts_count = len(all_hosts)
if play.serial.endswith("%"): def __init__(self, loader):
# Entries in the datastructure of a playbook may
# be either a play or an include statement
self._entries = []
self._basedir = os.getcwd()
self._loader = loader
# This is a percentage, so calculate it based on the @staticmethod
# number of hosts def load(file_name, variable_manager=None, loader=None):
serial_pct = int(play.serial.replace("%","")) pb = Playbook(loader=loader)
serial = int((serial_pct/100.0) * len(all_hosts)) pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
return pb
# Ensure that no matter how small the percentage, serial def _load_playbook_data(self, file_name, variable_manager):
# can never fall below 1, so that things actually happen
serial = max(serial, 1)
else:
serial = int(play.serial)
serialized_batch = [] if os.path.isabs(file_name):
if serial <= 0: self._basedir = os.path.dirname(file_name)
serialized_batch = [all_hosts]
else: else:
# do N forks all the way through before moving to next self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
while len(all_hosts) > 0:
play_hosts = []
for x in range(serial):
if len(all_hosts) > 0:
play_hosts.append(all_hosts.pop(0))
serialized_batch.append(play_hosts)
task_errors = False
for on_hosts in serialized_batch:
# restrict the play to just the hosts we have in our on_hosts block that are
# available.
play._play_hosts = self._trim_unavailable_hosts(on_hosts)
self.inventory.also_restrict_to(on_hosts)
for task in self.tasks_to_run_in_play(play):
if task.meta is not None:
# meta tasks can force handlers to run mid-play
if task.meta == 'flush_handlers':
self.run_handlers(play)
# skip calling the handler till the play is finished
continue
if not self._run_task(play, task, False):
# whether no hosts matched is fatal or not depends if it was on the initial step.
# if we got exactly no hosts on the first step (setup!) then the host group
# just didn't match anything and that's ok
return False
# Get a new list of what hosts are left as available, the ones that # set the loaders basedir
# did not go fail/dark during the task self._loader.set_basedir(self._basedir)
host_list = self._trim_unavailable_hosts(play._play_hosts)
# Set max_fail_pct to 0, So if any hosts fails, bail out # also add the basedir to the list of module directories
if task.any_errors_fatal and len(host_list) < hosts_count: push_basedir(self._basedir)
play.max_fail_pct = 0
# If threshold for max nodes failed is exceeded, bail out. ds = self._loader.load_from_file(os.path.basename(file_name))
if play.serial > 0: if not isinstance(ds, list):
# if serial is set, we need to shorten the size of host_count raise AnsibleParserError("playbooks must be a list of plays", obj=ds)
play_count = len(play._play_hosts)
if (play_count - len(host_list)) > int((play.max_fail_pct)/100.0 * play_count):
host_list = None
else:
if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
host_list = None
# if no hosts remain, drop out # Parse the playbook entries. For plays, we simply parse them
if not host_list: # using the Play() object, and includes are parsed using the
if play.force_handlers: # PlaybookInclude() object
task_errors = True for entry in ds:
break if not isinstance(entry, dict):
else: raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
self.callbacks.on_no_hosts_remaining()
return False
# lift restrictions after each play finishes if 'include' in entry:
self.inventory.lift_also_restriction() pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
self._entries.extend(pb._entries)
if task_errors and not play.force_handlers:
# if there were failed tasks and handler execution
# is not forced, quit the play with an error
return False
else: else:
# no errors, go ahead and execute all handlers entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)
if not self.run_handlers(play): self._entries.append(entry_obj)
return False
return True
def run_handlers(self, play):
on_hosts = play._play_hosts
hosts_count = len(on_hosts)
for task in play.tasks():
if task.meta is not None:
fired_names = {}
for handler in play.handlers():
if len(handler.notified_by) > 0:
self.inventory.restrict_to(handler.notified_by)
# Resolve the variables first
handler_name = template(play.basedir, handler.name, handler.module_vars)
if handler_name not in fired_names:
self._run_task(play, handler, True)
# prevent duplicate handler includes from running more than once
fired_names[handler_name] = 1
host_list = self._trim_unavailable_hosts(play._play_hosts)
if handler.any_errors_fatal and len(host_list) < hosts_count:
play.max_fail_pct = 0
if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
host_list = None
if not host_list and not play.force_handlers:
self.callbacks.on_no_hosts_remaining()
return False
self.inventory.lift_restriction()
new_list = handler.notified_by[:]
for host in handler.notified_by:
if host in on_hosts:
while host in new_list:
new_list.remove(host)
handler.notified_by = new_list
continue def get_loader(self):
return self._loader
return True def get_plays(self):
return self._entries[:]

File diff suppressed because it is too large Load Diff

@ -15,332 +15,296 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import errors # Make coding more python3-ish
from ansible import utils from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.splitter import split_args __metaclass__ = type
import os
import ansible.utils.template as template from ansible.errors import AnsibleError
import sys
from ansible.parsing.mod_args import ModuleArgsParser
class Task(object): from ansible.parsing.splitter import parse_kv
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
_t_common = [
'action', 'always_run', 'any_errors_fatal', 'args', 'become', 'become_method', 'become_pass', from ansible.plugins import module_loader, lookup_loader
'become_user', 'changed_when', 'delay', 'delegate_to', 'environment', 'failed_when',
'first_available_file', 'ignore_errors', 'local_action', 'meta', 'name', 'no_log', from ansible.playbook.attribute import Attribute, FieldAttribute
'notify', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user', from ansible.playbook.base import Base
'sudo', 'sudo_pass', 'sudo_user', 'tags', 'transport', 'until', 'when', from ansible.playbook.become import Become
] from ansible.playbook.block import Block
from ansible.playbook.conditional import Conditional
__slots__ = [ from ansible.playbook.role import Role
'async_poll_interval', 'async_seconds', 'default_vars', 'first_available_file', from ansible.playbook.taggable import Taggable
'items_lookup_plugin', 'items_lookup_terms', 'module_args', 'module_name', 'module_vars',
'notified_by', 'play', 'play_file_vars', 'play_vars', 'role_name', 'role_params', 'role_vars', __all__ = ['Task']
] + _t_common
class Task(Base, Conditional, Taggable, Become):
# to prevent typos and such
VALID_KEYS = frozenset([ """
'async', 'connection', 'include', 'poll', A task is a language feature that represents a call to a module, with given arguments and other parameters.
] + _t_common) A handler is a subclass of a task.
def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None): Usage:
''' constructor loads from a task or handler datastructure '''
Task.load(datastructure) -> Task
# meta directives are used to tell things like ansible/playbook to run Task.something(...)
# operations like handler execution. Meta tasks are not executed """
# normally.
if 'meta' in ds: # =================================================================================
self.meta = ds['meta'] # ATTRIBUTES
self.tags = [] # load_<attribute_name> and
self.module_vars = module_vars # validate_<attribute_name>
self.role_name = role_name # will be used if defined
return # might be possible to define others
else:
self.meta = None _args = FieldAttribute(isa='dict', default=dict())
_action = FieldAttribute(isa='string')
library = os.path.join(play.basedir, 'library') _always_run = FieldAttribute(isa='bool')
if os.path.exists(library): _any_errors_fatal = FieldAttribute(isa='bool')
utils.plugins.module_finder.add_directory(library) _async = FieldAttribute(isa='int', default=0)
_changed_when = FieldAttribute(isa='string')
for x in ds.keys(): _delay = FieldAttribute(isa='int', default=5)
_delegate_to = FieldAttribute(isa='string')
# code to allow for saying "modulename: args" versus "action: modulename args" _failed_when = FieldAttribute(isa='string')
if x in utils.plugins.module_finder: _first_available_file = FieldAttribute(isa='list')
_ignore_errors = FieldAttribute(isa='bool')
if 'action' in ds:
raise errors.AnsibleError("multiple actions specified in task: '%s' and '%s'" % (x, ds.get('name', ds['action']))) _loop = FieldAttribute(isa='string', private=True)
if isinstance(ds[x], dict): _loop_args = FieldAttribute(isa='list', private=True)
if 'args' in ds: _local_action = FieldAttribute(isa='string')
raise errors.AnsibleError("can't combine args: and a dict for %s: in task %s" % (x, ds.get('name', "%s: %s" % (x, ds[x]))))
ds['args'] = ds[x] # FIXME: this should not be a Task
ds[x] = '' _meta = FieldAttribute(isa='string')
elif ds[x] is None:
ds[x] = '' _name = FieldAttribute(isa='string', default='')
if not isinstance(ds[x], basestring):
raise errors.AnsibleError("action specified for task %s has invalid type %s" % (ds.get('name', "%s: %s" % (x, ds[x])), type(ds[x]))) _notify = FieldAttribute(isa='list')
ds['action'] = x + " " + ds[x] _poll = FieldAttribute(isa='int')
ds.pop(x) _register = FieldAttribute(isa='string')
_retries = FieldAttribute(isa='int', default=1)
# code to allow "with_glob" and to reference a lookup plugin named glob _run_once = FieldAttribute(isa='bool')
elif x.startswith("with_"): _until = FieldAttribute(isa='list') # ?
if isinstance(ds[x], basestring):
param = ds[x].strip() def __init__(self, block=None, role=None, task_include=None):
''' constructors a task, without the Task.load classmethod, it will be pretty blank '''
plugin_name = x.replace("with_","")
if plugin_name in utils.plugins.lookup_loader: self._block = block
ds['items_lookup_plugin'] = plugin_name self._role = role
ds['items_lookup_terms'] = ds[x] self._task_include = task_include
ds.pop(x)
else: super(Task, self).__init__()
raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
def get_name(self):
elif x in [ 'changed_when', 'failed_when', 'when']: ''' return the name of the task '''
if isinstance(ds[x], basestring):
param = ds[x].strip() if self._role and self.name:
# Only a variable, no logic return "%s : %s" % (self._role.get_name(), self.name)
if (param.startswith('{{') and elif self.name:
param.find('}}') == len(ds[x]) - 2 and return self.name
param.find('|') == -1): else:
utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.") flattened_args = self._merge_kv(self.args)
elif x.startswith("when_"): if self._role:
utils.deprecated("The 'when_' conditional has been removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True) return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args)
else:
if 'when' in ds: return "%s %s" % (self.action, flattened_args)
raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds['action'])))
when_name = x.replace("when_","") def _merge_kv(self, ds):
ds['when'] = "%s %s" % (when_name, ds[x]) if ds is None:
ds.pop(x) return ""
elif not x in Task.VALID_KEYS: elif isinstance(ds, basestring):
raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x) return ds
elif isinstance(ds, dict):
self.module_vars = module_vars buf = ""
self.play_vars = play_vars for (k,v) in ds.iteritems():
self.play_file_vars = play_file_vars if k.startswith('_'):
self.role_vars = role_vars continue
self.role_params = role_params buf = buf + "%s=%s " % (k,v)
self.default_vars = default_vars buf = buf.strip()
self.play = play return buf
# load various attributes @staticmethod
self.name = ds.get('name', None) def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
self.tags = [ 'untagged' ] t = Task(block=block, role=role, task_include=task_include)
self.register = ds.get('register', None) return t.load_data(data, variable_manager=variable_manager, loader=loader)
self.environment = ds.get('environment', play.environment)
self.role_name = role_name def __repr__(self):
self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log ''' returns a human readable representation of the task '''
self.run_once = utils.boolean(ds.get('run_once', 'false')) return "TASK: %s" % self.get_name()
#Code to allow do until feature in a Task def _preprocess_loop(self, ds, new_ds, k, v):
if 'until' in ds: ''' take a lookup plugin name and store it correctly '''
if not ds.get('register'):
raise errors.AnsibleError("register keyword is mandatory when using do until feature") loop_name = k.replace("with_", "")
self.module_vars['delay'] = ds.get('delay', 5) if new_ds.get('loop') is not None:
self.module_vars['retries'] = ds.get('retries', 3) raise AnsibleError("duplicate loop in task: %s" % loop_name)
self.module_vars['register'] = ds.get('register', None) new_ds['loop'] = loop_name
self.until = ds.get('until') new_ds['loop_args'] = v
self.module_vars['until'] = self.until
def preprocess_data(self, ds):
# rather than simple key=value args on the options line, these represent structured data and the values '''
# can be hashes and lists, not just scalars tasks are especially complex arguments so need pre-processing.
self.args = ds.get('args', {}) keep it short.
'''
# get remote_user for task, then play, then playbook
if ds.get('remote_user') is not None: assert isinstance(ds, dict)
self.remote_user = ds.get('remote_user')
elif ds.get('remote_user', play.remote_user) is not None: # the new, cleaned datastructure, which will have legacy
self.remote_user = ds.get('remote_user', play.remote_user) # items reduced to a standard structure suitable for the
else: # attributes of the task class
self.remote_user = ds.get('remote_user', play.playbook.remote_user) new_ds = AnsibleMapping()
if isinstance(ds, AnsibleBaseYAMLObject):
# Fail out if user specifies privilege escalation params in conflict new_ds.ansible_pos = ds.ansible_pos
if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')):
raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name) # use the args parsing class to determine the action, args,
# and the delegate_to value from the various possible forms
if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')): # supported as legacy
raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and su params "su", "su_user", "sudo_pass" in task: %s' % self.name) args_parser = ModuleArgsParser(task_ds=ds)
(action, args, delegate_to) = args_parser.parse()
if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
raise errors.AnsibleError('incompatible parameters ("su", "su_user", "su_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name) new_ds['action'] = action
new_ds['args'] = args
self.become = utils.boolean(ds.get('become', play.become)) new_ds['delegate_to'] = delegate_to
self.become_method = ds.get('become_method', play.become_method)
self.become_user = ds.get('become_user', play.become_user) for (k,v) in ds.iteritems():
self.become_pass = ds.get('become_pass', play.playbook.become_pass) if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
# we don't want to re-assign these values, which were
# set only if passed in current task data # determined by the ModuleArgsParser() above
if 'sudo' in ds or 'sudo_user' in ds: continue
self.become_method='sudo' elif k.replace("with_", "") in lookup_loader:
self._preprocess_loop(ds, new_ds, k, v)
if 'sudo' in ds:
self.become=ds['sudo']
del ds['sudo']
else: else:
self.become=True new_ds[k] = v
if 'sudo_user' in ds:
self.become_user = ds['sudo_user'] return super(Task, self).preprocess_data(new_ds)
del ds['sudo_user']
if 'sudo_pass' in ds: def post_validate(self, templar):
self.become_pass = ds['sudo_pass'] '''
del ds['sudo_pass'] Override of base class post_validate, to also do final validation on
the block and task include (if any) to which this task belongs.
elif 'su' in ds or 'su_user' in ds: '''
self.become_method='su'
if self._block:
if 'su' in ds: self._block.post_validate(templar)
self.become=ds['su'] if self._task_include:
self._task_include.post_validate(templar)
super(Task, self).post_validate(templar)
def get_vars(self):
all_vars = self.vars.copy()
if self._block:
all_vars.update(self._block.get_vars())
if self._task_include:
all_vars.update(self._task_include.get_vars())
all_vars.update(self.serialize())
if 'tags' in all_vars:
del all_vars['tags']
if 'when' in all_vars:
del all_vars['when']
return all_vars
def copy(self, exclude_block=False):
new_me = super(Task, self).copy()
new_me._block = None
if self._block and not exclude_block:
new_me._block = self._block.copy()
new_me._role = None
if self._role:
new_me._role = self._role
new_me._task_include = None
if self._task_include:
new_me._task_include = self._task_include.copy()
return new_me
def serialize(self):
data = super(Task, self).serialize()
if self._block:
data['block'] = self._block.serialize()
if self._role:
data['role'] = self._role.serialize()
if self._task_include:
data['task_include'] = self._task_include.serialize()
return data
def deserialize(self, data):
# import is here to avoid import loops
#from ansible.playbook.task_include import TaskInclude
block_data = data.get('block')
if block_data:
b = Block()
b.deserialize(block_data)
self._block = b
del data['block']
role_data = data.get('role')
if role_data:
r = Role()
r.deserialize(role_data)
self._role = r
del data['role']
ti_data = data.get('task_include')
if ti_data:
#ti = TaskInclude()
ti = Task()
ti.deserialize(ti_data)
self._task_include = ti
del data['task_include']
super(Task, self).deserialize(data)
def evaluate_conditional(self, all_vars):
if self._block is not None:
if not self._block.evaluate_conditional(all_vars):
return False
if self._task_include is not None:
if not self._task_include.evaluate_conditional(all_vars):
return False
return super(Task, self).evaluate_conditional(all_vars)
def set_loader(self, loader):
'''
Sets the loader on this object and recursively on parent, child objects.
This is used primarily after the Task has been serialized/deserialized, which
does not preserve the loader.
'''
self._loader = loader
if self._block:
self._block.set_loader(loader)
if self._task_include:
self._task_include.set_loader(loader)
def _get_parent_attribute(self, attr, extend=False):
'''
Generic logic to get the attribute or parent attribute for a task value.
'''
value = self._attributes[attr]
if self._block and (not value or extend):
parent_value = getattr(self._block, attr)
if extend:
value = self._extend_value(value, parent_value)
else: else:
self.become=True value = parent_value
del ds['su'] if self._task_include and (not value or extend):
if 'su_user' in ds: parent_value = getattr(self._task_include, attr)
self.become_user = ds['su_user'] if extend:
del ds['su_user'] value = self._extend_value(value, parent_value)
if 'su_pass' in ds:
self.become_pass = ds['su_pass']
del ds['su_pass']
# Both are defined
if ('action' in ds) and ('local_action' in ds):
raise errors.AnsibleError("the 'action' and 'local_action' attributes can not be used together")
# Both are NOT defined
elif (not 'action' in ds) and (not 'local_action' in ds):
raise errors.AnsibleError("'action' or 'local_action' attribute missing in task \"%s\"" % ds.get('name', '<Unnamed>'))
# Only one of them is defined
elif 'local_action' in ds:
self.action = ds.get('local_action', '')
self.delegate_to = '127.0.0.1'
else:
self.action = ds.get('action', '')
self.delegate_to = ds.get('delegate_to', None)
self.transport = ds.get('connection', ds.get('transport', play.transport))
if isinstance(self.action, dict):
if 'module' not in self.action:
raise errors.AnsibleError("'module' attribute missing from action in task \"%s\"" % ds.get('name', '%s' % self.action))
if self.args:
raise errors.AnsibleError("'args' cannot be combined with dict 'action' in task \"%s\"" % ds.get('name', '%s' % self.action))
self.args = self.action
self.action = self.args.pop('module')
# delegate_to can use variables
if not (self.delegate_to is None):
# delegate_to: localhost should use local transport
if self.delegate_to in ['127.0.0.1', 'localhost']:
self.transport = 'local'
# notified by is used by Playbook code to flag which hosts
# need to run a notifier
self.notified_by = []
# if no name is specified, use the action line as the name
if self.name is None:
self.name = self.action
# load various attributes
self.when = ds.get('when', None)
self.changed_when = ds.get('changed_when', None)
self.failed_when = ds.get('failed_when', None)
# combine the default and module vars here for use in templating
all_vars = self.default_vars.copy()
all_vars = utils.combine_vars(all_vars, self.play_vars)
all_vars = utils.combine_vars(all_vars, self.play_file_vars)
all_vars = utils.combine_vars(all_vars, self.role_vars)
all_vars = utils.combine_vars(all_vars, self.module_vars)
all_vars = utils.combine_vars(all_vars, self.role_params)
self.async_seconds = ds.get('async', 0) # not async by default
self.async_seconds = template.template_from_string(play.basedir, self.async_seconds, all_vars)
self.async_seconds = int(self.async_seconds)
self.async_poll_interval = ds.get('poll', 10) # default poll = 10 seconds
self.async_poll_interval = template.template_from_string(play.basedir, self.async_poll_interval, all_vars)
self.async_poll_interval = int(self.async_poll_interval)
self.notify = ds.get('notify', [])
self.first_available_file = ds.get('first_available_file', None)
self.items_lookup_plugin = ds.get('items_lookup_plugin', None)
self.items_lookup_terms = ds.get('items_lookup_terms', None)
self.ignore_errors = ds.get('ignore_errors', False)
self.any_errors_fatal = ds.get('any_errors_fatal', play.any_errors_fatal)
self.always_run = ds.get('always_run', False)
# action should be a string
if not isinstance(self.action, basestring):
raise errors.AnsibleError("action is of type '%s' and not a string in task. name: %s" % (type(self.action).__name__, self.name))
# notify can be a string or a list, store as a list
if isinstance(self.notify, basestring):
self.notify = [ self.notify ]
# split the action line into a module name + arguments
try:
tokens = split_args(self.action)
except Exception, e:
if "unbalanced" in str(e):
raise errors.AnsibleError("There was an error while parsing the task %s.\n" % repr(self.action) + \
"Make sure quotes are matched or escaped properly")
else: else:
raise value = parent_value
if len(tokens) < 1: return value
raise errors.AnsibleError("invalid/missing action in task. name: %s" % self.name)
self.module_name = tokens[0]
self.module_args = ''
if len(tokens) > 1:
self.module_args = " ".join(tokens[1:])
import_tags = self.module_vars.get('tags',[])
if type(import_tags) in [int,float]:
import_tags = str(import_tags)
elif type(import_tags) in [str,unicode]:
# allow the user to list comma delimited tags
import_tags = import_tags.split(",")
# handle mutually incompatible options
incompatibles = [ x for x in [ self.first_available_file, self.items_lookup_plugin ] if x is not None ]
if len(incompatibles) > 1:
raise errors.AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task")
# make first_available_file accessible to Runner code
if self.first_available_file:
self.module_vars['first_available_file'] = self.first_available_file
# make sure that the 'item' variable is set when using
# first_available_file (issue #8220)
if 'item' not in self.module_vars:
self.module_vars['item'] = ''
if self.items_lookup_plugin is not None:
self.module_vars['items_lookup_plugin'] = self.items_lookup_plugin
self.module_vars['items_lookup_terms'] = self.items_lookup_terms
# allow runner to see delegate_to option
self.module_vars['delegate_to'] = self.delegate_to
# make some task attributes accessible to Runner code
self.module_vars['ignore_errors'] = self.ignore_errors
self.module_vars['register'] = self.register
self.module_vars['changed_when'] = self.changed_when
self.module_vars['failed_when'] = self.failed_when
self.module_vars['always_run'] = self.always_run
# tags allow certain parts of a playbook to be run without running the whole playbook
apply_tags = ds.get('tags', None)
if apply_tags is not None:
if type(apply_tags) in [ str, unicode ]:
self.tags.append(apply_tags)
elif type(apply_tags) in [ int, float ]:
self.tags.append(str(apply_tags))
elif type(apply_tags) == list:
self.tags.extend(apply_tags)
self.tags.extend(import_tags)
if len(self.tags) > 1:
self.tags.remove('untagged')
if additional_conditions:
new_conditions = additional_conditions[:]
if self.when:
new_conditions.append(self.when)
self.when = new_conditions

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save