whitespace + remove deprecated YAML parser (migration script lives in examples/scripts and warning was added in 0.6 release)
pull/781/merge
Michael DeHaan 12 years ago
parent 0810f26095
commit faed4b5a33
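For context, a hedged sketch of the migration this commit enforces: the removed InventoryParserYaml accepted inventories like the YAML snippet below, and examples/scripts/yaml_to_ini.py is the suggested route to the INI format that the remaining InventoryParser reads. Hostnames and variables here are made up for illustration, and the INI rendering is only roughly what the migration script would produce.

    # old-style YAML inventory (now rejected with an AnsibleError)
    - alpha
    - group: webservers
      vars:
        - favcolor: red
      hosts:
        - beta
        - host: gamma
          vars:
            - asdf: jkl

    # roughly equivalent INI inventory understood by InventoryParser
    alpha

    [webservers]
    beta
    gamma asdf=jkl

    [webservers:vars]
    favcolor=red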

@@ -33,35 +33,35 @@ from ansible import inventory
class Cli(object):
    ''' code behind bin/ansible '''
    # ----------------------------------------------
    def __init__(self):
        self.stats = callbacks.AggregateStats()
        self.callbacks = callbacks.CliRunnerCallbacks()
    # ----------------------------------------------
    def parse(self):
        ''' create an options parser for bin/ansible '''
        parser = utils.base_parser(constants=C, runas_opts=True, async_opts=True,
            output_opts=True, connect_opts=True, usage='%prog <host-pattern> [options]')
        parser.add_option('-a', '--args', dest='module_args',
            help="module arguments", default=C.DEFAULT_MODULE_ARGS)
        parser.add_option('-m', '--module-name', dest='module_name',
            help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
            default=C.DEFAULT_MODULE_NAME)
        options, args = parser.parse_args()
        self.callbacks.options = options
        if len(args) == 0 or len(args) > 1:
            parser.print_help()
            sys.exit(1)
        return (options, args)
    # ----------------------------------------------
    def run(self, options, args):
        ''' use Runner lib to do SSH things '''
@@ -72,7 +72,7 @@ class Cli(object):
        if len(hosts) == 0:
            print >>sys.stderr, "No hosts matched"
            sys.exit(1)
        sshpass = None
        sudopass = None
        if options.ask_pass:
@@ -90,11 +90,11 @@ class Cli(object):
            module_name=options.module_name, module_path=options.module_path,
            module_args=options.module_args,
            remote_user=options.remote_user, remote_pass=sshpass,
            inventory=inventory_manager, timeout=options.timeout,
            private_key_file=options.private_key_file,
            forks=options.forks,
            pattern=pattern,
            callbacks=self.callbacks, sudo=options.sudo,
            sudo_pass=sudopass, sudo_user=options.sudo_user,
            transport=options.connection, verbose=options.verbose
        )
@@ -108,7 +108,7 @@ class Cli(object):
        return (runner, results)
    # ----------------------------------------------
    def poll_while_needed(self, poller, options):
        ''' summarize results from Runner '''

@@ -87,15 +87,15 @@ def main(args):
        pb = ansible.playbook.PlayBook(
            playbook=playbook,
            module_path=options.module_path,
            host_list=options.inventory,
            forks=options.forks,
            verbose=options.verbose,
            remote_user=options.remote_user,
            remote_pass=sshpass,
            callbacks=playbook_cb,
            runner_callbacks=runner_cb,
            stats=stats,
            timeout=options.timeout,
            transport=options.connection,
            sudo=options.sudo,
            sudo_user=options.sudo_user,

@@ -5,7 +5,7 @@
# example playbook to bootstrap this script in the examples/ dir which
# installs ansible and sets it up to run on cron.
#
# usage:
#   ansible-pull -d /var/ansible/local -U http://wherever/content.git -C production
#
# the git repo must contain a playbook named 'local.yml'

@@ -29,8 +29,8 @@ elif os.path.exists("/usr/games/cowsay"):
    cowsay = "/usr/games/cowsay"
class AggregateStats(object):
    ''' holds stats about per-host activity during playbook runs '''
    def __init__(self):
        self.processed = {}
@@ -49,7 +49,7 @@ class AggregateStats(object):
    def compute(self, runner_results, setup=False, poll=False):
        ''' walk through all results and increment stats '''
        for (host, value) in runner_results.get('contacted', {}).iteritems():
            if ('failed' in value and bool(value['failed'])) or ('rc' in value and value['rc'] != 0):
                self._increment('failures', host)
@@ -65,7 +65,7 @@ class AggregateStats(object):
        for (host, value) in runner_results.get('dark', {}).iteritems():
            self._increment('dark', host)
    def summarize(self, host):
        ''' return information about a particular host '''
@@ -92,10 +92,10 @@ def regular_generic_msg(hostname, result, oneline, caption):
def banner(msg):
    if cowsay != None:
        cmd = subprocess.Popen("%s -W 60 \"%s\"" % (cowsay, msg),
            stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        (out, err) = cmd.communicate()
        return "%s\n" % out
    else:
        return "\n%s ********************* " % msg
@@ -182,7 +182,7 @@ class CliRunnerCallbacks(DefaultRunnerCallbacks):
    def __init__(self):
        # set by /usr/bin/ansible later
        self.options = None
        self._async_notified = {}
    def on_failed(self, host, res, ignore_errors=False):
@@ -192,7 +192,7 @@ class CliRunnerCallbacks(DefaultRunnerCallbacks):
    def on_ok(self, host, res):
        self._on_any(host,res)
    def on_unreachable(self, host, res):
        if type(res) == dict:
@@ -200,17 +200,17 @@ class CliRunnerCallbacks(DefaultRunnerCallbacks):
        print "%s | FAILED => %s" % (host, res)
        if self.options.tree:
            utils.write_tree_file(
                self.options.tree, host,
                utils.jsonify(dict(failed=True, msg=res),format=True)
            )
    def on_skipped(self, host):
        pass
    def on_error(self, host, err):
        print >>sys.stderr, "err: [%s] => %s\n" % (host, err)
    def on_no_hosts(self):
        print >>sys.stderr, "no hosts matched\n"
@@ -277,11 +277,11 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks):
        item = host_result.get('item', None)
        # show verbose output for non-setup module results if --verbose is used
        msg = ''
        if not self.verbose or host_result.get("verbose_override",None) is not None:
            if item:
                msg = "ok: [%s] => (item=%s)" % (host,item)
            else:
                if 'ansible_job_id' not in host_result or 'finished' in host_result:
                    msg = "ok: [%s]" % (host)
        else:
@@ -347,7 +347,7 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks):
class PlaybookCallbacks(object):
    ''' playbook.py callbacks used by /usr/bin/ansible-playbook '''
    def __init__(self, verbose=False):
        self.verbose = verbose
@@ -376,11 +376,11 @@ class PlaybookCallbacks(object):
        if private:
            return getpass.getpass(msg)
        return raw_input(msg)
    def on_setup(self):
        print banner("GATHERING FACTS")
    def on_import_for_host(self, host, imported_file):
        msg = "%s: importing %s" % (host, imported_file)

@@ -20,7 +20,7 @@ class AnsibleError(Exception):
    def __init__(self, msg):
        self.msg = msg
    def __str__(self):
        return self.msg

@@ -23,7 +23,6 @@ import os
import subprocess
import ansible.constants as C
from ansible.inventory.ini import InventoryParser
-from ansible.inventory.yaml import InventoryParserYaml
from ansible.inventory.script import InventoryScript
from ansible.inventory.group import Group
from ansible.inventory.host import Host
@@ -31,12 +30,12 @@ from ansible import errors
from ansible import utils
class Inventory(object):
    """
    Host inventory for ansible.
    """
    __slots__ = [ 'host_list', 'groups', '_restriction', '_is_script',
                  'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache' ]
    def __init__(self, host_list=C.DEFAULT_HOST_LIST):
@@ -46,14 +45,14 @@ class Inventory(object):
        # caching to avoid repeated calculations, particularly with
        # external inventory scripts.
        self._vars_per_host = {}
        self._vars_per_group = {}
        self._hosts_cache = {}
        # the inventory object holds a list of groups
        self.groups = []
        # a list of host(names) to contain current inquiries to
        self._restriction = None
@@ -83,10 +82,9 @@ class Inventory(object):
            if not data.startswith("---"):
                self.parser = InventoryParser(filename=host_list)
                self.groups = self.parser.groups.values()
            else:
-                self.parser = InventoryParserYaml(filename=host_list)
-                self.groups = self.parser.groups.values()
+                raise errors.AnsibleError("YAML inventory support is deprecated in 0.6 and removed in 0.7, see the migration script in examples/scripts in the git checkout")
    def _match(self, str, pattern_str):
        return fnmatch.fnmatch(str, pattern_str)
@@ -107,7 +105,7 @@ class Inventory(object):
            for host in group.get_hosts():
                if self._match(group.name, pat) or pat == 'all' or self._match(host.name, pat):
                    # must test explicitly for None because [] means no hosts allowed
                    if self._restriction==None or host.name in self._restriction:
                        if inverted:
                            if host.name in hosts:
                                del hosts[host.name]
@@ -135,7 +133,7 @@ class Inventory(object):
            if group.name == groupname:
                return group
        return None
    def get_group_variables(self, groupname):
        if groupname not in self._vars_per_group:
            self._vars_per_group[groupname] = self._get_group_variables(groupname)
@@ -157,8 +155,8 @@ class Inventory(object):
        if self._is_script:
            host = self.get_host(hostname)
            cmd = subprocess.Popen(
                [self.host_list,"--host",hostname],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE
            )
            (out, err) = cmd.communicate()
@@ -184,7 +182,7 @@ class Inventory(object):
        return [ h.name for h in self.get_hosts(pattern) ]
    def list_groups(self):
        return [ g.name for g in self.groups ]
    def get_restriction(self):
        return self._restriction

@@ -40,15 +40,15 @@ def detect_range(line = None):
    Returnes True if the given line contains a pattern, else False.
    '''
    if (not line.startswith("[") and
        line.find("[") != -1 and
        line.find(":") != -1 and
        line.find("]") != -1 and
        line.index("[") < line.index(":") < line.index("]")):
        return True
    else:
        return False
def expand_hostname_range(line = None):
    '''
    A helper function that expands a given line that contains a pattern
@@ -64,11 +64,11 @@ def expand_hostname_range(line = None):
    all_hosts = []
    if line:
        # A hostname such as db[1:6]-node is considered to consists
        # three parts:
        # head: 'db'
        # nrange: [1:6]; range() is a built-in. Can't use the name
        # tail: '-node'
        (head, nrange, tail) = line.replace('[','|').replace(']','|').split('|')
        bounds = nrange.split(":")
        if len(bounds) != 2:
@@ -85,7 +85,7 @@ def expand_hostname_range(line = None):
            rlen = None
        if rlen > 1 and rlen != len(end):
            raise errors.AnsibleError("host range format incorrectly specified!")
        for _ in range(int(beg), int(end)+1):
            if rlen:
                rseq = str(_).zfill(rlen)  # range sequence
@@ -93,5 +93,5 @@ def expand_hostname_range(line = None):
                rseq = str(_)
            hname = ''.join((head, rseq, tail))
            all_hosts.append(hname)
    return all_hosts
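A worked example of the two helpers above may help; the hostnames are hypothetical and the module path is assumed to be ansible.inventory.expand_hosts:

    from ansible.inventory.expand_hosts import detect_range, expand_hostname_range

    detect_range("db[01:03]-node")           # True: '[' comes before ':' before ']'
    detect_range("[webservers]")             # False: section headers are excluded
    expand_hostname_range("db[01:03]-node")  # ['db01-node', 'db02-node', 'db03-node']
    expand_hostname_range("web[1:3]")        # ['web1', 'web2', 'web3']

The leading zero in '01' sets rlen, so the expanded names keep their zero padding via zfill.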

@@ -52,7 +52,7 @@ class Group(object):
        for kid in self.child_groups:
            hosts.extend(kid.get_hosts())
        hosts.extend(self.hosts)
        return hosts
    def get_variables(self):

@@ -49,7 +49,7 @@ class Host(object):
            groups[g.name] = g
            ancestors = g.get_ancestors()
            for a in ancestors:
                groups[a.name] = a
        return groups.values()
    def get_variables(self):

@@ -30,7 +30,7 @@ from ansible import errors
from ansible import utils
class InventoryParser(object):
    """
    Host inventory for ansible.
    """
@@ -41,20 +41,20 @@ class InventoryParser(object):
        self.groups = {}
        self.hosts = {}
        self._parse()
    def _parse(self):
        self._parse_base_groups()
        self._parse_group_children()
        self._parse_group_variables()
        return self.groups
    # [webservers]
    # alpha
    # beta:2345
    # gamma sudo=True user=root
    # delta asdf=jkl favcolor=red
    def _parse_base_groups(self):
        # FIXME: refactor
@@ -93,7 +93,7 @@ class InventoryParser(object):
                tokens2 = hostname.rsplit(":", 1)
                hostname = tokens2[0]
                port = tokens2[1]
            host = None
            _all_hosts = []
            if hostname in self.hosts:

@@ -52,5 +52,5 @@ class InventoryScript(object):
            # FIXME: hack shouldn't be needed
            all.add_host(host)
            all.add_child_group(group)
        return groups

@@ -1,142 +0,0 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible import errors
from ansible import utils
import sys
class InventoryParserYaml(object):
    ''' Host inventory parser for ansible '''
    __slots__ = [ '_hosts', 'groups' ]
    def __init__(self, filename=C.DEFAULT_HOST_LIST):
        sys.stderr.write("WARNING: YAML inventory files are deprecated in 0.6 and will be removed in 0.7, to migrate" +
            " download and run https://github.com/ansible/ansible/blob/devel/examples/scripts/yaml_to_ini.py\n")
        fh = open(filename)
        data = fh.read()
        fh.close()
        self._hosts = {}
        self._parse(data)
    def _make_host(self, hostname):
        if hostname in self._hosts:
            return self._hosts[hostname]
        else:
            host = Host(hostname)
            self._hosts[hostname] = host
            return host
    # see file 'test/yaml_hosts' for syntax
    def _parse(self, data):
        # FIXME: refactor into subfunctions
        all = Group('all')
        ungrouped = Group('ungrouped')
        all.add_child_group(ungrouped)
        self.groups = dict(all=all, ungrouped=ungrouped)
        grouped_hosts = []
        yaml = utils.parse_yaml(data)
        # first add all groups
        for item in yaml:
            if type(item) == dict and 'group' in item:
                group = Group(item['group'])
                for subresult in item.get('hosts',[]):
                    if type(subresult) in [ str, unicode ]:
                        host = self._make_host(subresult)
                        group.add_host(host)
                        grouped_hosts.append(host)
                    elif type(subresult) == dict:
                        host = self._make_host(subresult['host'])
                        vars = subresult.get('vars',{})
                        if type(vars) == list:
                            for subitem in vars:
                                for (k,v) in subitem.items():
                                    host.set_variable(k,v)
                        elif type(vars) == dict:
                            for (k,v) in subresult.get('vars',{}).items():
                                host.set_variable(k,v)
                        else:
                            raise errors.AnsibleError("unexpected type for variable")
                        group.add_host(host)
                        grouped_hosts.append(host)
                vars = item.get('vars',{})
                if type(vars) == dict:
                    for (k,v) in item.get('vars',{}).items():
                        group.set_variable(k,v)
                elif type(vars) == list:
                    for subitem in vars:
                        if type(subitem) != dict:
                            raise errors.AnsibleError("expected a dictionary")
                        for (k,v) in subitem.items():
                            group.set_variable(k,v)
                self.groups[group.name] = group
                all.add_child_group(group)
        # add host definitions
        for item in yaml:
            if type(item) in [ str, unicode ]:
                host = self._make_host(item)
                if host not in grouped_hosts:
                    ungrouped.add_host(host)
            elif type(item) == dict and 'host' in item:
                host = self._make_host(item['host'])
                vars = item.get('vars', {})
                if type(vars)==list:
                    varlist, vars = vars, {}
                    for subitem in varlist:
                        vars.update(subitem)
                for (k,v) in vars.items():
                    host.set_variable(k,v)
                groups = item.get('groups', {})
                if type(groups) in [ str, unicode ]:
                    groups = [ groups ]
                if type(groups)==list:
                    for subitem in groups:
                        if subitem in self.groups:
                            group = self.groups[subitem]
                        else:
                            group = Group(subitem)
                            self.groups[group.name] = group
                            all.add_child_group(group)
                        group.add_host(host)
                        grouped_hosts.append(host)
                if host not in grouped_hosts:
                    ungrouped.add_host(host)
        # make sure ungrouped.hosts is the complement of grouped_hosts
        ungrouped_hosts = [host for host in ungrouped.hosts if host not in grouped_hosts]

@@ -56,7 +56,7 @@ except ImportError:
class AnsibleModule(object):
    def __init__(self, argument_spec, bypass_checks=False, no_log=False, check_invalid_arguments=True):
        '''
        common code for quickly building an ansible module in Python
        (although you can write modules in anything that can return JSON)
        see library/slurp and others for examples
@@ -67,7 +67,7 @@ class AnsibleModule(object):
        self._legal_inputs = []
        self._handle_aliases()
        # this may be disabled where modules are going to daisy chain into others
        if check_invalid_arguments:
            self._check_invalid_arguments()
@@ -151,7 +151,7 @@ class AnsibleModule(object):
        for x in items:
            (k, v) = x.split("=",1)
            params[k] = v
        return (params, args)
    def _log_invocation(self):
        ''' log that ansible ran the module '''
@@ -173,7 +173,7 @@ class AnsibleModule(object):
            return False
        else:
            self.fail_json(msg='Boolean %s not in either boolean list' % arg)
    def jsonify(self, data):
        return json.dumps(data)

@@ -28,12 +28,12 @@ SETUP_CACHE = collections.defaultdict(dict)
class PlayBook(object):
    '''
    runs an ansible playbook, given as a datastructure or YAML filename.
    A playbook is a deployment, config management, or automation based
    set of commands to run in series.
    multiple plays/tasks do not execute simultaneously, but tasks in each
    pattern do execute in parallel (according to the number of forks
    requested) among the hosts they address
    '''
@@ -86,7 +86,7 @@ class PlayBook(object):
            extra_vars = {}
        if only_tags is None:
            only_tags = [ 'all' ]
        self.module_path = module_path
        self.forks = forks
        self.timeout = timeout
@@ -107,7 +107,7 @@ class PlayBook(object):
        self.only_tags = only_tags
        self.inventory = ansible.inventory.Inventory(host_list)
        if not self.inventory._is_script:
            self.global_vars.update(self.inventory.get_group_variables('all'))
@@ -143,7 +143,7 @@ class PlayBook(object):
        return accumulated_plays
    # *****************************************************
    def run(self):
        ''' run all patterns in the playbook '''
@@ -186,11 +186,11 @@ class PlayBook(object):
            pattern=task.play.hosts, inventory=self.inventory, module_name=task.module_name,
            module_args=task.module_args, forks=self.forks,
            remote_pass=self.remote_pass, module_path=self.module_path,
            timeout=self.timeout, remote_user=task.play.remote_user,
            remote_port=task.play.remote_port, module_vars=task.module_vars,
            private_key_file=self.private_key_file,
            setup_cache=self.SETUP_CACHE, basedir=self.basedir,
            conditional=task.only_if, callbacks=self.runner_callbacks,
            verbose=self.verbose, sudo=task.play.sudo, sudo_user=task.play.sudo_user,
            transport=task.play.transport, sudo_pass=self.sudo_pass, is_playbook=True
        )
@@ -226,7 +226,7 @@ class PlayBook(object):
        for host, result in results['contacted'].iteritems():
            facts = result.get('ansible_facts', {})
            self.SETUP_CACHE[host].update(facts)
        # flag which notify handlers need to be run
        if len(task.notify) > 0:
            for host, results in results.get('contacted',{}).iteritems():
@@ -237,7 +237,7 @@ class PlayBook(object):
    # *****************************************************
    def _flag_handler(self, handlers, handler_name, host):
        '''
        if a task has any notify elements, flag handlers for run
        at end of execution cycle for hosts that have indicated
        changes have been made
@@ -256,8 +256,8 @@ class PlayBook(object):
    def _do_setup_step(self, play):
        ''' get facts from the remote system '''
        host_list = [ h for h in self.inventory.list_hosts(play.hosts)
            if not (h in self.stats.failures or h in self.stats.dark) ]
        if not play.gather_facts:
@@ -271,7 +271,7 @@ class PlayBook(object):
            pattern=play.hosts, module_name='setup', module_args={}, inventory=self.inventory,
            forks=self.forks, module_path=self.module_path, timeout=self.timeout, remote_user=play.remote_user,
            remote_pass=self.remote_pass, remote_port=play.remote_port, private_key_file=self.private_key_file,
            setup_cache=self.SETUP_CACHE, callbacks=self.runner_callbacks, sudo=play.sudo, sudo_user=play.sudo_user,
            verbose=self.verbose, transport=play.transport, sudo_pass=self.sudo_pass, is_playbook=True
        ).run()
        self.stats.compute(setup_results, setup=True)
@@ -297,14 +297,14 @@ class PlayBook(object):
        self.callbacks.on_play_start(play.name)
        # get facts from system
        rc = self._do_setup_step(play)
        # now with that data, handle contentional variable file imports!
        if play.vars_files and len(play.vars_files) > 0:
            play.update_vars_files(self.inventory.list_hosts(play.hosts))
        for task in play.tasks():
            # only run the task if the requested tags match
            should_run = False
            for x in self.only_tags:

@@ -25,10 +25,10 @@ import os
class Play(object):
    __slots__ = [
        'hosts', 'name', 'vars', 'vars_prompt', 'vars_files',
        'handlers', 'remote_user', 'remote_port',
        'sudo', 'sudo_user', 'transport', 'playbook',
        'tags', 'gather_facts', '_ds', '_handlers', '_tasks'
    ]
@@ -60,7 +60,7 @@ class Play(object):
        self._ds = ds
        self.playbook = playbook
        self.hosts = hosts
        self.name = ds.get('name', self.hosts)
        self.vars = ds.get('vars', {})
        self.vars_files = ds.get('vars_files', [])
@@ -126,7 +126,7 @@ class Play(object):
    def tasks(self):
        ''' return task objects for this play '''
        return self._tasks
    def handlers(self):
        ''' return handler objects for this play '''
@@ -146,7 +146,7 @@ class Play(object):
            raise errors.AnsibleError("'vars' section must contain only key/value pairs")
        vars = self.playbook.global_vars
        # translate a list of vars into a dict
        if type(self.vars) == list:
            for item in self.vars:
@@ -178,7 +178,7 @@ class Play(object):
    def update_vars_files(self, hosts):
        ''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''
        # now loop through all the hosts...
        for h in hosts:
            self._update_vars_files_for_host(h)
@@ -196,11 +196,11 @@ class Play(object):
            return True
        if tags_counted > 0:
            return False
        # didn't tag the play, and the play contains no steps
        # so assume we just want to gather facts
        return True
    # *************************************************
@@ -213,7 +213,7 @@ class Play(object):
        if type(self.vars_files) != list:
            self.vars_files = [ self.vars_files ]
        if (host is not None):
            inventory = self.playbook.inventory
            hostrec = inventory.get_host(host)
@@ -288,8 +288,8 @@ class Play(object):
                    raise errors.AnsibleError("%s must be stored as dictonary/hash: %s" % filename4)
                if host is not None and self._has_vars_in(filename2) and not self._has_vars_in(filename3):
                    # running a host specific pass and has host specific variables
                    # load into setup cache
                    self.playbook.SETUP_CACHE[host].update(new_vars)
                elif host is None:
                    # running a non-host specific pass and we can update the global vars instead
                    self.vars.update(new_vars)

@@ -20,9 +20,9 @@ from ansible import utils
class Task(object):
    __slots__ = [
        'name', 'action', 'only_if', 'async_seconds', 'async_poll_interval',
        'notify', 'module_name', 'module_args', 'module_vars',
        'play', 'notified_by', 'tags', 'with_items', 'first_available_file', 'ignore_errors'
    ]
@@ -63,7 +63,7 @@ class Task(object):
        self.first_available_file = ds.get('first_available_file', None)
        self.with_items = ds.get('with_items', None)
        self.ignore_errors = ds.get('ignore_errors', False)
        # notify can be a string or a list, store as a list
        if isinstance(self.notify, basestring):
            self.notify = [ self.notify ]
@@ -92,8 +92,8 @@ class Task(object):
        # make first_available_file accessable to Runner code
        if self.first_available_file:
            self.module_vars['first_available_file'] = self.first_available_file
        # process with_items so it can be used by Runner code
        if self.with_items is None:
            self.with_items = [ ]
        self.module_vars['items'] = self.with_items
@@ -109,4 +109,4 @@ class Task(object):
        elif type(apply_tags) == list:
            self.tags.extend(apply_tags)
        self.tags.extend(import_tags)

@@ -30,7 +30,7 @@ import codecs
import collections
import re
import ansible.constants as C
import ansible.inventory
from ansible import utils
from ansible import errors
@@ -38,7 +38,7 @@ from ansible import module_common
import poller
import connection
from ansible import callbacks as ans_callbacks
HAS_ATFORK=True
try:
    from Crypto.Random import atfork
@@ -65,7 +65,7 @@ def _executor_hook(job_queue, result_queue):
            pass
        except:
            traceback.print_exc()
################################################
class ReturnData(object):
@@ -103,7 +103,7 @@ class Runner(object):
    # see bin/ansible for how this is used...
    def __init__(self,
        host_list=C.DEFAULT_HOST_LIST,      # ex: /etc/ansible/hosts, legacy usage
        module_path=C.DEFAULT_MODULE_PATH,  # ex: /usr/share/ansible
        module_name=C.DEFAULT_MODULE_NAME,  # ex: copy
@@ -114,7 +114,7 @@ class Runner(object):
        remote_user=C.DEFAULT_REMOTE_USER,  # ex: 'username'
        remote_pass=C.DEFAULT_REMOTE_PASS,  # ex: 'password123' or None if using key
        remote_port=C.DEFAULT_REMOTE_PORT,  # if SSH on different ports
        private_key_file=C.DEFAULT_PRIVATE_KEY_FILE,  # if not using keys/passwords
        sudo_pass=C.DEFAULT_SUDO_PASS,      # ex: 'password123' or None
        background=0,                       # async poll every X seconds, else 0 for non-async
        basedir=None,                       # directory of playbook, if applicable
@@ -125,7 +125,7 @@ class Runner(object):
        verbose=False,                      # whether to show more or less
        sudo=False,                         # whether to run sudo or not
        sudo_user=C.DEFAULT_SUDO_USER,      # ex: 'root'
        module_vars=None,                   # a playbooks internals thing
        is_playbook=False,                  # running from playbook or not?
        inventory=None                      # reference to Inventory object
        ):
@@ -162,7 +162,7 @@ class Runner(object):
            raise errors.AnsibleError("SSH transport does not support passwords, only keys or agents")
        if self.transport == 'local':
            self.remote_user = pwd.getpwuid(os.geteuid())[0]
        # ensure we are using unique tmp paths
        random.seed()
@@ -201,7 +201,7 @@ class Runner(object):
    # *****************************************************
    def _execute_module(self, conn, tmp, module_name, args,
        async_jid=None, async_module=None, async_limit=None, inject=None):
        ''' runs a module that has already been transferred '''
@@ -270,7 +270,7 @@ class Runner(object):
        return self._execute_module(conn, tmp, 'async_wrapper', module_args,
            async_module=module_path,
            async_jid=self.generated_jid,
            async_limit=self.background,
            inject=inject
        )
@@ -301,7 +301,7 @@ class Runner(object):
        if not found:
            results=dict(failed=True, msg="could not find src in first_available_file list")
            return ReturnData(host=conn.host, results=results)
        source = utils.template(source, inject)
        source = utils.path_dwim(self.basedir, source)
@@ -309,10 +309,10 @@ class Runner(object):
        if local_md5 is None:
            result=dict(failed=True, msg="could not find src=%s" % source)
            return ReturnData(host=conn.host, result=result)
        remote_md5 = self._remote_md5(conn, tmp, dest)
        exec_rc = None
        if local_md5 != remote_md5:
            # transfer the file to a remote tmp location
            tmp_src = tmp + source.split('/')[-1]
@@ -344,7 +344,7 @@ class Runner(object):
        source = utils.template(source, inject)
        # apply templating to dest argument
        dest = utils.template(dest, inject)
        # files are saved in dest dir, with a subdir for each host, then the filename
        dest = "%s/%s/%s" % (utils.path_dwim(self.basedir, dest), conn.host, source)
        dest = dest.replace("//","/")
@@ -383,7 +383,7 @@ class Runner(object):
        else:
            result = dict(changed=False, md5sum=local_md5, file=source)
        return ReturnData(host=conn.host, result=result)
    # *****************************************************
    def _execute_template(self, conn, tmp, inject=None):
@@ -423,7 +423,7 @@ class Runner(object):
            result = dict(failed=True, msg=str(e))
            return ReturnData(host=conn.host, comm_ok=False, result=result)
        xfered = self._transfer_str(conn, tmp, 'source', resultant)
        # run the copy module, queue the file module
        self.module_args = "%s src=%s dest=%s" % (self.module_args, xfered, dest)
        return self._execute_module(conn, tmp, 'copy', self.module_args, inject=inject).daisychain('file')
@@ -435,7 +435,7 @@ class Runner(object):
        # FIXME: once assemble is ported over to the use the new common logic, this method
        # will be unneccessary as it can decide to daisychain via it's own module returns.
        # and this function can be deleted.
        return self._execute_module(conn, tmp, 'assemble', self.module_args, inject=inject).daisychain('file')
@@ -489,7 +489,7 @@ class Runner(object):
            # hack for apt and soon yum, with_items maps back into a single module call
            inject['item'] = ",".join(items)
            items = []
        if len(items) == 0:
            return self._executor_internal_inner(host, inject, port)
        else:
@@ -589,7 +589,7 @@ class Runner(object):
                changed = True
            result.result.update(result2.result)
            result.result['changed'] = changed
        del result.result['daisychain']
        self._delete_remote_files(conn, tmp)
@@ -631,7 +631,7 @@ class Runner(object):
            out = "\n".join(stdout.readlines())
        else:
            out = stdout
        if type(stderr) != str:
            err = "\n".join(stderr.readlines())
        else:
@@ -642,15 +642,15 @@ class Runner(object):
    # *****************************************************
    def _remote_md5(self, conn, tmp, path):
        ''' takes a remote md5sum without requiring python, and returns 0 if no file '''
        test = "rc=0; [[ -r \"%s\" ]] || rc=2; [[ -f \"%s\" ]] || rc=1" % (path,path)
        md5s = [
            "(/usr/bin/md5sum %s 2>/dev/null)" % path,
            "(/sbin/md5sum -q %s 2>/dev/null)" % path,
            "(/usr/bin/digest -a md5 -v %s 2>/dev/null)" % path
        ]
        cmd = " || ".join(md5s)
        cmd = "%s; %s || (echo \"${rc} %s\")" % (test, cmd, path)
        return self._low_level_exec_command(conn, cmd, tmp, sudoable=False).split()[0]
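To make the string assembly above concrete, for a hypothetical path /etc/motd the composed remote command would look roughly like:

    rc=0; [[ -r "/etc/motd" ]] || rc=2; [[ -f "/etc/motd" ]] || rc=1; (/usr/bin/md5sum /etc/motd 2>/dev/null) || (/sbin/md5sum -q /etc/motd 2>/dev/null) || (/usr/bin/digest -a md5 -v /etc/motd 2>/dev/null) || (echo "${rc} /etc/motd")

The first whitespace-separated token of the output is what gets returned: the md5 digest when one of the tools succeeds, otherwise the rc marker (1 for a missing file, 2 for an unreadable one).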
@@ -702,7 +702,7 @@ class Runner(object):
        module_data = module_data.replace(module_common.REPLACER, module_common.MODULE_COMMON)
        encoded_args = "\"\"\"%s\"\"\"" % utils.template(self.module_args, inject).replace("\"","\\\"")
        module_data = module_data.replace(module_common.REPLACER_ARGS, encoded_args)
        # use the correct python interpreter for the host
        if 'ansible_python_interpreter' in inject:
            interpreter = inject['ansible_python_interpreter']
@@ -771,13 +771,13 @@ class Runner(object):
    def run(self):
        ''' xfer & run module on all matched hosts '''
        # find hosts that match the pattern
        hosts = self.inventory.list_hosts(self.pattern)
        if len(hosts) == 0:
            self.callbacks.on_no_hosts()
            return dict(contacted={}, dark={})
        hosts = [ (self,x) for x in hosts ]
        results = None
        if self.forks > 1:

@@ -80,17 +80,17 @@ class ParamikoConnection(object):
        bufsize = 4096
        chan = self.ssh.get_transport().open_session()
        chan.get_pty()
        if not self.runner.sudo or not sudoable:
            quoted_command = '"$SHELL" -c ' + pipes.quote(cmd)
            chan.exec_command(quoted_command)
        else:
            # Rather than detect if sudo wants a password this time, -k makes
            # sudo always ask for a password if one is required. The "--"
            # tells sudo that this is the end of sudo options and the command
            # follows. Passing a quoted compound command to sudo (or sudo -s)
            # directly doesn't work, so we shellquote it with pipes.quote()
            # and pass the quoted string to the user's shell. We loop reading
            # output until we see the randomly-generated sudo prompt set with
            # the -p option.
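As a rough illustration of the invocation shape this comment describes (not the exact string the code builds), the sudo path ends up running something like:

    sudo -k && sudo -p "<randomly generated prompt>" -u <sudo_user> -- "$SHELL" -c '<pipes.quote(cmd)>'

with the connection plugin then reading output until that randomly generated prompt appears, so it knows when to feed in the sudo password. The paramiko plugin above and the ssh plugin below carry the same comment and follow the same pattern.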

@@ -58,11 +58,11 @@ class SSHConnection(object):
        ssh_cmd = ["ssh", "-tt", "-q"] + self.common_args + [self.host]
        if self.runner.sudo and sudoable:
            # Rather than detect if sudo wants a password this time, -k makes
            # sudo always ask for a password if one is required. The "--"
            # tells sudo that this is the end of sudo options and the command
            # follows. Passing a quoted compound command to sudo (or sudo -s)
            # directly doesn't work, so we shellquote it with pipes.quote()
            # and pass the quoted string to the user's shell. We loop reading
            # output until we see the randomly-generated sudo prompt set with
            # the -p option.
@@ -104,12 +104,12 @@ class SSHConnection(object):
            stdout += os.read(p.stdout.fileno(), 1024)
        # older versions of ssh generate this error which we ignore
        stdout=stdout.replace("tcgetattr: Invalid argument\n", "")
        # suppress Ubuntu 10.04/12.04 error on -tt option
        stdout=stdout.replace("tcgetattr: Inappropriate ioctl for device\n","")
        if p.returncode != 0 and stdout.find('Bad configuration option: ControlPersist') != -1:
            raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" before running again')
        return ('', stdout, '')
    def put_file(self, in_path, out_path):

@@ -36,7 +36,7 @@ except ImportError:
try:
    from hashlib import md5 as _md5
except ImportError:
    from md5 import md5 as _md5
###############################################################
@@ -117,10 +117,10 @@ def parse_json(data):
        results = {}
        try:
            tokens = shlex.split(data)
        except:
            print "failed to parse json: "+ data
            raise
        for t in tokens:
            if t.find("=") == -1:
                raise errors.AnsibleError("failed to parse: %s" % data)
@@ -131,7 +131,7 @@ def parse_json(data):
            elif value.lower() in [ 'false', '0' ]:
                value = False
            if key == 'rc':
                value = int(value)
            results[key] = value
        if len(results.keys()) == 0:
            return { "failed" : True, "parsed" : False, "msg" : data }
@@ -204,7 +204,7 @@ def template(text, vars):
        if (depth > 20):
            raise errors.AnsibleError("template recursion depth exceeded")
        prev_text = text
        text = varReplace(unicode(text), vars)
    return text
def template_from_file(basedir, path, vars):
@@ -223,7 +223,7 @@ def parse_yaml(data):
    ''' convert a yaml string to a data structure '''
    return yaml.load(data)
def parse_yaml_from_file(path):
    ''' convert a yaml file to a data structure '''
@@ -268,7 +268,7 @@ def default(value, function):
    return value
####################################################################
# option handling code for /usr/bin/ansible and ansible-playbook
# below this line
class SortedOptParser(optparse.OptionParser):
@@ -287,7 +287,7 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, asyn
    parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
        help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
    parser.add_option('-i', '--inventory-file', dest='inventory',
        help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
        default=constants.DEFAULT_HOST_LIST)
    parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
        help='ask for SSH password')
@@ -299,7 +299,7 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, asyn
        help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
        default=constants.DEFAULT_MODULE_PATH)
    parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
        dest='timeout',
        help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
    if output_opts:
@@ -314,9 +314,9 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, asyn
        parser.add_option('-U', '--sudo-user', dest='sudo_user', help='desired sudo user (default=root)',
            default=None)   # Can't default to root because we need to detect when this option was given
        parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER,
            dest='remote_user',
            help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
    if connect_opts:
        parser.add_option('-c', '--connection', dest='connection',
            choices=C.DEFAULT_TRANSPORT_OPTS,
@@ -325,7 +325,7 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, asyn
    if async_opts:
        parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
            dest='poll_interval',
            help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
        parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
            help='run asynchronously, failing after X seconds (default=N/A)')
@ -77,7 +77,7 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, install_reco
installed, upgradable = package_status(m, name, version, cache)
if not installed or (upgrade and upgradable):
packages += "'%s' " % package
if len(packages) != 0:
if force:
force_yes = '--force-yes'
@ -105,7 +105,7 @@ def remove(m, pkgspec, cache, purge=False):
installed, upgradable = package_status(m, name, version, cache)
if installed:
packages += "'%s' " % package
if len(packages) == 0:
m.exit_json(changed=False)
else:
@ -157,9 +157,9 @@ def main():
module.exit_json(changed=False)
force_yes = module.boolean(p['force'])
packages = p['package'].split(',')
latest = p['state'] == 'latest'
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
@ -45,39 +45,39 @@ def write_temp_file(data):
# main
def main():
module = AnsibleModule(
argument_spec = dict(
src = dict(required=True),
dest = dict(required=True),
)
)
changed=False
pathmd5 = None
destmd5 = None
src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest'])
if not os.path.exists(src):
module.fail_json(msg="Source (%s) does not exist" % src)
if not os.path.isdir(src):
module.fail_json(msg="Source (%s) is not a directory" % src)
path = write_temp_file(assemble_from_fragments(src))
pathmd5 = module.md5(path)
if os.path.exists(dest):
destmd5 = module.md5(dest)
if pathmd5 != destmd5:
shutil.copy(path, dest)
changed = True
# Mission complete
module.exit_json(src=src, dest=dest, md5sum=destmd5,
changed=changed, msg="OK",
daisychain="file", daisychain_args=module.params)
@ -55,7 +55,7 @@ def main():
# file not written yet? That means it is running
module.exit_json(results_file=log_path, ansible_job_id=jid, started=1)
else:
module_fail_json(ansible_job_id=jid, results_file=log_path,
msg="Could not parse job output: %s" % data)
if not data.has_key("started"):
@ -63,7 +63,7 @@ def daemonize_self():
dev_null = file('/dev/null','rw')
os.dup2(dev_null.fileno(), sys.stdin.fileno())
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
if len(sys.argv) < 3:
print json.dumps({
@ -101,11 +101,11 @@ def _run_command(wrapped_cmd, jid, log_path):
logfile.close()
logfile = open(log_path, "w")
result = {}
outdata = ''
try:
cmd = shlex.split(wrapped_cmd)
script = subprocess.Popen(cmd, shell=False,
stdin=None, stdout=logfile, stderr=logfile)
script.communicate()
outdata = file(log_path).read()
@ -125,7 +125,7 @@ def _run_command(wrapped_cmd, jid, log_path):
"cmd" : wrapped_cmd,
"data" : outdata, # temporary debug only
"msg" : traceback.format_exc()
}
result['ansible_job_id'] = jid
logfile.write(json.dumps(result))
logfile.close()
@ -38,7 +38,7 @@ import os.path
def keyfile(user, write=False):
"""
Calculate name of authorized keys file, optionally creating the
directories and file, properly setting permissions.
:param str user: name of user in passwd file
@ -51,13 +51,13 @@ def keyfile(user, write=False):
sshdir = os.path.join(homedir, ".ssh")
keysfile = os.path.join(sshdir, "authorized_keys")
if not write:
return keysfile
uid = user_entry.pw_uid
gid = user_entry.pw_gid
if not os.path.exists(sshdir):
os.mkdir(sshdir, 0700)
os.chown(sshdir, uid, gid)
os.chmod(sshdir, 0700)
@ -74,7 +74,7 @@ def keyfile(user, write=False):
def readkeys(filename):
if not os.path.isfile(filename):
return []
f = open(filename)
keys = [line.rstrip() for line in f.readlines()]
@ -97,19 +97,19 @@ def enforce_state(module, params):
state = params.get("state", "present")
# check current state -- just get the filename, don't create file
params["keyfile"] = keyfile(user, write=False)
keys = readkeys(params["keyfile"])
present = key in keys
# handle idempotent state=present
if state=="present":
if present:
module.exit_json(changed=False)
keys.append(key)
writekeys(keyfile(user,write=True), keys)
elif state=="absent":
if not present:
module.exit_json(changed=False)
keys.remove(key)
writekeys(keyfile(user,write=True), keys)
@ -133,4 +133,4 @@ def main():
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
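The enforce_state() logic above is idempotent in both directions: a key is appended only when it is missing and removed only when it is present, and writekeys() runs only when the file actually has to change. An illustrative ad-hoc call for this module (host pattern, user name and key material are placeholders, not part of this commit) would look roughly like:

    ansible webservers -m authorized_key -a "user=deploy key='ssh-rsa AAAA... deploy@example' state=present"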
@ -30,7 +30,7 @@ def main():
src=dict(required=True),
dest=dict(required=True)
)
)
src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest'])
@ -60,9 +60,9 @@ def main():
try:
shutil.copyfile(src, dest)
except shutil.Error:
module.fail_json(msg="failed to copy: %s and %s are the same" % (src, dest))
except IOError:
module.fail_json(msg="failed to copy: %s to %s" % (src, dest))
changed = True
else:
changed = False
@ -47,7 +47,7 @@ def add_path_info(kwargs):
kwargs['secontext'] = ':'.join(selinux_context(path))
else:
kwargs['state'] = 'absent'
return kwargs
def module_exit_json(**kwargs):
add_path_info(kwargs)
@ -155,7 +155,7 @@ def set_context_if_different(path, context, changed):
module_fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(path, owner, changed):
if owner is None:
return changed
@ -167,7 +167,7 @@ def set_owner_if_different(path, owner, changed):
return True
return changed
def set_group_if_different(path, group, changed):
if group is None:
return changed
@ -186,8 +186,8 @@ def set_mode_if_different(path, mode, changed):
# FIXME: support English modes
mode = int(mode, 8)
except Exception, e:
module_fail_json(path=path, msg='mode needs to be something octalish', details=str(e))
st = os.stat(path)
prev_mode = stat.S_IMODE(st[stat.ST_MODE])
@ -290,7 +290,7 @@ def main():
module_exit_json(path=path, changed=False)
if state == 'file':
if prev_state == 'absent':
module_fail_json(path=path, msg='file does not exist, use copy or template module to create')
@ -307,7 +307,7 @@ def main():
if prev_state == 'absent':
os.makedirs(path)
changed = True
# set modes owners and context as needed
changed = set_context_if_different(path, secontext, changed)
changed = set_owner_if_different(path, owner, changed)
@ -317,14 +317,14 @@ def main():
module_exit_json(path=path, changed=changed)
elif state == 'link':
if os.path.isabs(src):
abs_src = src
else:
abs_src = os.path.join(os.path.dirname(dest), src)
if not os.path.exists(abs_src):
module_fail_json(path=path, src=src, msg='src file does not exist')
if prev_state == 'absent':
os.symlink(src, path)
changed = True
@ -27,14 +27,14 @@ import tempfile
HAS_URLLIB2=True
try:
import urllib2
except ImportError:
HAS_URLLIB2=False
HAS_URLPARSE=True
try:
import urlparse
import socket
except ImportError:
HAS_URLPARSE=False
# ==============================================================
@ -92,7 +92,7 @@ def url_do_get(module, url, dest):
def url_get(module, url, dest):
"""
Download url and store at dest.
If dest is a directory, determine filename from url.
Return (tempfile, info about the request)
"""
@ -123,8 +123,8 @@ def url_get(module, url, dest):
# main
def main():
# does this really happen on non-ancient python?
if not HAS_URLLIB2:
module.fail_json(msg="urllib2 is not installed")
if not HAS_URLPARSE:
@ -138,16 +138,16 @@ def main():
dest = dict(required=True),
)
)
url = module.params['url']
dest = os.path.expanduser(module.params['dest'])
# download to tmpsrc
tmpsrc, info = url_get(module, url, dest)
md5sum_src = None
md5sum_dest = None
dest = info['actualdest']
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
os.remove(tmpsrc)
@ -156,7 +156,7 @@ def main():
os.remove(tmpsrc)
module.fail_json( msg="Source %s not readable" % (tmpsrc))
md5sum_src = module.md5(tmpsrc)
# check if there is no dest file
if os.path.exists(dest):
# raise an error if copy has no permission on dest
@ -171,22 +171,22 @@ def main():
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
module.fail_json( msg="Destination %s not writable" % (os.path.dirname(dest)))
if md5sum_src != md5sum_dest:
try:
shutil.copyfile(tmpsrc, dest)
except Exception, err:
os.remove(tmpsrc)
module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err)))
changed = True
else:
changed = False
os.remove(tmpsrc)
# Mission complete
module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum_src,
changed=changed, msg=info.get('msg',''),
daisychain="file", daisychain_args=info.get('daisychain_args',''))
# this is magic, see lib/ansible/module_common.py
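Given the url/dest parameters and the md5 comparison above, a typical ad-hoc call for this module would be along these lines (URL and destination are illustrative placeholders):

    ansible webservers -m get_url -a "url=http://example.com/release.tar.gz dest=/tmp/"

Because dest here is a directory, the filename is derived from the url, as described in the url_get() docstring above.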
@ -166,7 +166,7 @@ def main():
if rc != 0:
module.fail_json(msg=err)
else:
# else do a pull
before = get_version(dest)
(rc, out, err) = reset(dest)
if rc != 0:
@ -80,7 +80,7 @@ def group_info(group):
except KeyError:
return False
return info
# ===========================================
def main():
@ -23,8 +23,8 @@
# mount module - mount fs and define in fstab
# usage:
#
# mount name=mountpoint, src=device_to_be_mounted fstype=fstype
# opts=mount_opts, dump=0 passno=0 state=[present|absent|mounted|unmounted]
#
# absent == remove from fstab and unmounted
# present == add to fstab, do not change mount state
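Following the usage lines above, one illustrative invocation (device, mount point and filesystem type are placeholders) would be:

    ansible storage -m mount -a "name=/mnt/data src=/dev/sdb1 fstype=ext4 opts=defaults state=mounted"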
@ -52,7 +52,7 @@ def set_mount(**kwargs):
)
args.update(kwargs)
new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n'
to_write = []
exists = False
@ -69,7 +69,7 @@ def set_mount(**kwargs):
# but it is not our fault so leave it be
to_write.append(line)
continue
ld = {}
ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split()
@ -88,16 +88,16 @@ def set_mount(**kwargs):
to_write.append(new_line % ld)
else:
to_write.append(line)
if not exists:
to_write.append(new_line % args)
changed = True
if changed:
write_fstab(to_write, args['fstab'])
return (args['name'], changed)
def unset_mount(**kwargs):
""" remove a mount point from fstab """
@ -125,7 +125,7 @@ def unset_mount(**kwargs):
# but it is not our fault so leave it be
to_write.append(line)
continue
ld = {}
ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split()
@ -141,7 +141,7 @@ def unset_mount(**kwargs):
return (args['name'], changed)
def mount(**kwargs):
""" mount up a path or remount if needed """
@ -185,7 +185,7 @@ def main():
fstab = dict(default=None)
)
)
changed = False
rc = 0
args = {
@ -201,12 +201,12 @@ def main():
args['dump'] = module.params['dump']
if module.params['fstab'] is not None:
args['fstab'] = module.params['fstab']
# absent == remove from fstab and unmounted
# unmounted == do not change fstab state, but unmount
# present == add to fstab, do not change mount state
# mounted == add to fstab if not there and make sure it is mounted, if it has changed in fstab then remount it
state = module.params['state']
name = module.params['name']
if state == 'absent':
@ -216,24 +216,24 @@ def main():
res,msg = umount(**args)
if res:
fail_json(msg="Error unmounting %s: %s" % (name, msg))
if os.path.exists(name):
try:
os.rmdir(name)
except (OSError, IOError), e:
fail_json(msg="Error rmdir %s: %s" % (name, str(e)))
module.exit_json(changed=changed, **args)
if state == 'unmounted':
if os.path.ismount(name):
res,msg = umount(**args)
if res:
fail_json(msg="Error unmounting %s: %s" % (name, msg))
changed = True
module.exit_json(changed=changed, **args)
if state in ['mounted', 'present']:
name, changed = set_mount(**args)
if state == 'mounted':
@ -242,7 +242,7 @@ def main():
os.makedirs(name)
except (OSError, IOError), e:
fail_json(msg="Error making dir %s: %s" % (name, str(e)))
res = 0
if os.path.ismount(name):
if changed:
@ -250,16 +250,16 @@ def main():
else:
changed = True
res,msg = mount(**args)
if res:
fail_json(msg="Error mounting %s: %s" % (name, msg))
module.exit_json(changed=changed, **args)
module.fail_json(msg='Unexpected position reached')
sys.exit(0)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
@ -101,14 +101,14 @@ def _get_service_status(name):
# iptables status command output is lame
# TODO: lookup if we can use a return code for this instead?
running = True
return running
def _run(cmd):
# returns (rc, stdout, stderr) from shell command
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
return (process.returncode, stdout, stderr)
def _do_enable(name, enable):
@ -126,9 +126,9 @@ def _do_enable(name, enable):
rc, stdout, stderr = _run("%s %s %s" % (CHKCONFIG, name, valid_argument['on']))
else:
rc, stdout, stderr = _run("%s %s %s" % (CHKCONFIG, name, valid_argument['off']))
return rc, stdout, stderr
def main():
module = AnsibleModule(
argument_spec = dict(
@ -145,7 +145,7 @@ def main():
# ===========================================
# find binaries locations on minion
_find_binaries(module)
# ===========================================
# get service status
running = _get_service_status(name)
@ -156,7 +156,7 @@ def main():
rc = 0
err = ''
out = ''
if module.params['enabled']:
rc_enable, out_enable, err_enable = _do_enable(name, enable)
rc += rc_enable
@ -208,10 +208,10 @@ def main():
result['state'] = state
rc, stdout, stderr = _run("%s %s status" % (SERVICE, name))
module.exit_json(**result);
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
@ -360,7 +360,7 @@ class FreeBSDHardware(Hardware):
# Get swapinfo.  swapinfo output looks like:
# Device          1M-blocks     Used    Avail Capacity
# /dev/ada0p3        314368        0   314368     0%
#
cmd = subprocess.Popen("/usr/sbin/swapinfo -m", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
@ -601,7 +601,7 @@ def run_setup(module):
setup_options = {}
facts = ansible_facts()
for (k, v) in facts.items():
setup_options["ansible_%s" % k] = v
@ -1,3 +1,3 @@
# There is actually no actual shell module source, when you use 'shell' in ansible,
# it runs the 'command' module with special arguments and it behaves differently.
# See the command source and the comment "#USE_SHELL".
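A concrete way to read that comment: the invocation below (host pattern and command are placeholders) is routed through the command module with shell processing enabled, which is what makes pipes and redirection usable here:

    ansible webservers -m shell -a 'grep -c error /var/log/syslog > /tmp/error_count'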
@ -1,4 +1,4 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@ -24,9 +24,9 @@ try:
except ImportError:
print "failed=True msg='libvirt python module unavailable'"
sys.exit(1)
ALL_COMMANDS = []
VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause',
'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart']
HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype']
ALL_COMMANDS.extend(VM_COMMANDS)
@ -141,11 +141,11 @@ class LibvirtConnection(object):
def get_autostart(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.autostart()
def set_autostart(self, vmid, val):
vm = self.conn.lookupByName(vmid)
return vm.setAutostart(val)
class Virt(object):
@ -157,7 +157,7 @@ class Virt(object):
def get_vm(self, vmid):
self.__get_conn()
return self.conn.find_vm(vmid)
def state(self):
vms = self.list_vms()
state = []
@ -216,7 +216,7 @@ class Virt(object):
def virttype(self):
return self.__get_conn().get_type()
def autostart(self, vmid):
self.conn = self.__get_conn()
return self.conn.set_autostart(vmid, True)
@ -313,26 +313,26 @@ def core(module):
v = Virt()
res = {}
if state:
if not guest:
module.fail_json(msg = "state change requires a guest specified")
res['changed'] = False
if state == 'running':
if v.status(guest) is not 'running':
res['changed'] = True
res['msg'] = v.start(guest)
elif state == 'shutdown':
if v.status(guest) is not 'shutdown':
res['changed'] = True
res['msg'] = v.shutdown(guest)
else:
module.fail_json(msg="unexpected state")
return VIRT_SUCCESS, res
if command:
if command in VM_COMMANDS:
if not guest:
@ -341,20 +341,20 @@ def core(module):
if type(res) != dict:
res = { command: res }
return VIRT_SUCCESS, res
elif hasattr(v, command):
res = getattr(v, command)()
if type(res) != dict:
res = { command: res }
return VIRT_SUCCESS, res
else:
module.fail_json(msg="Command %s not recognized" % basecmd)
module.fail_json(msg="expected state or command parameter to be specified")
def main():
module = AnsibleModule(argument_spec=dict(
name = dict(aliases=['guest']),
state = dict(choices=['running', 'shutdown']),
@ -375,4 +375,4 @@ def main():
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
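Given the name/guest alias and the running/shutdown state choices declared in main() above, an illustrative state-change call (host pattern and guest name are placeholders) would be:

    ansible virthosts -m virt -a "guest=webvm01 state=running"

The VM_COMMANDS and HOST_COMMANDS listed earlier are reached through core()'s command branch rather than through state.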
@ -33,7 +33,7 @@ def is_installed(repoq, pkgspec, qf=def_qf):
rc,out,err = run(cmd)
if rc == 0:
return [ p for p in out.split('\n') if p.strip() ]
return []
def is_available(repoq, pkgspec, qf=def_qf):
@ -50,7 +50,7 @@ def is_update(repoq, pkgspec, qf=def_qf):
rc,out,err = run(cmd)
if rc == 0:
return set([ p for p in out.split('\n') if p.strip() ])
return []
@ -60,26 +60,26 @@ def what_provides(repoq, req_spec, qf=def_qf):
ret = []
if rc == 0:
ret = set([ p for p in out.split('\n') if p.strip() ])
return ret
def local_nvra(path):
"""return nvra of a local rpm passed in"""
cmd = "/bin/rpm -qp --qf='%%{name}-%%{version}-%%{release}.%%{arch}\n' %s'" % path
rc, out, err = run(cmd)
if rc != 0:
return None
nvra = out.split('\n')[0]
return nvra
def pkg_to_dict(pkgstr):
if pkgstr.strip():
n,e,v,r,a,repo = pkgstr.split('|')
else:
return {'error_parsing': pkgstr}
d = {
'name':n,
'arch':a,
@ -89,7 +89,7 @@ def pkg_to_dict(pkgstr):
'repo':repo,
'nevra': '%s:%s-%s-%s.%s' % (e,n,v,r,a)
}
if repo == 'installed':
d['yumstate'] = 'installed'
else:
@ -103,16 +103,16 @@ def repolist(repoq, qf="%{repoid}"):
ret = []
if rc == 0:
ret = set([ p for p in out.split('\n') if p.strip() ])
return ret
def list_stuff(conf_file, stuff):
qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
repoq = '%s --plugins --quiet -q ' % repoquery
if conf_file and os.path.exists(conf_file):
repoq = '%s -c %s --plugins --quiet -q ' % (repoquery,conf_file)
if stuff == 'installed':
return [ pkg_to_dict(p) for p in is_installed(repoq, '-a', qf=qf) if p.strip() ]
elif stuff == 'updates':
@ -126,7 +126,7 @@ def list_stuff(conf_file, stuff):
def run(command):
try:
cmd = subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
except (OSError, IOError), e:
@ -144,7 +144,7 @@ def run(command):
err = ''
else:
rc = cmd.returncode
return rc, out, err
@ -161,8 +161,8 @@ def install(module, items, repoq, yum_basecmd):
# check if pkgspec is installed (if possible for idempotence)
# localpkg
if spec.endswith('.rpm'):
# get the pkg name-v-r.arch
nvra = local_nvra(spec)
# look for them in the rpmdb
if is_installed(repoq, nvra):
# if they are there, skip it
@ -181,23 +181,23 @@ def install(module, items, repoq, yum_basecmd):
res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
res['failed'] = True
module.exit_json(**res)
# if any of them are installed
# then nothing to do
found = False
for this in pkglist:
if is_installed(repoq, this):
found = True
res['results'].append('%s providing %s is already installed' % (this, spec))
if found:
continue
# if not - then pass in the spec as what to install
# we could get here if nothing provides it but that's not
# the error we're catching here
pkg = spec
cmd = "%s install '%s'" % (yum_basecmd, pkg)
rc, out, err = run(cmd)
# FIXME - if we did an install - go and check the rpmdb to see if it actually installed
@ -213,9 +213,9 @@ def install(module, items, repoq, yum_basecmd):
res['rc'] = 0
res['results'].append(out)
res['msg'] += err
module.exit_json(**res)
def remove(module, items, repoq, yum_basecmd):
res = {}
@ -223,7 +223,7 @@ def remove(module, items, repoq, yum_basecmd):
res['msg'] = ''
res['changed'] = False
res['rc'] = 0
for spec in items:
pkg = None
@ -237,12 +237,12 @@ def remove(module, items, repoq, yum_basecmd):
res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
res['failed']=True
module.exit_json(**res)
found = False
for this in pkglist:
if is_installed(repoq, this):
found = True
if not found:
res['results'].append('%s is not installed' % spec)
continue
@ -250,7 +250,7 @@ def remove(module, items, repoq, yum_basecmd):
cmd = "%s remove '%s'" % (yum_basecmd, pkg)
rc, out, err = run(cmd)
# FIXME if we ran the remove - check to make sure it actually removed :(
# look for the pkg in the rpmdb - this is notoriously hard for groups :(
if rc != 0:
@ -264,7 +264,7 @@ def remove(module, items, repoq, yum_basecmd):
res['rc'] = 0
res['results'].append(out)
res['msg'] += err
module.exit_json(**res)
def latest(module, items, repoq, yum_basecmd):
@ -273,7 +273,7 @@ def latest(module, items, repoq, yum_basecmd):
res['msg'] = ''
res['changed'] = False
res['rc'] = 0
for spec in items:
pkg = None
@ -296,11 +296,11 @@ def latest(module, items, repoq, yum_basecmd):
found = True
else:
nothing_to_do = True
if nothing_to_do:
res['results'].append("All packages providing %s are up to date" % spec)
continue
if not found:
basecmd = 'install'
else:
@ -333,16 +333,16 @@ def latest(module, items, repoq, yum_basecmd):
res['rc'] = 0
res['results'].append(out)
res['msg'] += err
module.exit_json(**res)
def ensure(module, state, pkgspec, conf_file):
res = {}
stdout = ""
stderr = ""
# take multiple args comma separated
items = [pkgspec]
if pkgspec.find(',') != -1:
@ -361,9 +361,9 @@ def ensure(module, state, pkgspec, conf_file):
elif state == 'latest':
latest(module, items, repoq, yum_basecmd)
# should be caught by AnsibleModule argument_spec
return dict(changed=False, failed=True, results='', errors='unexpected state')
def remove_only(pkgspec):
# remove this pkg and only this pkg - fail if it will require more to remove
@ -380,7 +380,7 @@ def main():
# list=available
# list=repos
# list=pkgspec
module = AnsibleModule(
argument_spec = dict(
pkg=dict(aliases=['name']),
@ -400,11 +400,11 @@ def main():
if not os.path.exists(repoquery):
module.fail_json(msg="%s is required to run this module. Please install the yum-utils package." % repoquery)
if params['list']:
results = dict(results=list_stuff(params['conf_file'], params['list']))
module.exit_json(**results)
else:
pkg = params['pkg']
if 'pkg' is None: