mirror of https://github.com/ansible/ansible.git
Creating playbook executor and dependent classes
parent b6c3670f8a
commit 62d79568be
@@ -0,0 +1,167 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import pipes
import random

from ansible import constants as C


__all__ = ['ConnectionInformation']


class ConnectionInformation:

    '''
    This class is used to consolidate the connection information for
    hosts in a play and child tasks, where the task may override some
    connection/authentication information.
    '''

    def __init__(self, play=None, options=None):
        # FIXME: implement the new methodology here for supporting
        #        various different auth escalation methods (becomes, etc.)

        self.connection = C.DEFAULT_TRANSPORT
        self.remote_user = 'root'
        self.password = ''
        self.port = 22
        self.su = False
        self.su_user = ''
        self.su_pass = ''
        self.sudo = False
        self.sudo_user = ''
        self.sudo_pass = ''
        self.verbosity = 0
        self.only_tags = set()
        self.skip_tags = set()

        if play:
            self.set_play(play)

        if options:
            self.set_options(options)

    def set_play(self, play):
        '''
        Configures this connection information instance with data from
        the play class.
        '''

        if play.connection:
            self.connection = play.connection

        self.remote_user = play.remote_user
        self.password = ''
        self.port = int(play.port) if play.port else 22
        self.su = play.su
        self.su_user = play.su_user
        self.su_pass = play.su_pass
        self.sudo = play.sudo
        self.sudo_user = play.sudo_user
        self.sudo_pass = play.sudo_pass

    def set_options(self, options):
        '''
        Configures this connection information instance with data from
        options specified by the user on the command line. These have a
        higher precedence than those set on the play or host.
        '''

        # FIXME: set other values from options here?

        self.verbosity = options.verbosity
        if options.connection:
            self.connection = options.connection

        # get the tag info from options, converting a comma-separated list
        # of values into a proper list if need be
        if isinstance(options.tags, list):
            self.only_tags.update(options.tags)
        elif isinstance(options.tags, basestring):
            self.only_tags.update(options.tags.split(','))
        if isinstance(options.skip_tags, list):
            self.skip_tags.update(options.skip_tags)
        elif isinstance(options.skip_tags, basestring):
            self.skip_tags.update(options.skip_tags.split(','))

    def copy(self, ci):
        '''
        Copies the connection info from another connection info object, used
        when merging in data from task overrides.
        '''

        self.connection = ci.connection
        self.remote_user = ci.remote_user
        self.password = ci.password
        self.port = ci.port
        self.su = ci.su
        self.su_user = ci.su_user
        self.su_pass = ci.su_pass
        self.sudo = ci.sudo
        self.sudo_user = ci.sudo_user
        self.sudo_pass = ci.sudo_pass
        self.verbosity = ci.verbosity
        self.only_tags = ci.only_tags.copy()
        self.skip_tags = ci.skip_tags.copy()

    def set_task_override(self, task):
        '''
        Sets attributes from the task if they are set, which will override
        those from the play.
        '''

        new_info = ConnectionInformation()
        new_info.copy(self)

        for attr in ('connection', 'remote_user', 'su', 'su_user', 'su_pass', 'sudo', 'sudo_user', 'sudo_pass'):
            if hasattr(task, attr):
                attr_val = getattr(task, attr)
                if attr_val:
                    setattr(new_info, attr, attr_val)

        return new_info

    def make_sudo_cmd(self, sudo_exe, executable, cmd):
        """
        Helper function for wrapping commands with sudo.

        Rather than detect if sudo wants a password this time, -k makes
        sudo always ask for a password if one is required. Passing a quoted
        compound command to sudo (or sudo -s) directly doesn't work, so we
        shellquote it with pipes.quote() and pass the quoted string to the
        user's shell. We loop reading output until we see the randomly-
        generated sudo prompt set with the -p option.
        """

        randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
        prompt = '[sudo via ansible, key=%s] password: ' % randbits
        success_key = 'SUDO-SUCCESS-%s' % randbits

        sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % (
            sudo_exe, sudo_exe, C.DEFAULT_SUDO_FLAGS, prompt,
            self.sudo_user, executable or '$SHELL',
            pipes.quote('echo %s; %s' % (success_key, cmd))
        )

        #return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key)
        return (sudocmd, prompt, success_key)
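To make the play/task layering above concrete, here is a minimal sketch (not part of the commit) of how play-level settings and a task-level override combine; FakePlay and FakeTask are hypothetical stand-ins for the real Play and Task objects.

# Hypothetical stand-ins; only the attributes read by set_play()/set_task_override() are defined.
class FakePlay(object):
    connection = 'ssh'
    remote_user = 'deploy'
    port = 2222
    su = False
    su_user = ''
    su_pass = ''
    sudo = True
    sudo_user = 'root'
    sudo_pass = ''

class FakeTask(object):
    sudo_user = 'postgres'   # the task narrows only the sudo user

conn_info = ConnectionInformation(play=FakePlay())
task_info = conn_info.set_task_override(FakeTask())
print(task_info.sudo_user)   # 'postgres'; everything else still comes from the play
print(task_info.port)        # 2222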
@@ -0,0 +1,66 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from multiprocessing.managers import SyncManager, BaseProxy
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
from ansible.playbook.play import Play
from ansible.errors import AnsibleError

__all__ = ['AnsibleManager']


class VariableManagerWrapper:
    '''
    This class simply acts as a wrapper around the VariableManager class,
    since manager proxies expect a new object to be returned rather than
    any existing one. Using this wrapper, a shared proxy can be created
    and an existing VariableManager class assigned to it, which can then
    be accessed through the exposed proxy methods.
    '''

    def __init__(self):
        self._vm = None

    def get_vars(self, loader, play=None, host=None, task=None):
        return self._vm.get_vars(loader=loader, play=play, host=host, task=task)

    def set_variable_manager(self, vm):
        self._vm = vm

    def set_host_variable(self, host, varname, value):
        self._vm.set_host_variable(host, varname, value)

    def set_host_facts(self, host, facts):
        self._vm.set_host_facts(host, facts)


class AnsibleManager(SyncManager):
    '''
    This is our custom manager class, which exists only so we may register
    the new proxy below
    '''
    pass

AnsibleManager.register(
    typeid='VariableManagerWrapper',
    callable=VariableManagerWrapper,
)
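A minimal sketch (not from the commit) of how a proxy registered this way is normally used: start the manager, create the shared wrapper, and hand it the real VariableManager. The names real_variable_manager, my_loader, and my_play are assumed to exist elsewhere.

manager = AnsibleManager()
manager.start()

shared_vm = manager.VariableManagerWrapper()            # returns a proxy object
shared_vm.set_variable_manager(real_variable_manager)   # assumed VariableManager instance

# worker processes can now call the exposed methods through the proxy
all_vars = shared_vm.get_vars(loader=my_loader, play=my_play)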
@@ -0,0 +1,185 @@
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# from python and deps
from cStringIO import StringIO
import inspect
import json
import os
import shlex

# from Ansible
from ansible import __version__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.parsing.utils.jsonify import jsonify

REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = "# POWERSHELL_COMMON"
REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""

class ModuleReplacer(object):

    """
    The Replacer is used to insert chunks of code into modules before
    transfer. Rather than doing classical python imports, this allows for more
    efficient transfer in a no-bootstrapping scenario by not moving extra files
    over the wire, and also takes care of embedding arguments in the transferred
    modules.

    This version is done in such a way that local imports can still be
    used in the module code, so IDEs don't have to be aware of what is going on.

    Example:

    from ansible.module_utils.basic import *

    ... will result in the insertion of basic.py into the module

    from the module_utils/ directory in the source tree.

    All modules are required to import at least basic, though there will also
    be other snippets.

    # POWERSHELL_COMMON

    Also results in the inclusion of the common code in powershell.ps1

    """

    # ******************************************************************************

    def __init__(self, strip_comments=False):
        # FIXME: these members need to be prefixed with '_' and the rest of the file fixed
        this_file = inspect.getfile(inspect.currentframe())
        # we've moved the module_common relative to the snippets, so fix the path
        self.snippet_path = os.path.join(os.path.dirname(this_file), '..', 'module_utils')
        self.strip_comments = strip_comments

    # ******************************************************************************

    def slurp(self, path):
        if not os.path.exists(path):
            raise AnsibleError("imported module support code does not exist at %s" % path)
        fd = open(path)
        data = fd.read()
        fd.close()
        return data

    def _find_snippet_imports(self, module_data, module_path):
        """
        Given the source of the module, convert it to a Jinja2 template to insert
        module code and return whether it's a new or old style module.
        """

        module_style = 'old'
        if REPLACER in module_data:
            module_style = 'new'
        elif 'from ansible.module_utils.' in module_data:
            module_style = 'new'
        elif 'WANT_JSON' in module_data:
            module_style = 'non_native_want_json'

        output = StringIO()
        lines = module_data.split('\n')
        snippet_names = []

        for line in lines:

            if REPLACER in line:
                output.write(self.slurp(os.path.join(self.snippet_path, "basic.py")))
                snippet_names.append('basic')
            if REPLACER_WINDOWS in line:
                ps_data = self.slurp(os.path.join(self.snippet_path, "powershell.ps1"))
                output.write(ps_data)
                snippet_names.append('powershell')
            elif line.startswith('from ansible.module_utils.'):
                tokens = line.split(".")
                import_error = False
                if len(tokens) != 3:
                    import_error = True
                if " import *" not in line:
                    import_error = True
                if import_error:
                    raise AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
                snippet_name = tokens[2].split()[0]
                snippet_names.append(snippet_name)
                output.write(self.slurp(os.path.join(self.snippet_path, snippet_name + ".py")))
            else:
                if self.strip_comments and line.startswith("#") or line == '':
                    pass
                output.write(line)
                output.write("\n")

        if not module_path.endswith(".ps1"):
            # Unixy modules
            if len(snippet_names) > 0 and not 'basic' in snippet_names:
                raise AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
        else:
            # Windows modules
            if len(snippet_names) > 0 and not 'powershell' in snippet_names:
                raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)

        return (output.getvalue(), module_style)

    # ******************************************************************************

    def modify_module(self, module_path, module_args):

        with open(module_path) as f:

            # read in the module source
            module_data = f.read()

            (module_data, module_style) = self._find_snippet_imports(module_data, module_path)

            #module_args_json = jsonify(module_args)
            module_args_json = json.dumps(module_args)
            encoded_args = repr(module_args_json.encode('utf-8'))

            # these strings should be part of the 'basic' snippet which is required to be included
            module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
            module_data = module_data.replace(REPLACER_ARGS, "''")
            module_data = module_data.replace(REPLACER_COMPLEX, encoded_args)

            # FIXME: we're not passing around an inject dictionary anymore, so
            #        this needs to be fixed with whatever method we use for vars
            #        like this moving forward
            #if module_style == 'new':
            #    facility = C.DEFAULT_SYSLOG_FACILITY
            #    if 'ansible_syslog_facility' in inject:
            #        facility = inject['ansible_syslog_facility']
            #    module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)

            lines = module_data.split("\n")
            shebang = None
            if lines[0].startswith("#!"):
                shebang = lines[0].strip()
                args = shlex.split(str(shebang[2:]))
                interpreter = args[0]
                interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)

                # FIXME: more inject stuff here...
                #if interpreter_config in inject:
                #    lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:]))
                #    module_data = "\n".join(lines)

            return (module_data, module_style, shebang)
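For orientation, a minimal sketch (not part of the commit) of how the replacer is driven: point it at a module file on disk and get back the embedded source, its detected style, and the shebang line. The module path below is hypothetical.

replacer = ModuleReplacer(strip_comments=False)
(module_data, module_style, shebang) = replacer.modify_module(
    '/usr/share/ansible/modules/ping',   # hypothetical path to a module file
    dict(data='pong'),                   # module arguments to embed
)
print(module_style)   # 'new' for modules importing ansible.module_utils.basic
print(shebang)        # e.g. '#!/usr/bin/python'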
@@ -0,0 +1,258 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import *
from ansible.playbook.task import Task

from ansible.utils.boolean import boolean

__all__ = ['PlayIterator']


# the primary running states for the play iteration
ITERATING_SETUP = 0
ITERATING_TASKS = 1
ITERATING_RESCUE = 2
ITERATING_ALWAYS = 3
ITERATING_COMPLETE = 4

# the failure states for the play iteration
FAILED_NONE = 0
FAILED_SETUP = 1
FAILED_TASKS = 2
FAILED_RESCUE = 3
FAILED_ALWAYS = 4

class PlayState:

    '''
    A helper class, which keeps track of the task iteration
    state for a given playbook. This is used in the PlayIterator
    class on a per-host basis.
    '''

    # FIXME: this class is the representation of a finite state machine,
    #        so we really should have a well defined state representation
    #        documented somewhere...

    def __init__(self, parent_iterator, host):
        '''
        Create the initial state, which tracks the running state as well
        as the failure state, which are used when executing block branches
        (rescue/always)
        '''

        self._run_state = ITERATING_SETUP
        self._failed_state = FAILED_NONE
        self._task_list = parent_iterator._play.compile()
        self._gather_facts = parent_iterator._play.gather_facts
        self._host = host

        self._cur_block = None
        self._cur_role = None
        self._cur_task_pos = 0
        self._cur_rescue_pos = 0
        self._cur_always_pos = 0
        self._cur_handler_pos = 0

    def next(self, peek=False):
        '''
        Determines and returns the next available task from the playbook,
        advancing through the list of plays as it goes. If peek is set to True,
        the internal state is not stored.
        '''

        task = None

        # save this locally so that we can peek at the next task
        # without updating the internal state of the iterator
        run_state = self._run_state
        failed_state = self._failed_state
        cur_block = self._cur_block
        cur_role = self._cur_role
        cur_task_pos = self._cur_task_pos
        cur_rescue_pos = self._cur_rescue_pos
        cur_always_pos = self._cur_always_pos
        cur_handler_pos = self._cur_handler_pos

        while True:
            if run_state == ITERATING_SETUP:
                if failed_state == FAILED_SETUP:
                    run_state = ITERATING_COMPLETE
                else:
                    run_state = ITERATING_TASKS

                    if self._gather_facts == 'smart' and not self._host.gathered_facts or boolean(self._gather_facts):
                        self._host.set_gathered_facts(True)
                        task = Task()
                        task.action = 'setup'
                        break
            elif run_state == ITERATING_TASKS:
                # if there is any failure state besides FAILED_NONE, we should
                # change to some other running state
                if failed_state != FAILED_NONE or cur_task_pos > len(self._task_list) - 1:
                    # if there is a block (and there always should be), start running
                    # the rescue portion if it exists (and if we haven't failed that
                    # already), or the always portion (if it exists and we didn't fail
                    # there too). Otherwise, we're done iterating.
                    if cur_block:
                        if failed_state != FAILED_RESCUE and cur_block.rescue:
                            run_state = ITERATING_RESCUE
                            cur_rescue_pos = 0
                        elif failed_state != FAILED_ALWAYS and cur_block.always:
                            run_state = ITERATING_ALWAYS
                            cur_always_pos = 0
                        else:
                            run_state = ITERATING_COMPLETE
                    else:
                        run_state = ITERATING_COMPLETE
                else:
                    task = self._task_list[cur_task_pos]
                    if cur_block is not None and cur_block != task._block:
                        run_state = ITERATING_ALWAYS
                        continue
                    else:
                        cur_block = task._block
                    cur_task_pos += 1

                    # Break out of the while loop now that we have our task
                    break

            elif run_state == ITERATING_RESCUE:
                # If we're iterating through the rescue tasks, make sure we haven't
                # failed yet. If so, move on to the always block or if not get the
                # next rescue task (if one exists)
                if failed_state == FAILED_RESCUE or cur_block.rescue is None or cur_rescue_pos > len(cur_block.rescue) - 1:
                    run_state = ITERATING_ALWAYS
                else:
                    task = cur_block.rescue[cur_rescue_pos]
                    cur_rescue_pos += 1
                    break

            elif run_state == ITERATING_ALWAYS:
                # If we're iterating through the always tasks, make sure we haven't
                # failed yet. If so, we're done iterating otherwise get the next always
                # task (if one exists)
                if failed_state == FAILED_ALWAYS or cur_block.always is None or cur_always_pos > len(cur_block.always) - 1:
                    cur_block = None
                    if failed_state == FAILED_ALWAYS or cur_task_pos > len(self._task_list) - 1:
                        run_state = ITERATING_COMPLETE
                    else:
                        run_state = ITERATING_TASKS
                else:
                    task = cur_block.always[cur_always_pos]
                    cur_always_pos += 1
                    break

            elif run_state == ITERATING_COMPLETE:
                # done iterating, return None to signify that
                return None

        if task._role:
            if cur_role and task._role != cur_role:
                cur_role._completed = True
            cur_role = task._role

        # If we're not just peeking at the next task, save the internal state
        if not peek:
            self._run_state = run_state
            self._failed_state = failed_state
            self._cur_block = cur_block
            self._cur_role = cur_role
            self._cur_task_pos = cur_task_pos
            self._cur_rescue_pos = cur_rescue_pos
            self._cur_always_pos = cur_always_pos
            self._cur_handler_pos = cur_handler_pos

        return task

    def mark_failed(self):
        '''
        Escalates the failed state relative to the running state.
        '''
        if self._run_state == ITERATING_SETUP:
            self._failed_state = FAILED_SETUP
        elif self._run_state == ITERATING_TASKS:
            self._failed_state = FAILED_TASKS
        elif self._run_state == ITERATING_RESCUE:
            self._failed_state = FAILED_RESCUE
        elif self._run_state == ITERATING_ALWAYS:
            self._failed_state = FAILED_ALWAYS


class PlayIterator:

    '''
    The main iterator class, which keeps the state of the playbook
    on a per-host basis using the above PlayState class.
    '''

    def __init__(self, inventory, play):
        self._play = play
        self._inventory = inventory
        self._host_entries = dict()
        self._first_host = None

        # Build the per-host dictionary of playbook states, using a copy
        # of the play object so we can post_validate it to ensure any templated
        # fields are filled in without modifying the original object, since
        # post_validate() saves the templated values.

        # FIXME: this is a hacky way of doing this, the iterator should
        #        instead get the loader and variable manager directly
        #        as args to __init__
        all_vars = inventory._variable_manager.get_vars(loader=inventory._loader, play=play)
        new_play = play.copy()
        new_play.post_validate(all_vars, ignore_undefined=True)

        for host in inventory.get_hosts(new_play.hosts):
            if self._first_host is None:
                self._first_host = host
            self._host_entries[host.get_name()] = PlayState(parent_iterator=self, host=host)

    # FIXME: remove, probably not required anymore
    #def get_next_task(self, peek=False):
    #    ''' returns the next task for host[0] '''
    #
    #    first_entry = self._host_entries[self._first_host.get_name()]
    #    if not peek:
    #        for entry in self._host_entries:
    #            if entry != self._first_host.get_name():
    #                target_entry = self._host_entries[entry]
    #                if target_entry._cur_task_pos == first_entry._cur_task_pos:
    #                    target_entry.next()
    #    return first_entry.next(peek=peek)

    def get_next_task_for_host(self, host, peek=False):
        ''' fetch the next task for the given host '''
        if host.get_name() not in self._host_entries:
            raise AnsibleError("invalid host (%s) specified for playbook iteration" % host)

        return self._host_entries[host.get_name()].next(peek=peek)

    def mark_host_failed(self, host):
        ''' mark the given host as failed '''
        if host.get_name() not in self._host_entries:
            raise AnsibleError("invalid host (%s) specified for playbook iteration" % host)

        self._host_entries[host.get_name()].mark_failed()
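A minimal sketch (not from the commit) of the loop a strategy would run on top of PlayIterator: keep asking each host for its next task until every per-host state machine reaches ITERATING_COMPLETE. The inventory and play objects are assumed to exist, and run_task_somehow()/result_failed() are hypothetical placeholders for the real executor path.

iterator = PlayIterator(inventory, play)

work_left = True
while work_left:
    work_left = False
    for host in inventory.get_hosts(play.hosts):
        task = iterator.get_next_task_for_host(host)
        if task is None:
            continue                              # this host is done iterating
        work_left = True
        result = run_task_somehow(host, task)     # hypothetical executor call
        if result_failed(result):                 # hypothetical failure check
            iterator.mark_host_failed(host)       # escalates into rescue/always handling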
@@ -1,125 +0,0 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

class PlaybookState:

    '''
    A helper class, which keeps track of the task iteration
    state for a given playbook. This is used in the PlaybookIterator
    class on a per-host basis.
    '''
    def __init__(self, parent_iterator):
        self._parent_iterator = parent_iterator
        self._cur_play = 0
        self._task_list = None
        self._cur_task_pos = 0
        self._done = False

    def next(self, peek=False):
        '''
        Determines and returns the next available task from the playbook,
        advancing through the list of plays as it goes.
        '''

        task = None

        # we save these locally so that we can peek at the next task
        # without updating the internal state of the iterator
        cur_play = self._cur_play
        task_list = self._task_list
        cur_task_pos = self._cur_task_pos

        while True:
            # when we hit the end of the playbook entries list, we set a flag
            # and return None to indicate we're there
            # FIXME: accessing the entries and parent iterator playbook members
            #        should be done through accessor functions
            if self._done or cur_play > len(self._parent_iterator._playbook._entries) - 1:
                self._done = True
                return None

            # initialize the task list by calling the .compile() method
            # on the play, which will call compile() for all child objects
            if task_list is None:
                task_list = self._parent_iterator._playbook._entries[cur_play].compile()

            # if we've hit the end of this play's task list, move on to the next
            # and reset the position values for the next iteration
            if cur_task_pos > len(task_list) - 1:
                cur_play += 1
                task_list = None
                cur_task_pos = 0
                continue
            else:
                # FIXME: do tag/conditional evaluation here and advance
                #        the task position if it should be skipped without
                #        returning a task
                task = task_list[cur_task_pos]
                cur_task_pos += 1

                # Skip the task if it is the member of a role which has already
                # been run, unless the role allows multiple executions
                if task._role:
                    # FIXME: this should all be done via member functions
                    #        instead of direct access to internal variables
                    if task._role.has_run() and not task._role._metadata._allow_duplicates:
                        continue

                # Break out of the while loop now that we have our task
                break

        # If we're not just peeking at the next task, save the internal state
        if not peek:
            self._cur_play = cur_play
            self._task_list = task_list
            self._cur_task_pos = cur_task_pos

        return task


class PlaybookIterator:

    '''
    The main iterator class, which keeps the state of the playbook
    on a per-host basis using the above PlaybookState class.
    '''

    def __init__(self, inventory, log_manager, playbook):
        self._playbook = playbook
        self._log_manager = log_manager
        self._host_entries = dict()
        self._first_host = None

        # build the per-host dictionary of playbook states
        for host in inventory.get_hosts():
            if self._first_host is None:
                self._first_host = host
            self._host_entries[host.get_name()] = PlaybookState(parent_iterator=self)

    def get_next_task(self, peek=False):
        ''' returns the next task for host[0] '''
        return self._host_entries[self._first_host.get_name()].next(peek=peek)

    def get_next_task_for_host(self, host, peek=False):
        ''' fetch the next task for the given host '''
        if host.get_name() not in self._host_entries:
            raise AnsibleError("invalid host specified for playbook iteration")

        return self._host_entries[host.get_name()].next(peek=peek)
@@ -0,0 +1,155 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import Queue
import multiprocessing
import os
import signal
import sys
import time
import traceback

HAS_ATFORK = True
try:
    from Crypto.Random import atfork
except ImportError:
    HAS_ATFORK = False

from ansible.executor.task_result import TaskResult
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task

from ansible.utils.debug import debug

__all__ = ['ResultProcess']


class ResultProcess(multiprocessing.Process):
    '''
    The result worker process, which reads results from the results
    queue and fires off callbacks/etc. as necessary.
    '''

    def __init__(self, final_q, workers):

        # store the final results queue and the list of worker
        # (process, main_q, rslt_q) tuples we poll for results
        self._final_q = final_q
        self._workers = workers
        self._cur_worker = 0
        self._terminated = False

        super(ResultProcess, self).__init__()

    def _send_result(self, result):
        debug("sending result: %s" % (result,))
        self._final_q.put(result, block=False)
        debug("done sending result")

    def _read_worker_result(self):
        result = None
        starting_point = self._cur_worker
        while True:
            (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
            self._cur_worker += 1
            if self._cur_worker >= len(self._workers):
                self._cur_worker = 0

            try:
                if not rslt_q.empty():
                    debug("worker %d has data to read" % self._cur_worker)
                    result = rslt_q.get(block=False)
                    debug("got a result from worker %d: %s" % (self._cur_worker, result))
                    break
            except Queue.Empty:
                pass

            if self._cur_worker == starting_point:
                break

        return result

    def terminate(self):
        self._terminated = True
        super(ResultProcess, self).terminate()

    def run(self):
        '''
        The main process execution, which reads from the results queue
        indefinitely and sends callbacks/etc. when results are received.
        '''

        if HAS_ATFORK:
            atfork()

        while True:
            try:
                result = self._read_worker_result()
                if result is None:
                    time.sleep(0.1)
                    continue

                host_name = result._host.get_name()

                # send callbacks, execute other options based on the result status
                if result.is_failed():
                    #self._callback.runner_on_failed(result._task, result)
                    self._send_result(('host_task_failed', result))
                elif result.is_unreachable():
                    #self._callback.runner_on_unreachable(result._task, result)
                    self._send_result(('host_unreachable', result))
                elif result.is_skipped():
                    #self._callback.runner_on_skipped(result._task, result)
                    self._send_result(('host_task_skipped', result))
                else:
                    #self._callback.runner_on_ok(result._task, result)
                    self._send_result(('host_task_ok', result))

                # if this task is notifying a handler, do it now
                if result._task.notify:
                    # The shared dictionary for notified handlers is a proxy, which
                    # does not detect when sub-objects within the proxy are modified.
                    # So, per the docs, we reassign the list so the proxy picks up and
                    # notifies all other threads
                    for notify in result._task.notify:
                        self._send_result(('notify_handler', notify, result._host))

                # if this task is registering facts, do that now
                if 'ansible_facts' in result._result:
                    if result._task.action in ('set_fact', 'include_vars'):
                        for (key, value) in result._result['ansible_facts'].iteritems():
                            self._send_result(('set_host_var', result._host, key, value))
                    else:
                        self._send_result(('set_host_facts', result._host, result._result['ansible_facts']))

                # if this task is registering a result, do it now
                if result._task.register:
                    self._send_result(('set_host_var', result._host, result._task.register, result._result))

            except Queue.Empty:
                pass
            except (KeyboardInterrupt, IOError, EOFError):
                break
            except:
                # FIXME: we should probably send a proper callback here instead of
                #        simply dumping a stack trace on the screen
                traceback.print_exc()
                break
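A minimal sketch (not from the commit) of how a ResultProcess is wired up: it polls per-worker result queues and forwards whatever it finds onto a single final queue owned by the task queue manager. The queue objects here are plain multiprocessing queues; the worker process slot is left as None for brevity.

import multiprocessing

final_q = multiprocessing.Queue()
workers = []
for i in range(3):
    main_q = multiprocessing.Queue()          # tasks flowing to each worker
    rslt_q = multiprocessing.Queue()          # results flowing back from it
    workers.append((None, main_q, rslt_q))    # worker process object omitted in this sketch

result_proc = ResultProcess(final_q, workers)
result_proc.start()
# ... the task queue manager then drains final_q entries such as ('host_task_ok', result)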
@@ -0,0 +1,141 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import Queue
import multiprocessing
import os
import signal
import sys
import time
import traceback

HAS_ATFORK = True
try:
    from Crypto.Random import atfork
except ImportError:
    HAS_ATFORK = False

from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.executor.task_executor import TaskExecutor
from ansible.executor.task_result import TaskResult
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task

from ansible.utils.debug import debug

__all__ = ['WorkerProcess']


class WorkerProcess(multiprocessing.Process):
    '''
    The worker process class, which uses TaskExecutor to run tasks
    read from a job queue and pushes results into a results queue
    for reading later.
    '''

    def __init__(self, tqm, main_q, rslt_q, loader, new_stdin):

        # store the job queue, results queue, and loader handed to us
        # by the task queue manager
        self._main_q = main_q
        self._rslt_q = rslt_q
        self._loader = loader

        # dupe stdin, if we have one
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        self._new_stdin = new_stdin
        if not new_stdin and fileno is not None:
            try:
                self._new_stdin = os.fdopen(os.dup(fileno))
            except OSError, e:
                # couldn't dupe stdin, most likely because it's
                # not a valid file descriptor, so we just rely on
                # using the one that was passed in
                pass

        super(WorkerProcess, self).__init__()

    def run(self):
        '''
        Called when the process is started, and loops indefinitely
        until an error is encountered (typically an IOError from the
        queue pipe being disconnected). During the loop, we attempt
        to pull tasks off the job queue and run them, pushing the result
        onto the results queue. We also remove the host from the blocked
        hosts list, to signify that they are ready for their next task.
        '''

        if HAS_ATFORK:
            atfork()

        while True:
            task = None
            try:
                if not self._main_q.empty():
                    debug("there's work to be done!")
                    (host, task, job_vars, connection_info) = self._main_q.get(block=False)
                    debug("got a task/handler to work on: %s" % task)

                    new_connection_info = connection_info.set_task_override(task)

                    # execute the task and build a TaskResult from the result
                    debug("running TaskExecutor() for %s/%s" % (host, task))
                    executor_result = TaskExecutor(host, task, job_vars, new_connection_info, self._loader).run()
                    debug("done running TaskExecutor() for %s/%s" % (host, task))
                    task_result = TaskResult(host, task, executor_result)

                    # put the result on the result queue
                    debug("sending task result")
                    self._rslt_q.put(task_result, block=False)
                    debug("done sending task result")

                else:
                    time.sleep(0.1)

            except Queue.Empty:
                pass
            except (IOError, EOFError, KeyboardInterrupt):
                break
            except AnsibleConnectionFailure:
                try:
                    if task:
                        task_result = TaskResult(host, task, dict(unreachable=True))
                        self._rslt_q.put(task_result, block=False)
                except:
                    # FIXME: most likely an abort, catch those kinds of errors specifically
                    break
            except Exception, e:
                debug("WORKER EXCEPTION: %s" % e)
                debug("WORKER EXCEPTION: %s" % traceback.format_exc())
                try:
                    if task:
                        task_result = TaskResult(host, task, dict(failed=True, exception=True, stdout=traceback.format_exc()))
                        self._rslt_q.put(task_result, block=False)
                except:
                    # FIXME: most likely an abort, catch those kinds of errors specifically
                    break

        debug("WORKER PROCESS EXITING")
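A minimal sketch (not from the commit) of feeding a WorkerProcess: each item on the job queue is a (host, task, job_vars, connection_info) tuple, and results come back on the worker's result queue as TaskResult objects. The tqm, loader, some_host, some_task, job_vars and connection_info objects are assumed to exist already.

main_q = multiprocessing.Queue()
rslt_q = multiprocessing.Queue()

worker = WorkerProcess(tqm, main_q, rslt_q, loader, new_stdin=None)
worker.start()

main_q.put((some_host, some_task, job_vars, connection_info), block=False)
task_result = rslt_q.get()   # blocks until the worker finishes the task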
File diff suppressed because it is too large
@@ -0,0 +1,229 @@
# (c) 2013, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

#############################################

import os
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
from ansible import utils
from ansible import errors

class InventoryDirectory(object):
    ''' Host inventory parser for ansible using a directory of inventories. '''

    def __init__(self, filename=C.DEFAULT_HOST_LIST):
        self.names = os.listdir(filename)
        self.names.sort()
        self.directory = filename
        self.parsers = []
        self.hosts = {}
        self.groups = {}

        for i in self.names:

            # Skip files that end with certain extensions or characters
            if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
                continue
            # Skip hidden files
            if i.startswith('.') and not i.startswith('./'):
                continue
            # These are things inside of an inventory basedir
            if i in ("host_vars", "group_vars", "vars_plugins"):
                continue
            fullpath = os.path.join(self.directory, i)
            if os.path.isdir(fullpath):
                parser = InventoryDirectory(filename=fullpath)
            elif utils.is_executable(fullpath):
                parser = InventoryScript(filename=fullpath)
            else:
                parser = InventoryParser(filename=fullpath)
            self.parsers.append(parser)

            # retrieve all groups and hosts from the parser and add them to
            # self, don't look at group lists yet, to avoid
            # recursion trouble, but just make sure all objects exist in self
            newgroups = parser.groups.values()
            for group in newgroups:
                for host in group.hosts:
                    self._add_host(host)
            for group in newgroups:
                self._add_group(group)

            # now check the objects lists so they contain only objects from
            # self; membership data in groups is already fine (except all &
            # ungrouped, see later), but might still reference objects not in self
            for group in self.groups.values():
                # iterate on a copy of the lists, as those lists get changed in
                # the loop
                # list with group's child group objects:
                for child in group.child_groups[:]:
                    if child != self.groups[child.name]:
                        group.child_groups.remove(child)
                        group.child_groups.append(self.groups[child.name])
                # list with group's parent group objects:
                for parent in group.parent_groups[:]:
                    if parent != self.groups[parent.name]:
                        group.parent_groups.remove(parent)
                        group.parent_groups.append(self.groups[parent.name])
                # list with group's host objects:
                for host in group.hosts[:]:
                    if host != self.hosts[host.name]:
                        group.hosts.remove(host)
                        group.hosts.append(self.hosts[host.name])
                    # also check here that the group that contains host, is
                    # also contained in the host's group list
                    if group not in self.hosts[host.name].groups:
                        self.hosts[host.name].groups.append(group)

        # extra checks on special groups all and ungrouped
        # remove hosts from 'ungrouped' if they became member of other groups
        if 'ungrouped' in self.groups:
            ungrouped = self.groups['ungrouped']
            # loop on a copy of ungrouped hosts, as we want to change that list
            for host in ungrouped.hosts[:]:
                if len(host.groups) > 1:
                    host.groups.remove(ungrouped)
                    ungrouped.hosts.remove(host)

        # remove hosts from 'all' if they became member of other groups
        # all should only contain direct children, not grandchildren
        # direct children should have depth == 1
        if 'all' in self.groups:
            allgroup = self.groups['all']
            # loop on a copy of all's child groups, as we want to change that list
            for group in allgroup.child_groups[:]:
                # groups might once have been added to all, and later be added
                # to another group: we need to remove the link with all then
                if len(group.parent_groups) > 1 and allgroup in group.parent_groups:
                    # real children of all have just 1 parent, all
                    # this one has more, so not a direct child of all anymore
                    group.parent_groups.remove(allgroup)
                    allgroup.child_groups.remove(group)
                elif allgroup not in group.parent_groups:
                    # this group was once added to all, but doesn't list it as
                    # a parent any more; the info in the group is the correct
                    # info
                    allgroup.child_groups.remove(group)

    def _add_group(self, group):
        """ Merge an existing group or add a new one;
            Track parent and child groups, and hosts of the new one """

        if group.name not in self.groups:
            # it's brand new, add him!
            self.groups[group.name] = group
        if self.groups[group.name] != group:
            # different object, merge
            self._merge_groups(self.groups[group.name], group)

    def _add_host(self, host):
        if host.name not in self.hosts:
            # Papa's got a brand new host
            self.hosts[host.name] = host
        if self.hosts[host.name] != host:
            # different object, merge
            self._merge_hosts(self.hosts[host.name], host)

    def _merge_groups(self, group, newgroup):
        """ Merge all of instance newgroup into group,
            update parent/child relationships
            group lists may still contain group objects that exist in self with
            the same name, but were instantiated as different objects in some other
            inventory parser; these are handled later """

        # name
        if group.name != newgroup.name:
            raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))

        # depth
        group.depth = max([group.depth, newgroup.depth])

        # hosts list (host objects are by now already added to self.hosts)
        for host in newgroup.hosts:
            grouphosts = dict([(h.name, h) for h in group.hosts])
            if host.name in grouphosts:
                # same host name but different object, merge
                self._merge_hosts(grouphosts[host.name], host)
            else:
                # new membership, add host to group from self
                # group from self will also be added again to host.groups, but
                # as different object
                group.add_host(self.hosts[host.name])
                # now remove the old object for group in host.groups
                for hostgroup in [g for g in host.groups]:
                    if hostgroup.name == group.name and hostgroup != self.groups[group.name]:
                        self.hosts[host.name].groups.remove(hostgroup)

        # group child membership relation
        for newchild in newgroup.child_groups:
            # dict with existing child groups:
            childgroups = dict([(g.name, g) for g in group.child_groups])
            # check if child of new group is already known as a child
            if newchild.name not in childgroups:
                self.groups[group.name].add_child_group(newchild)

        # group parent membership relation
        for newparent in newgroup.parent_groups:
            # dict with existing parent groups:
            parentgroups = dict([(g.name, g) for g in group.parent_groups])
            # check if parent of new group is already known as a parent
            if newparent.name not in parentgroups:
                if newparent.name not in self.groups:
                    # group does not exist yet in self, import it
                    self.groups[newparent.name] = newparent
                # group now exists but not yet as a parent here
                self.groups[newparent.name].add_child_group(group)

        # variables
        group.vars = utils.combine_vars(group.vars, newgroup.vars)

    def _merge_hosts(self, host, newhost):
        """ Merge all of instance newhost into host """

        # name
        if host.name != newhost.name:
            raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))

        # group membership relation
        for newgroup in newhost.groups:
            # dict with existing groups:
            hostgroups = dict([(g.name, g) for g in host.groups])
            # check if new group is already known as a group
            if newgroup.name not in hostgroups:
                if newgroup.name not in self.groups:
                    # group does not exist yet in self, import it
                    self.groups[newgroup.name] = newgroup
                # group now exists but doesn't have host yet
                self.groups[newgroup.name].add_host(host)

        # variables
        host.vars = utils.combine_vars(host.vars, newhost.vars)

    def get_host_variables(self, host):
        """ Gets additional host variables from all inventories """
        vars = {}
        for i in self.parsers:
            vars.update(i.get_host_variables(host))
        return vars
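A minimal sketch (not from the commit) of pointing the directory parser at a set of inventory sources and reading back the merged groups and hosts. The directory path and the 'webservers' group are hypothetical.

inv_dir = InventoryDirectory(filename='/etc/ansible/inventory.d')

print(sorted(inv_dir.groups.keys()))      # merged group names from all sub-parsers
web = inv_dir.groups.get('webservers')    # assumes such a group was defined somewhere
if web:
    print([h.name for h in web.get_hosts()])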
@@ -0,0 +1,116 @@
# (c) 2012, Zettar Inc.
# Written by Chin Fang <fangchin@zettar.com>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#

'''
This module is for enhancing ansible's inventory parsing capability such
that it can deal with hostnames specified using a simple pattern in the
form of [beg:end], example: [1:5], [a:c], [D:G]. If beg is not specified,
it defaults to 0.

If beg is given and is left-zero-padded, e.g. '001', it is taken as a
formatting hint when the range is expanded. e.g. [001:010] is to be
expanded into 001, 002 ...009, 010.

Note that when beg is specified with left zero padding, then the length of
end must be the same as that of beg, else an exception is raised.
'''
import string

from ansible import errors

def detect_range(line = None):
    '''
    A helper function that checks a given host line to see if it contains
    a range pattern described in the docstring above.

    Returns True if the given line contains a pattern, else False.
    '''
    if 0 <= line.find("[") < line.find(":") < line.find("]"):
        return True
    else:
        return False

def expand_hostname_range(line = None):
    '''
    A helper function that expands a given line that contains a pattern
    specified in top docstring, and returns a list that consists of the
    expanded version.

    The '[' and ']' characters are used to maintain the pseudo-code
    appearance. They are replaced in this function with '|' to ease
    string splitting.

    References: http://ansible.github.com/patterns.html#hosts-and-groups
    '''
    all_hosts = []
    if line:
        # A hostname such as db[1:6]-node is considered to consist of
        # three parts:
        # head: 'db'
        # nrange: [1:6]; range() is a built-in. Can't use the name
        # tail: '-node'

        # Add support for multiple ranges in a host so:
        # db[01:10:3]node-[01:10]
        # - to do this we split off at the first [...] set, getting the list
        #   of hosts and then repeat until none left.
        # - also add an optional third parameter which contains the step. (Default: 1)
        #   so range can be [01:10:2] -> 01 03 05 07 09
        # FIXME: make this work for alphabetic sequences too.

        (head, nrange, tail) = line.replace('[','|',1).replace(']','|',1).split('|')
        bounds = nrange.split(":")
        if len(bounds) != 2 and len(bounds) != 3:
            raise errors.AnsibleError("host range incorrectly specified")
        beg = bounds[0]
        end = bounds[1]
        if len(bounds) == 2:
            step = 1
        else:
            step = bounds[2]
        if not beg:
            beg = "0"
        if not end:
            raise errors.AnsibleError("host range end value missing")
        if beg[0] == '0' and len(beg) > 1:
            rlen = len(beg)  # range length formatting hint
            if rlen != len(end):
                raise errors.AnsibleError("host range format incorrectly specified!")
            fill = lambda _: str(_).zfill(rlen)  # range sequence
        else:
            fill = str

        try:
            i_beg = string.ascii_letters.index(beg)
            i_end = string.ascii_letters.index(end)
            if i_beg > i_end:
                raise errors.AnsibleError("host range format incorrectly specified!")
            seq = string.ascii_letters[i_beg:i_end+1]
        except ValueError:  # not an alpha range
            seq = range(int(beg), int(end)+1, int(step))

        for rseq in seq:
            hname = ''.join((head, fill(rseq), tail))

            if detect_range(hname):
                all_hosts.extend( expand_hostname_range( hname ) )
            else:
                all_hosts.append(hname)

        return all_hosts
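A short worked example (not part of the commit) of the range expansion described in the docstring: a zero-padded numeric range keeps its width, and an alphabetic range walks the letters.

print(expand_hostname_range("db[01:03]-node"))
# -> ['db01-node', 'db02-node', 'db03-node']

print(expand_hostname_range("web[a:c].example.com"))
# -> ['weba.example.com', 'webb.example.com', 'webc.example.com']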
@ -0,0 +1,159 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from ansible.utils.debug import debug
|
||||
|
||||
class Group:
|
||||
''' a group of ansible hosts '''
|
||||
|
||||
#__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
|
||||
|
||||
def __init__(self, name=None):
|
||||
|
||||
self.depth = 0
|
||||
self.name = name
|
||||
self.hosts = []
|
||||
self.vars = {}
|
||||
self.child_groups = []
|
||||
self.parent_groups = []
|
||||
self._hosts_cache = None
|
||||
|
||||
#self.clear_hosts_cache()
|
||||
#if self.name is None:
|
||||
# raise Exception("group name is required")
|
||||
|
||||
def __repr__(self):
|
||||
return self.get_name()
|
||||
|
||||
def __getstate__(self):
|
||||
return self.serialize()
|
||||
|
||||
def __setstate__(self, data):
|
||||
return self.deserialize(data)
|
||||
|
||||
def serialize(self):
|
||||
parent_groups = []
|
||||
for parent in self.parent_groups:
|
||||
parent_groups.append(parent.serialize())
|
||||
|
||||
result = dict(
|
||||
name=self.name,
|
||||
vars=self.vars.copy(),
|
||||
parent_groups=parent_groups,
|
||||
depth=self.depth,
|
||||
)
|
||||
|
||||
debug("serializing group, result is: %s" % result)
|
||||
return result
|
||||
|
||||
def deserialize(self, data):
|
||||
debug("deserializing group, data is: %s" % data)
|
||||
self.__init__()
|
||||
self.name = data.get('name')
|
||||
self.vars = data.get('vars', dict())
|
||||
|
||||
parent_groups = data.get('parent_groups', [])
|
||||
for parent_data in parent_groups:
|
||||
g = Group()
|
||||
g.deserialize(parent_data)
|
||||
self.parent_groups.append(g)
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
def add_child_group(self, group):
|
||||
|
||||
if self == group:
|
||||
raise Exception("can't add group to itself")
|
||||
|
||||
# don't add if it's already there
|
||||
if group not in self.child_groups:
|
||||
self.child_groups.append(group)
|
||||
|
||||
# update the depth of the child
|
||||
group.depth = max([self.depth+1, group.depth])
|
||||
|
||||
# update the depth of the grandchildren
|
||||
group._check_children_depth()
|
||||
|
||||
# now add self to child's parent_groups list, but only if there
|
||||
# isn't already a group with the same name
|
||||
if self.name not in [g.name for g in group.parent_groups]:
|
||||
group.parent_groups.append(self)
|
||||
|
||||
self.clear_hosts_cache()
|
||||
|
||||
def _check_children_depth(self):
|
||||
|
||||
for group in self.child_groups:
|
||||
group.depth = max([self.depth+1, group.depth])
|
||||
group._check_children_depth()
|
||||
|
||||
def add_host(self, host):
|
||||
|
||||
self.hosts.append(host)
|
||||
host.add_group(self)
|
||||
self.clear_hosts_cache()
|
||||
|
||||
def set_variable(self, key, value):
|
||||
|
||||
self.vars[key] = value
|
||||
|
||||
def clear_hosts_cache(self):
|
||||
|
||||
self._hosts_cache = None
|
||||
for g in self.parent_groups:
|
||||
g.clear_hosts_cache()
|
||||
|
||||
def get_hosts(self):
|
||||
|
||||
if self._hosts_cache is None:
|
||||
self._hosts_cache = self._get_hosts()
|
||||
|
||||
return self._hosts_cache
|
||||
|
||||
def _get_hosts(self):
|
||||
|
||||
hosts = []
|
||||
seen = {}
|
||||
for kid in self.child_groups:
|
||||
kid_hosts = kid.get_hosts()
|
||||
for kk in kid_hosts:
|
||||
if kk not in seen:
|
||||
seen[kk] = 1
|
||||
hosts.append(kk)
|
||||
for mine in self.hosts:
|
||||
if mine not in seen:
|
||||
seen[mine] = 1
|
||||
hosts.append(mine)
|
||||
return hosts
|
||||
|
||||
def get_vars(self):
|
||||
return self.vars.copy()
|
||||
|
||||
def _get_ancestors(self):
|
||||
|
||||
results = {}
|
||||
for g in self.parent_groups:
|
||||
results[g.name] = g
|
||||
results.update(g._get_ancestors())
|
||||
return results
|
||||
|
||||
def get_ancestors(self):
|
||||
|
||||
return self._get_ancestors().values()
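A brief sketch of how Group objects compose, assuming the Host class added later in this change; the names are illustrative. A child group takes a depth one greater than its parent, and get_hosts() returns the de-duplicated hosts of the whole subtree:

from ansible.inventory.group import Group
from ansible.inventory.host import Host

all_group = Group(name='all')
web = Group(name='webservers')
all_group.add_child_group(web)              # web.depth becomes 1
web.add_host(Host(name='alpha'))

print([h.name for h in all_group.get_hosts()])   # expected: ['alpha']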
|
||||
|
@ -0,0 +1,127 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.inventory.group import Group
|
||||
from ansible.utils.vars import combine_vars
|
||||
|
||||
__all__ = ['Host']
|
||||
|
||||
class Host:
|
||||
''' a single ansible host '''
|
||||
|
||||
#__slots__ = [ 'name', 'vars', 'groups' ]
|
||||
|
||||
def __getstate__(self):
|
||||
return self.serialize()
|
||||
|
||||
def __setstate__(self, data):
|
||||
return self.deserialize(data)
|
||||
|
||||
def serialize(self):
|
||||
groups = []
|
||||
for group in self.groups:
|
||||
groups.append(group.serialize())
|
||||
|
||||
return dict(
|
||||
name=self.name,
|
||||
vars=self.vars.copy(),
|
||||
ipv4_address=self.ipv4_address,
|
||||
ipv6_address=self.ipv6_address,
|
||||
port=self.port,
|
||||
gathered_facts=self._gathered_facts,
|
||||
groups=groups,
|
||||
)
|
||||
|
||||
def deserialize(self, data):
|
||||
self.__init__()
|
||||
|
||||
self.name = data.get('name')
|
||||
self.vars = data.get('vars', dict())
|
||||
self.ipv4_address = data.get('ipv4_address', '')
|
||||
self.ipv6_address = data.get('ipv6_address', '')
|
||||
self.port = data.get('port')
|
||||
|
||||
groups = data.get('groups', [])
|
||||
for group_data in groups:
|
||||
g = Group()
|
||||
g.deserialize(group_data)
|
||||
self.groups.append(g)
|
||||
|
||||
def __init__(self, name=None, port=None):
|
||||
|
||||
self.name = name
|
||||
self.vars = {}
|
||||
self.groups = []
|
||||
|
||||
self.ipv4_address = name
|
||||
self.ipv6_address = name
|
||||
|
||||
if port and port != C.DEFAULT_REMOTE_PORT:
|
||||
self.port = int(port)
|
||||
else:
|
||||
self.port = C.DEFAULT_REMOTE_PORT
|
||||
|
||||
self._gathered_facts = False
|
||||
|
||||
def __repr__(self):
|
||||
return self.get_name()
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
@property
|
||||
def gathered_facts(self):
|
||||
return self._gathered_facts
|
||||
|
||||
def set_gathered_facts(self, gathered):
|
||||
self._gathered_facts = gathered
|
||||
|
||||
def add_group(self, group):
|
||||
|
||||
self.groups.append(group)
|
||||
|
||||
def set_variable(self, key, value):
|
||||
|
||||
self.vars[key] = value
|
||||
|
||||
def get_groups(self):
|
||||
|
||||
groups = {}
|
||||
for g in self.groups:
|
||||
groups[g.name] = g
|
||||
ancestors = g.get_ancestors()
|
||||
for a in ancestors:
|
||||
groups[a.name] = a
|
||||
return groups.values()
|
||||
|
||||
def get_vars(self):
|
||||
|
||||
results = {}
|
||||
groups = self.get_groups()
|
||||
for group in sorted(groups, key=lambda g: g.depth):
|
||||
results = combine_vars(results, group.get_vars())
|
||||
results = combine_vars(results, self.vars)
|
||||
results['inventory_hostname'] = self.name
|
||||
results['inventory_hostname_short'] = self.name.split('.')[0]
|
||||
results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
|
||||
return results
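A sketch of variable precedence in get_vars(), assuming combine_vars() is left at its default 'replace' merge behaviour: deeper (more specific) groups override shallower ones, and host vars win last. All values are illustrative:

from ansible.inventory.group import Group
from ansible.inventory.host import Host

dc = Group(name='datacenter')      # depth 0
web = Group(name='webservers')     # depth 1 once it becomes a child of dc
dc.add_child_group(web)
dc.set_variable('http_port', 80)
web.set_variable('http_port', 8080)

h = Host(name='web01.example.com')
web.add_host(h)
h.set_variable('http_port', 9090)

v = h.get_vars()
print(v['http_port'])                  # expected: 9090 (the host var wins)
print(v['inventory_hostname_short'])   # expected: 'web01'
print(v['group_names'])                # expected: ['datacenter', 'webservers']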
|
||||
|
@ -0,0 +1,215 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#############################################
|
||||
|
||||
import ast
|
||||
import shlex
|
||||
import re
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.errors import *
|
||||
from ansible.inventory.host import Host
|
||||
from ansible.inventory.group import Group
|
||||
from ansible.inventory.expand_hosts import detect_range
|
||||
from ansible.inventory.expand_hosts import expand_hostname_range
|
||||
|
||||
class InventoryParser(object):
|
||||
"""
|
||||
Host inventory for ansible.
|
||||
"""
|
||||
|
||||
def __init__(self, filename=C.DEFAULT_HOST_LIST):
|
||||
|
||||
with open(filename) as fh:
|
||||
self.lines = fh.readlines()
|
||||
self.groups = {}
|
||||
self.hosts = {}
|
||||
self._parse()
|
||||
|
||||
def _parse(self):
|
||||
|
||||
self._parse_base_groups()
|
||||
self._parse_group_children()
|
||||
self._add_allgroup_children()
|
||||
self._parse_group_variables()
|
||||
return self.groups
|
||||
|
||||
@staticmethod
|
||||
def _parse_value(v):
|
||||
if "#" not in v:
|
||||
try:
|
||||
return ast.literal_eval(v)
|
||||
# Using explicit exceptions.
|
||||
# Likely a string that literal_eval does not like; we will just use the value as-is.
|
||||
except ValueError:
|
||||
# For some reason this was thought to be malformed.
|
||||
pass
|
||||
except SyntaxError:
|
||||
# Is this a hash with an equals at the end?
|
||||
pass
|
||||
return v
|
||||
|
||||
# [webservers]
|
||||
# alpha
|
||||
# beta:2345
|
||||
# gamma sudo=True user=root
|
||||
# delta asdf=jkl favcolor=red
|
||||
|
||||
def _add_allgroup_children(self):
|
||||
|
||||
for group in self.groups.values():
|
||||
if group.depth == 0 and group.name != 'all':
|
||||
self.groups['all'].add_child_group(group)
|
||||
|
||||
|
||||
def _parse_base_groups(self):
|
||||
# FIXME: refactor
|
||||
|
||||
ungrouped = Group(name='ungrouped')
|
||||
all = Group(name='all')
|
||||
all.add_child_group(ungrouped)
|
||||
|
||||
self.groups = dict(all=all, ungrouped=ungrouped)
|
||||
active_group_name = 'ungrouped'
|
||||
|
||||
for line in self.lines:
|
||||
line = self._before_comment(line).strip()
|
||||
if line.startswith("[") and line.endswith("]"):
|
||||
active_group_name = line.replace("[","").replace("]","")
|
||||
if ":vars" in line or ":children" in line:
|
||||
active_group_name = active_group_name.rsplit(":", 1)[0]
|
||||
if active_group_name not in self.groups:
|
||||
new_group = self.groups[active_group_name] = Group(name=active_group_name)
|
||||
active_group_name = None
|
||||
elif active_group_name not in self.groups:
|
||||
new_group = self.groups[active_group_name] = Group(name=active_group_name)
|
||||
elif line.startswith(";") or line == '':
|
||||
pass
|
||||
elif active_group_name:
|
||||
tokens = shlex.split(line)
|
||||
if len(tokens) == 0:
|
||||
continue
|
||||
hostname = tokens[0]
|
||||
port = C.DEFAULT_REMOTE_PORT
|
||||
# Two cases to check:
|
||||
# 0. A hostname that contains a range pseudo-code and a port
|
||||
# 1. A hostname that contains just a port
|
||||
if hostname.count(":") > 1:
|
||||
# Possibly an IPv6 address, or maybe a host line with multiple ranges
|
||||
# IPv6 with Port XXX:XXX::XXX.port
|
||||
# FQDN foo.example.com
|
||||
if hostname.count(".") == 1:
|
||||
(hostname, port) = hostname.rsplit(".", 1)
|
||||
elif ("[" in hostname and
|
||||
"]" in hostname and
|
||||
":" in hostname and
|
||||
(hostname.rindex("]") < hostname.rindex(":")) or
|
||||
("]" not in hostname and ":" in hostname)):
|
||||
(hostname, port) = hostname.rsplit(":", 1)
|
||||
|
||||
hostnames = []
|
||||
if detect_range(hostname):
|
||||
hostnames = expand_hostname_range(hostname)
|
||||
else:
|
||||
hostnames = [hostname]
|
||||
|
||||
for hn in hostnames:
|
||||
host = None
|
||||
if hn in self.hosts:
|
||||
host = self.hosts[hn]
|
||||
else:
|
||||
host = Host(name=hn, port=port)
|
||||
self.hosts[hn] = host
|
||||
if len(tokens) > 1:
|
||||
for t in tokens[1:]:
|
||||
if t.startswith('#'):
|
||||
break
|
||||
try:
|
||||
(k,v) = t.split("=", 1)
|
||||
except ValueError, e:
|
||||
raise AnsibleError("Invalid ini entry: %s - %s" % (t, str(e)))
|
||||
if k == 'ansible_ssh_host':
|
||||
host.ipv4_address = self._parse_value(v)
|
||||
else:
|
||||
host.set_variable(k, self._parse_value(v))
|
||||
self.groups[active_group_name].add_host(host)
|
||||
|
||||
# [southeast:children]
|
||||
# atlanta
|
||||
# raleigh
|
||||
|
||||
def _parse_group_children(self):
|
||||
group = None
|
||||
|
||||
for line in self.lines:
|
||||
line = line.strip()
|
||||
if line is None or line == '':
|
||||
continue
|
||||
if line.startswith("[") and ":children]" in line:
|
||||
line = line.replace("[","").replace(":children]","")
|
||||
group = self.groups.get(line, None)
|
||||
if group is None:
|
||||
group = self.groups[line] = Group(name=line)
|
||||
elif line.startswith("#") or line.startswith(";"):
|
||||
pass
|
||||
elif line.startswith("["):
|
||||
group = None
|
||||
elif group:
|
||||
kid_group = self.groups.get(line, None)
|
||||
if kid_group is None:
|
||||
raise AnsibleError("child group is not defined: (%s)" % line)
|
||||
else:
|
||||
group.add_child_group(kid_group)
|
||||
|
||||
|
||||
# [webservers:vars]
|
||||
# http_port=1234
|
||||
# maxRequestsPerChild=200
|
||||
|
||||
def _parse_group_variables(self):
|
||||
group = None
|
||||
for line in self.lines:
|
||||
line = line.strip()
|
||||
if line.startswith("[") and ":vars]" in line:
|
||||
line = line.replace("[","").replace(":vars]","")
|
||||
group = self.groups.get(line, None)
|
||||
if group is None:
|
||||
raise AnsibleError("can't add vars to undefined group: %s" % line)
|
||||
elif line.startswith("#") or line.startswith(";"):
|
||||
pass
|
||||
elif line.startswith("["):
|
||||
group = None
|
||||
elif line == '':
|
||||
pass
|
||||
elif group:
|
||||
if "=" not in line:
|
||||
raise AnsibleError("variables assigned to group must be in key=value form")
|
||||
else:
|
||||
(k, v) = [e.strip() for e in line.split("=", 1)]
|
||||
group.set_variable(k, self._parse_value(v))
|
||||
|
||||
def get_host_variables(self, host):
|
||||
return {}
|
||||
|
||||
def _before_comment(self, msg):
|
||||
''' what's the part of a string before a comment? '''
|
||||
msg = msg.replace("\#","**NOT_A_COMMENT**")
|
||||
msg = msg.split("#")[0]
|
||||
msg = msg.replace("**NOT_A_COMMENT**","#")
|
||||
return msg
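A hedged usage sketch of the INI parser above; the module path (ansible.inventory.ini) and the inventory contents are assumptions for illustration:

import tempfile

from ansible.inventory.ini import InventoryParser   # assumed module path for this file

INI = """
[webservers]
web[01:02].example.com http_port=8080

[dbservers]
db1.example.com

[site:children]
webservers
dbservers
"""

with tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False) as fh:
    fh.write(INI)
    path = fh.name

parser = InventoryParser(filename=path)
print(sorted(parser.hosts))    # expected: ['db1.example.com', 'web01.example.com', 'web02.example.com']
print(sorted(parser.groups))   # expected: ['all', 'dbservers', 'site', 'ungrouped', 'webservers']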
|
||||
|
@ -0,0 +1,150 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#############################################
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import ansible.constants as C
|
||||
from ansible.inventory.host import Host
|
||||
from ansible.inventory.group import Group
|
||||
from ansible.module_utils.basic import json_dict_unicode_to_bytes
|
||||
from ansible import utils
|
||||
from ansible import errors
|
||||
import sys
|
||||
|
||||
|
||||
class InventoryScript(object):
|
||||
''' Host inventory parser for ansible using external inventory scripts. '''
|
||||
|
||||
def __init__(self, filename=C.DEFAULT_HOST_LIST):
|
||||
|
||||
# Support inventory scripts that are not prefixed with some
|
||||
# path information but happen to be in the current working
|
||||
# directory when '.' is not in PATH.
|
||||
self.filename = os.path.abspath(filename)
|
||||
cmd = [ self.filename, "--list" ]
|
||||
try:
|
||||
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
except OSError, e:
|
||||
raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
|
||||
(stdout, stderr) = sp.communicate()
|
||||
self.data = stdout
|
||||
# see comment about _meta below
|
||||
self.host_vars_from_top = None
|
||||
self.groups = self._parse(stderr)
|
||||
|
||||
|
||||
def _parse(self, err):
|
||||
|
||||
all_hosts = {}
|
||||
|
||||
# not passing from_remote because data from CMDB is trusted
|
||||
self.raw = utils.parse_json(self.data)
|
||||
self.raw = json_dict_unicode_to_bytes(self.raw)
|
||||
|
||||
all = Group('all')
|
||||
groups = dict(all=all)
|
||||
group = None
|
||||
|
||||
|
||||
if 'failed' in self.raw:
|
||||
sys.stderr.write(err + "\n")
|
||||
raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
|
||||
|
||||
for (group_name, data) in self.raw.items():
|
||||
|
||||
# in Ansible 1.3 and later, a "_meta" subelement may contain
|
||||
# a variable "hostvars" which contains a hash for each host
|
||||
# if this "hostvars" exists at all then do not call --host for each
|
||||
# host. This is for efficiency and scripts should still return data
|
||||
# if called with --host for backwards compat with 1.2 and earlier.
|
||||
|
||||
if group_name == '_meta':
|
||||
if 'hostvars' in data:
|
||||
self.host_vars_from_top = data['hostvars']
|
||||
continue
|
||||
|
||||
if group_name != all.name:
|
||||
group = groups[group_name] = Group(group_name)
|
||||
else:
|
||||
group = all
|
||||
host = None
|
||||
|
||||
if not isinstance(data, dict):
|
||||
data = {'hosts': data}
|
||||
# if neither of those subkeys is present, this is the simplified syntax: a host with vars
|
||||
elif not any(k in data for k in ('hosts','vars')):
|
||||
data = {'hosts': [group_name], 'vars': data}
|
||||
|
||||
if 'hosts' in data:
|
||||
if not isinstance(data['hosts'], list):
|
||||
raise errors.AnsibleError("You defined a group \"%s\" with bad "
|
||||
"data for the host list:\n %s" % (group_name, data))
|
||||
|
||||
for hostname in data['hosts']:
|
||||
if hostname not in all_hosts:
|
||||
all_hosts[hostname] = Host(hostname)
|
||||
host = all_hosts[hostname]
|
||||
group.add_host(host)
|
||||
|
||||
if 'vars' in data:
|
||||
if not isinstance(data['vars'], dict):
|
||||
raise errors.AnsibleError("You defined a group \"%s\" with bad "
|
||||
"data for variables:\n %s" % (group_name, data))
|
||||
|
||||
for k, v in data['vars'].iteritems():
|
||||
if group.name == all.name:
|
||||
all.set_variable(k, v)
|
||||
else:
|
||||
group.set_variable(k, v)
|
||||
|
||||
# Separate loop to ensure all groups are defined
|
||||
for (group_name, data) in self.raw.items():
|
||||
if group_name == '_meta':
|
||||
continue
|
||||
if isinstance(data, dict) and 'children' in data:
|
||||
for child_name in data['children']:
|
||||
if child_name in groups:
|
||||
groups[group_name].add_child_group(groups[child_name])
|
||||
|
||||
for group in groups.values():
|
||||
if group.depth == 0 and group.name != 'all':
|
||||
all.add_child_group(group)
|
||||
|
||||
return groups
|
||||
|
||||
def get_host_variables(self, host):
|
||||
""" Runs <script> --host <hostname> to determine additional host variables """
|
||||
if self.host_vars_from_top is not None:
|
||||
got = self.host_vars_from_top.get(host.name, {})
|
||||
return got
|
||||
|
||||
|
||||
cmd = [self.filename, "--host", host.name]
|
||||
try:
|
||||
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
except OSError, e:
|
||||
raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
|
||||
(out, err) = sp.communicate()
|
||||
if out.strip() == '':
|
||||
return dict()
|
||||
try:
|
||||
return json_dict_unicode_to_bytes(utils.parse_json(out))
|
||||
except ValueError:
|
||||
raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
|
||||
|
@ -0,0 +1,48 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
class VarsModule(object):
|
||||
|
||||
"""
|
||||
Loads variables for groups and/or hosts
|
||||
"""
|
||||
|
||||
def __init__(self, inventory):
|
||||
|
||||
""" constructor """
|
||||
|
||||
self.inventory = inventory
|
||||
self.inventory_basedir = inventory.basedir()
|
||||
|
||||
|
||||
def run(self, host, vault_password=None):
|
||||
""" For backwards compatibility, when only vars per host were retrieved
|
||||
This method should return both host-specific vars and the vars
|
||||
calculated from the groups the host is a member of. """
|
||||
return {}
|
||||
|
||||
|
||||
def get_host_vars(self, host, vault_password=None):
|
||||
""" Get host specific variables. """
|
||||
return {}
|
||||
|
||||
|
||||
def get_group_vars(self, group, vault_password=None):
|
||||
""" Get group specific variables. """
|
||||
return {}
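A hedged sketch of a concrete plugin filling in this interface; the file layout (host_vars/<name>.yml under the inventory basedir), the class name, and the use of PyYAML are assumptions for illustration:

import os
import yaml   # assumed available (PyYAML)

class HostFileVarsModule(object):
    """ Illustrative plugin implementing the same interface as VarsModule above """

    def __init__(self, inventory):
        self.inventory = inventory
        self.inventory_basedir = inventory.basedir()

    def run(self, host, vault_password=None):
        return self.get_host_vars(host, vault_password=vault_password)

    def get_host_vars(self, host, vault_password=None):
        # assumed layout: <inventory_basedir>/host_vars/<hostname>.yml
        path = os.path.join(self.inventory_basedir, 'host_vars', '%s.yml' % host.name)
        if not os.path.exists(path):
            return {}
        with open(path) as f:
            return yaml.safe_load(f) or {}

    def get_group_vars(self, group, vault_password=None):
        return {}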
|
||||
|
@ -0,0 +1,17 @@
|
||||
# 2013, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
@ -0,0 +1,103 @@
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
AXAPI_PORT_PROTOCOLS = {
|
||||
'tcp': 2,
|
||||
'udp': 3,
|
||||
}
|
||||
|
||||
AXAPI_VPORT_PROTOCOLS = {
|
||||
'tcp': 2,
|
||||
'udp': 3,
|
||||
'fast-http': 9,
|
||||
'http': 11,
|
||||
'https': 12,
|
||||
}
|
||||
|
||||
def a10_argument_spec():
|
||||
return dict(
|
||||
host=dict(type='str', required=True),
|
||||
username=dict(type='str', aliases=['user', 'admin'], required=True),
|
||||
password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
|
||||
write_config=dict(type='bool', default=False)
|
||||
)
|
||||
|
||||
def axapi_failure(result):
|
||||
if 'response' in result and result['response'].get('status') == 'fail':
|
||||
return True
|
||||
return False
|
||||
|
||||
def axapi_call(module, url, post=None):
|
||||
'''
|
||||
Returns a datastructure based on the result of the API call
|
||||
'''
|
||||
rsp, info = fetch_url(module, url, data=post)
|
||||
if not rsp or info['status'] >= 400:
|
||||
module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given')))
|
||||
try:
|
||||
raw_data = rsp.read()
|
||||
data = json.loads(raw_data)
|
||||
except ValueError:
|
||||
# at least one API call (system.action.write_config) returns
|
||||
# XML even when JSON is requested, so do some minimal handling
|
||||
# here to prevent failing even when the call succeeded
|
||||
if 'status="ok"' in raw_data.lower():
|
||||
data = {"response": {"status": "OK"}}
|
||||
else:
|
||||
data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
|
||||
except:
|
||||
module.fail_json(msg="could not read the result from the host")
|
||||
finally:
|
||||
rsp.close()
|
||||
return data
|
||||
|
||||
def axapi_authenticate(module, base_url, username, password):
|
||||
url = '%s&method=authenticate&username=%s&password=%s' % (base_url, username, password)
|
||||
result = axapi_call(module, url)
|
||||
if axapi_failure(result):
|
||||
return module.fail_json(msg=result['response']['err']['msg'])
|
||||
sessid = result['session_id']
|
||||
return base_url + '&session_id=' + sessid
|
||||
|
||||
def axapi_enabled_disabled(flag):
|
||||
'''
|
||||
The axapi uses 0/1 integer values for flags, rather than strings
|
||||
or booleans, so convert the given flag to a 0 or 1. For now, params
|
||||
are specified as strings only, so that's what we check.
|
||||
'''
|
||||
if flag == 'enabled':
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
def axapi_get_port_protocol(protocol):
|
||||
return AXAPI_PORT_PROTOCOLS.get(protocol.lower(), None)
|
||||
|
||||
def axapi_get_vport_protocol(protocol):
|
||||
return AXAPI_VPORT_PROTOCOLS.get(protocol.lower(), None)
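A hedged sketch of how a module would typically combine the helpers above; the endpoint URL, the slb.server.getAll method, and the boilerplate imports are illustrative assumptions:

# from ansible.module_utils.basic import *   # assumed module boilerplate (AnsibleModule)
# from ansible.module_utils.urls import *    # assumed source of fetch_url for axapi_call

def main():
    module = AnsibleModule(argument_spec=a10_argument_spec())

    host = module.params['host']
    username = module.params['username']
    password = module.params['password']

    # aXAPI v2 style endpoint; the exact URL format and method name are illustrative
    base_url = 'https://%s/services/rest/V2.1/?format=json' % host
    session_url = axapi_authenticate(module, base_url, username, password)

    result = axapi_call(module, session_url + '&method=slb.server.getAll')
    if axapi_failure(result):
        module.fail_json(msg=result['response']['err']['msg'])
    module.exit_json(changed=False, result=result)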
|
||||
|
File diff suppressed because it is too large
@ -0,0 +1,194 @@
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
try:
|
||||
from distutils.version import LooseVersion
|
||||
HAS_LOOSE_VERSION = True
|
||||
except:
|
||||
HAS_LOOSE_VERSION = False
|
||||
|
||||
AWS_REGIONS = [
|
||||
'ap-northeast-1',
|
||||
'ap-southeast-1',
|
||||
'ap-southeast-2',
|
||||
'cn-north-1',
|
||||
'eu-central-1',
|
||||
'eu-west-1',
|
||||
'sa-east-1',
|
||||
'us-east-1',
|
||||
'us-west-1',
|
||||
'us-west-2',
|
||||
'us-gov-west-1',
|
||||
]
|
||||
|
||||
|
||||
def aws_common_argument_spec():
|
||||
return dict(
|
||||
ec2_url=dict(),
|
||||
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
|
||||
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
|
||||
validate_certs=dict(default=True, type='bool'),
|
||||
security_token=dict(no_log=True),
|
||||
profile=dict(),
|
||||
)
|
||||
|
||||
|
||||
def ec2_argument_spec():
|
||||
spec = aws_common_argument_spec()
|
||||
spec.update(
|
||||
dict(
|
||||
region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
|
||||
)
|
||||
)
|
||||
return spec
|
||||
|
||||
|
||||
def boto_supports_profile_name():
|
||||
return hasattr(boto.ec2.EC2Connection, 'profile_name')
|
||||
|
||||
|
||||
def get_aws_connection_info(module):
|
||||
|
||||
# Check module args for credentials, then check environment vars
|
||||
# access_key
|
||||
|
||||
ec2_url = module.params.get('ec2_url')
|
||||
access_key = module.params.get('aws_access_key')
|
||||
secret_key = module.params.get('aws_secret_key')
|
||||
security_token = module.params.get('security_token')
|
||||
region = module.params.get('region')
|
||||
profile_name = module.params.get('profile')
|
||||
validate_certs = module.params.get('validate_certs')
|
||||
|
||||
if not ec2_url:
|
||||
if 'EC2_URL' in os.environ:
|
||||
ec2_url = os.environ['EC2_URL']
|
||||
elif 'AWS_URL' in os.environ:
|
||||
ec2_url = os.environ['AWS_URL']
|
||||
|
||||
if not access_key:
|
||||
if 'EC2_ACCESS_KEY' in os.environ:
|
||||
access_key = os.environ['EC2_ACCESS_KEY']
|
||||
elif 'AWS_ACCESS_KEY_ID' in os.environ:
|
||||
access_key = os.environ['AWS_ACCESS_KEY_ID']
|
||||
elif 'AWS_ACCESS_KEY' in os.environ:
|
||||
access_key = os.environ['AWS_ACCESS_KEY']
|
||||
else:
|
||||
# in case access_key came in as empty string
|
||||
access_key = None
|
||||
|
||||
if not secret_key:
|
||||
if 'EC2_SECRET_KEY' in os.environ:
|
||||
secret_key = os.environ['EC2_SECRET_KEY']
|
||||
elif 'AWS_SECRET_ACCESS_KEY' in os.environ:
|
||||
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
|
||||
elif 'AWS_SECRET_KEY' in os.environ:
|
||||
secret_key = os.environ['AWS_SECRET_KEY']
|
||||
else:
|
||||
# in case secret_key came in as empty string
|
||||
secret_key = None
|
||||
|
||||
if not region:
|
||||
if 'EC2_REGION' in os.environ:
|
||||
region = os.environ['EC2_REGION']
|
||||
elif 'AWS_REGION' in os.environ:
|
||||
region = os.environ['AWS_REGION']
|
||||
else:
|
||||
# boto.config.get returns None if config not found
|
||||
region = boto.config.get('Boto', 'aws_region')
|
||||
if not region:
|
||||
region = boto.config.get('Boto', 'ec2_region')
|
||||
|
||||
if not security_token:
|
||||
if 'AWS_SECURITY_TOKEN' in os.environ:
|
||||
security_token = os.environ['AWS_SECURITY_TOKEN']
|
||||
else:
|
||||
# in case security_token came in as empty string
|
||||
security_token = None
|
||||
|
||||
boto_params = dict(aws_access_key_id=access_key,
|
||||
aws_secret_access_key=secret_key,
|
||||
security_token=security_token)
|
||||
|
||||
# profile_name only works as a key in boto >= 2.24
|
||||
# so only set profile_name if passed as an argument
|
||||
if profile_name:
|
||||
if not boto_supports_profile_name():
|
||||
module.fail_json("boto does not support profile_name before 2.24")
|
||||
boto_params['profile_name'] = profile_name
|
||||
|
||||
if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"):
|
||||
boto_params['validate_certs'] = validate_certs
|
||||
|
||||
return region, ec2_url, boto_params
|
||||
|
||||
|
||||
def get_ec2_creds(module):
|
||||
''' for compatibility mode with old modules that don't/can't yet
|
||||
use ec2_connect method '''
|
||||
region, ec2_url, boto_params = get_aws_connection_info(module)
|
||||
return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
|
||||
|
||||
|
||||
def boto_fix_security_token_in_profile(conn, profile_name):
|
||||
''' monkey patch for boto issue boto/boto#2100 '''
|
||||
profile = 'profile ' + profile_name
|
||||
if boto.config.has_option(profile, 'aws_security_token'):
|
||||
conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
|
||||
return conn
|
||||
|
||||
|
||||
def connect_to_aws(aws_module, region, **params):
|
||||
conn = aws_module.connect_to_region(region, **params)
|
||||
if params.get('profile_name'):
|
||||
conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
|
||||
return conn
|
||||
|
||||
|
||||
def ec2_connect(module):
|
||||
|
||||
""" Return an ec2 connection"""
|
||||
|
||||
region, ec2_url, boto_params = get_aws_connection_info(module)
|
||||
|
||||
# If we have a region specified, connect to its endpoint.
|
||||
if region:
|
||||
try:
|
||||
ec2 = connect_to_aws(boto.ec2, region, **boto_params)
|
||||
except boto.exception.NoAuthHandlerFound, e:
|
||||
module.fail_json(msg=str(e))
|
||||
# Otherwise, no region so we fallback to the old connection method
|
||||
elif ec2_url:
|
||||
try:
|
||||
ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
|
||||
except boto.exception.NoAuthHandlerFound, e:
|
||||
module.fail_json(msg=str(e))
|
||||
else:
|
||||
module.fail_json(msg="Either region or ec2_url must be specified")
|
||||
|
||||
return ec2
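A hedged sketch of the usual calling pattern for these helpers inside a module; the extra instance_id parameter and the boto call shown are illustrative assumptions:

# from ansible.module_utils.basic import *   # assumed module boilerplate (AnsibleModule)
import boto.ec2

def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        instance_id=dict(type='str', required=True),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    ec2 = ec2_connect(module)   # region/ec2_url/credentials resolved as above
    reservations = ec2.get_all_instances(instance_ids=[module.params['instance_id']])
    instances = [i for r in reservations for i in r.instances]
    module.exit_json(changed=False, count=len(instances))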
|
File diff suppressed because it is too large
@ -0,0 +1,87 @@
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
|
||||
import pprint
|
||||
|
||||
USER_AGENT_PRODUCT="Ansible-gce"
|
||||
USER_AGENT_VERSION="v1"
|
||||
|
||||
def gce_connect(module):
|
||||
"""Return a Google Cloud Engine connection."""
|
||||
service_account_email = module.params.get('service_account_email', None)
|
||||
pem_file = module.params.get('pem_file', None)
|
||||
project_id = module.params.get('project_id', None)
|
||||
|
||||
# If any of the values are not given as parameters, check the appropriate
|
||||
# environment variables.
|
||||
if not service_account_email:
|
||||
service_account_email = os.environ.get('GCE_EMAIL', None)
|
||||
if not project_id:
|
||||
project_id = os.environ.get('GCE_PROJECT', None)
|
||||
if not pem_file:
|
||||
pem_file = os.environ.get('GCE_PEM_FILE_PATH', None)
|
||||
|
||||
# If we still don't have one or more of our credentials, attempt to
|
||||
# get the remaining values from the libcloud secrets file.
|
||||
if service_account_email is None or pem_file is None:
|
||||
try:
|
||||
import secrets
|
||||
except ImportError:
|
||||
secrets = None
|
||||
|
||||
if hasattr(secrets, 'GCE_PARAMS'):
|
||||
if not service_account_email:
|
||||
service_account_email = secrets.GCE_PARAMS[0]
|
||||
if not pem_file:
|
||||
pem_file = secrets.GCE_PARAMS[1]
|
||||
keyword_params = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
|
||||
if not project_id:
|
||||
project_id = keyword_params.get('project', None)
|
||||
|
||||
# If we *still* don't have the credentials we need, then it's time to
|
||||
# just fail out.
|
||||
if service_account_email is None or pem_file is None or project_id is None:
|
||||
module.fail_json(msg='Missing GCE connection parameters in libcloud '
|
||||
'secrets file.')
|
||||
return None
|
||||
|
||||
try:
|
||||
gce = get_driver(Provider.GCE)(service_account_email, pem_file, datacenter=module.params.get('zone'), project=project_id)
|
||||
gce.connection.user_agent_append("%s/%s" % (
|
||||
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
|
||||
except (RuntimeError, ValueError), e:
|
||||
module.fail_json(msg=str(e), changed=False)
|
||||
except Exception, e:
|
||||
module.fail_json(msg=unexpected_error_msg(e), changed=False)
|
||||
|
||||
return gce
|
||||
|
||||
def unexpected_error_msg(error):
|
||||
"""Create an error string based on passed in error."""
|
||||
return 'Unexpected response: ' + pprint.pformat(vars(error))
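A hedged sketch of a module using gce_connect(); the parameter names mirror the ones read above, while the default zone and the list_nodes() call are illustrative assumptions:

def main():
    module = AnsibleModule(argument_spec=dict(   # AnsibleModule assumed from module boilerplate
        service_account_email=dict(),
        pem_file=dict(),
        project_id=dict(),
        zone=dict(default='us-central1-a'),     # illustrative default
    ))
    gce = gce_connect(module)
    try:
        nodes = gce.list_nodes()                # libcloud driver call, for illustration
    except Exception, e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)
    module.exit_json(changed=False, instance_names=[n.name for n in nodes])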
|
@ -0,0 +1,176 @@
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
import hmac
|
||||
import urlparse
|
||||
|
||||
try:
|
||||
from hashlib import sha1
|
||||
except ImportError:
|
||||
import sha as sha1
|
||||
|
||||
HASHED_KEY_MAGIC = "|1|"
|
||||
|
||||
def add_git_host_key(module, url, accept_hostkey=True, create_dir=True):
|
||||
|
||||
""" idempotently add a git url hostkey """
|
||||
|
||||
fqdn = get_fqdn(module.params['repo'])
|
||||
|
||||
if fqdn:
|
||||
known_host = check_hostkey(module, fqdn)
|
||||
if not known_host:
|
||||
if accept_hostkey:
|
||||
rc, out, err = add_host_key(module, fqdn, create_dir=create_dir)
|
||||
if rc != 0:
|
||||
module.fail_json(msg="failed to add %s hostkey: %s" % (fqdn, out + err))
|
||||
else:
|
||||
module.fail_json(msg="%s has an unknown hostkey. Set accept_hostkey to True or manually add the hostkey prior to running the git module" % fqdn)
|
||||
|
||||
def get_fqdn(repo_url):
|
||||
|
||||
""" chop the hostname out of a giturl """
|
||||
|
||||
result = None
|
||||
if "@" in repo_url and "://" not in repo_url:
|
||||
# most likely a git@ or ssh+git@ type URL
|
||||
repo_url = repo_url.split("@", 1)[1]
|
||||
if ":" in repo_url:
|
||||
repo_url = repo_url.split(":")[0]
|
||||
result = repo_url
|
||||
elif "/" in repo_url:
|
||||
repo_url = repo_url.split("/")[0]
|
||||
result = repo_url
|
||||
elif "://" in repo_url:
|
||||
# this should be something we can parse with urlparse
|
||||
parts = urlparse.urlparse(repo_url)
|
||||
if 'ssh' not in parts[0] and 'git' not in parts[0]:
|
||||
# don't try and scan a hostname that's not ssh
|
||||
return None
|
||||
# parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
|
||||
# ensure we actually have a parts[1] before continuing.
|
||||
if parts[1] != '':
|
||||
result = parts[1]
|
||||
if ":" in result:
|
||||
result = result.split(":")[0]
|
||||
if "@" in result:
|
||||
result = result.split("@", 1)[1]
|
||||
|
||||
return result
|
||||
|
||||
def check_hostkey(module, fqdn):
|
||||
return not not_in_host_file(module, fqdn)
|
||||
|
||||
# this is a variant of code found in connection_plugins/paramiko.py and we should modify
|
||||
# the paramiko code to import and use this.
|
||||
|
||||
def not_in_host_file(self, host):
|
||||
|
||||
|
||||
if 'USER' in os.environ:
|
||||
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
|
||||
else:
|
||||
user_host_file = "~/.ssh/known_hosts"
|
||||
user_host_file = os.path.expanduser(user_host_file)
|
||||
|
||||
host_file_list = []
|
||||
host_file_list.append(user_host_file)
|
||||
host_file_list.append("/etc/ssh/ssh_known_hosts")
|
||||
host_file_list.append("/etc/ssh/ssh_known_hosts2")
|
||||
|
||||
hfiles_not_found = 0
|
||||
for hf in host_file_list:
|
||||
if not os.path.exists(hf):
|
||||
hfiles_not_found += 1
|
||||
continue
|
||||
|
||||
try:
|
||||
host_fh = open(hf)
|
||||
except IOError, e:
|
||||
hfiles_not_found += 1
|
||||
continue
|
||||
else:
|
||||
data = host_fh.read()
|
||||
host_fh.close()
|
||||
|
||||
for line in data.split("\n"):
|
||||
if line is None or " " not in line:
|
||||
continue
|
||||
tokens = line.split()
|
||||
if tokens[0].find(HASHED_KEY_MAGIC) == 0:
|
||||
# this is a hashed known host entry
|
||||
try:
|
||||
(kn_salt,kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2)
|
||||
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
|
||||
hash.update(host)
|
||||
if hash.digest() == kn_host.decode('base64'):
|
||||
return False
|
||||
except:
|
||||
# invalid hashed host key, skip it
|
||||
continue
|
||||
else:
|
||||
# standard host file entry
|
||||
if host in tokens[0]:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def add_host_key(module, fqdn, key_type="rsa", create_dir=False):
|
||||
|
||||
""" use ssh-keyscan to add the hostkey """
|
||||
|
||||
result = False
|
||||
keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
|
||||
|
||||
if 'USER' in os.environ:
|
||||
user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
|
||||
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
|
||||
else:
|
||||
user_ssh_dir = "~/.ssh/"
|
||||
user_host_file = "~/.ssh/known_hosts"
|
||||
user_ssh_dir = os.path.expanduser(user_ssh_dir)
|
||||
|
||||
if not os.path.exists(user_ssh_dir):
|
||||
if create_dir:
|
||||
try:
|
||||
os.makedirs(user_ssh_dir, 0700)
|
||||
except:
|
||||
module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
|
||||
else:
|
||||
module.fail_json(msg="%s does not exist" % user_ssh_dir)
|
||||
elif not os.path.isdir(user_ssh_dir):
|
||||
module.fail_json(msg="%s is not a directory" % user_ssh_dir)
|
||||
|
||||
this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
|
||||
|
||||
rc, out, err = module.run_command(this_cmd)
|
||||
module.append_to_file(user_host_file, out)
|
||||
|
||||
return rc, out, err
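A hedged sketch of how the git module is expected to call into these helpers; the argument spec shown is a simplified assumption:

def main():
    module = AnsibleModule(argument_spec=dict(   # AnsibleModule assumed from module boilerplate
        repo=dict(required=True),
        accept_hostkey=dict(default=False, type='bool'),
    ))
    # ensure the git server's SSH host key is known before cloning or fetching
    add_git_host_key(module, module.params['repo'],
                     accept_hostkey=module.params['accept_hostkey'])
    module.exit_json(changed=False)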
|
||||
|
@ -0,0 +1,69 @@
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
import os
|
||||
|
||||
|
||||
def openstack_argument_spec():
|
||||
# Consume standard OpenStack environment variables.
|
||||
# This is mainly useful for ad-hoc command line operation, since
|
||||
# in playbooks these values would normally be supplied as variables.
|
||||
OS_AUTH_URL=os.environ.get('OS_AUTH_URL', 'http://127.0.0.1:35357/v2.0/')
|
||||
OS_PASSWORD=os.environ.get('OS_PASSWORD', None)
|
||||
OS_REGION_NAME=os.environ.get('OS_REGION_NAME', None)
|
||||
OS_USERNAME=os.environ.get('OS_USERNAME', 'admin')
|
||||
OS_TENANT_NAME=os.environ.get('OS_TENANT_NAME', OS_USERNAME)
|
||||
|
||||
spec = dict(
|
||||
login_username = dict(default=OS_USERNAME),
|
||||
auth_url = dict(default=OS_AUTH_URL),
|
||||
region_name = dict(default=OS_REGION_NAME),
|
||||
availability_zone = dict(default=None),
|
||||
)
|
||||
if OS_PASSWORD:
|
||||
spec['login_password'] = dict(default=OS_PASSWORD)
|
||||
else:
|
||||
spec['login_password'] = dict(required=True)
|
||||
if OS_TENANT_NAME:
|
||||
spec['login_tenant_name'] = dict(default=OS_TENANT_NAME)
|
||||
else:
|
||||
spec['login_tenant_name'] = dict(required=True)
|
||||
return spec
|
||||
|
||||
def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):
|
||||
|
||||
ret = []
|
||||
for (k, v) in addresses.iteritems():
|
||||
if key_name and k == key_name:
|
||||
ret.extend([addrs['addr'] for addrs in v])
|
||||
else:
|
||||
for interface_spec in v:
|
||||
if 'OS-EXT-IPS:type' in interface_spec and interface_spec['OS-EXT-IPS:type'] == ext_tag:
|
||||
ret.append(interface_spec['addr'])
|
||||
return ret
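A hedged sketch of the two helpers above in use; the nova 'addresses' structure is an illustrative assumption:

def main():
    module = AnsibleModule(argument_spec=openstack_argument_spec())   # AnsibleModule assumed

    # given a nova server's 'addresses' structure, pick out its fixed IPs
    addresses = {'private': [{'addr': '10.0.0.5', 'OS-EXT-IPS:type': 'fixed'}]}
    fixed_ips = openstack_find_nova_addresses(addresses, 'fixed')
    module.exit_json(changed=False, fixed_ips=fixed_ips)   # expected: ['10.0.0.5']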
|
||||
|
@ -0,0 +1,144 @@
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2014, and others
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
|
||||
# Helper function to parse Ansible JSON arguments from a file passed as
|
||||
# the single argument to the module
|
||||
# Example: $params = Parse-Args $args
|
||||
Function Parse-Args($arguments)
|
||||
{
|
||||
$parameters = New-Object psobject;
|
||||
If ($arguments.Length -gt 0)
|
||||
{
|
||||
$parameters = Get-Content $arguments[0] | ConvertFrom-Json;
|
||||
}
|
||||
$parameters;
|
||||
}
|
||||
|
||||
# Helper function to set an "attribute" on a psobject instance in powershell.
|
||||
# This is a convenience to make adding Members to the object easier and
|
||||
# slightly more pythonic
|
||||
# Example: Set-Attr $result "changed" $true
|
||||
Function Set-Attr($obj, $name, $value)
|
||||
{
|
||||
# If the provided $obj is undefined, define one to be nice
|
||||
If (-not $obj.GetType)
|
||||
{
|
||||
$obj = New-Object psobject
|
||||
}
|
||||
|
||||
$obj | Add-Member -Force -MemberType NoteProperty -Name $name -Value $value
|
||||
}
|
||||
|
||||
# Helper function to convert a powershell object to JSON to echo it, exiting
|
||||
# the script
|
||||
# Example: Exit-Json $result
|
||||
Function Exit-Json($obj)
|
||||
{
|
||||
# If the provided $obj is undefined, define one to be nice
|
||||
If (-not $obj.GetType)
|
||||
{
|
||||
$obj = New-Object psobject
|
||||
}
|
||||
|
||||
echo $obj | ConvertTo-Json -Depth 99
|
||||
Exit
|
||||
}
|
||||
|
||||
# Helper function to add the "msg" property and "failed" property, convert the
|
||||
# powershell object to JSON and echo it, exiting the script
|
||||
# Example: Fail-Json $result "This is the failure message"
|
||||
Function Fail-Json($obj, $message = $null)
|
||||
{
|
||||
# If we weren't given 2 args, and the only arg was a string, create a new
|
||||
# psobject and use the arg as the failure message
|
||||
If ($message -eq $null -and $obj.GetType().Name -eq "String")
|
||||
{
|
||||
$message = $obj
|
||||
$obj = New-Object psobject
|
||||
}
|
||||
# If the first args is undefined or not an object, make it an object
|
||||
ElseIf (-not $obj.GetType -or $obj.GetType().Name -ne "PSCustomObject")
|
||||
{
|
||||
$obj = New-Object psobject
|
||||
}
|
||||
|
||||
Set-Attr $obj "msg" $message
|
||||
Set-Attr $obj "failed" $true
|
||||
echo $obj | ConvertTo-Json -Depth 99
|
||||
Exit 1
|
||||
}
|
||||
|
||||
# Helper function to get an "attribute" from a psobject instance in powershell.
|
||||
# This is a convenience to make getting Members from an object easier and
|
||||
# slightly more pythonic
|
||||
# Example: $attr = Get-Attr $response "code" -default "1"
|
||||
#Note that if you use the failifempty option, you do need to specify resultobject as well.
|
||||
Function Get-Attr($obj, $name, $default = $null,$resultobj, $failifempty=$false, $emptyattributefailmessage)
|
||||
{
|
||||
# Check if the provided Member $name exists in $obj and return it or the
|
||||
# default
|
||||
If ($obj.$name.GetType)
|
||||
{
|
||||
$obj.$name
|
||||
}
|
||||
Elseif($failifempty -eq $false)
|
||||
{
|
||||
$default
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!$emptyattributefailmessage) {$emptyattributefailmessage = "Missing required argument: $name"}
|
||||
Fail-Json -obj $resultobj -message $emptyattributefailmessage
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
# Helper filter/pipeline function to convert a value to boolean following current
|
||||
# Ansible practices
|
||||
# Example: $is_true = "true" | ConvertTo-Bool
|
||||
Function ConvertTo-Bool
|
||||
{
|
||||
param(
|
||||
[parameter(valuefrompipeline=$true)]
|
||||
$obj
|
||||
)
|
||||
|
||||
$boolean_strings = "yes", "on", "1", "true", 1
|
||||
$obj_string = [string]$obj
|
||||
|
||||
if (($obj.GetType().Name -eq "Boolean" -and $obj) -or $boolean_strings -contains $obj_string.ToLower())
|
||||
{
|
||||
$true
|
||||
}
|
||||
Else
|
||||
{
|
||||
$false
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -0,0 +1,277 @@
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by
|
||||
# Ansible still belong to the author of the module, and may assign their own
|
||||
# license to the complete work.
|
||||
#
|
||||
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
# POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
import os
import re

from uuid import UUID
|
||||
|
||||
|
||||
FINAL_STATUSES = ('ACTIVE', 'ERROR')
|
||||
VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
|
||||
'error', 'error_deleting')
|
||||
|
||||
CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
|
||||
'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
|
||||
CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
|
||||
'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
|
||||
'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
|
||||
|
||||
NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
|
||||
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
|
||||
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
|
||||
|
||||
|
||||
def rax_slugify(value):
|
||||
"""Prepend a key with rax_ and normalize the key name"""
|
||||
return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_'))
|
||||
|
||||
|
||||
def rax_clb_node_to_dict(obj):
|
||||
"""Function to convert a CLB Node object to a dict"""
|
||||
if not obj:
|
||||
return {}
|
||||
node = obj.to_dict()
|
||||
node['id'] = obj.id
|
||||
node['weight'] = obj.weight
|
||||
return node
|
||||
|
||||
|
||||
def rax_to_dict(obj, obj_type='standard'):
|
||||
"""Generic function to convert a pyrax object to a dict
|
||||
|
||||
obj_type values:
|
||||
standard
|
||||
clb
|
||||
server
|
||||
|
||||
"""
|
||||
instance = {}
|
||||
for key in dir(obj):
|
||||
value = getattr(obj, key)
|
||||
if obj_type == 'clb' and key == 'nodes':
|
||||
instance[key] = []
|
||||
for node in value:
|
||||
instance[key].append(rax_clb_node_to_dict(node))
|
||||
elif (isinstance(value, list) and len(value) > 0 and
|
||||
not isinstance(value[0], NON_CALLABLES)):
|
||||
instance[key] = []
|
||||
for item in value:
|
||||
instance[key].append(rax_to_dict(item))
|
||||
elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
|
||||
if obj_type == 'server':
|
||||
key = rax_slugify(key)
|
||||
instance[key] = value
|
||||
|
||||
if obj_type == 'server':
|
||||
for attr in ['id', 'accessIPv4', 'name', 'status']:
|
||||
instance[attr] = instance.get(rax_slugify(attr))
|
||||
|
||||
return instance
|
||||
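# Illustrative usage of rax_to_dict (not part of the original snippet; the
# server object and field values are assumptions based on a typical pyrax
# server instance):
#
#   server_dict = rax_to_dict(server, obj_type='server')
#   server_dict['rax_status']   # slugified copy of the 'status' attribute
#   server_dict['status']       # plain key re-added for servers (see above)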
|
||||
|
||||
def rax_find_image(module, rax_module, image):
|
||||
cs = rax_module.cloudservers
|
||||
try:
|
||||
UUID(image)
|
||||
except ValueError:
|
||||
try:
|
||||
image = cs.images.find(human_id=image)
|
||||
except(cs.exceptions.NotFound,
|
||||
cs.exceptions.NoUniqueMatch):
|
||||
try:
|
||||
image = cs.images.find(name=image)
|
||||
except (cs.exceptions.NotFound,
|
||||
cs.exceptions.NoUniqueMatch):
|
||||
module.fail_json(msg='No matching image found (%s)' %
|
||||
image)
|
||||
|
||||
return rax_module.utils.get_id(image)
|
||||
|
||||
|
||||
def rax_find_volume(module, rax_module, name):
|
||||
cbs = rax_module.cloud_blockstorage
|
||||
try:
|
||||
UUID(name)
|
||||
volume = cbs.get(name)
|
||||
except ValueError:
|
||||
try:
|
||||
volume = cbs.find(name=name)
|
||||
except rax_module.exc.NotFound:
|
||||
volume = None
|
||||
except Exception, e:
|
||||
module.fail_json(msg='%s' % e)
|
||||
return volume
|
||||
|
||||
|
||||
def rax_find_network(module, rax_module, network):
|
||||
cnw = rax_module.cloud_networks
|
||||
try:
|
||||
UUID(network)
|
||||
except ValueError:
|
||||
if network.lower() == 'public':
|
||||
return cnw.get_server_networks(PUBLIC_NET_ID)
|
||||
elif network.lower() == 'private':
|
||||
return cnw.get_server_networks(SERVICE_NET_ID)
|
||||
else:
|
||||
try:
|
||||
network_obj = cnw.find_network_by_label(network)
|
||||
except (rax_module.exceptions.NetworkNotFound,
|
||||
rax_module.exceptions.NetworkLabelNotUnique):
|
||||
module.fail_json(msg='No matching network found (%s)' %
|
||||
network)
|
||||
else:
|
||||
return cnw.get_server_networks(network_obj)
|
||||
else:
|
||||
return cnw.get_server_networks(network)
|
||||
|
||||
|
||||
def rax_find_server(module, rax_module, server):
|
||||
cs = rax_module.cloudservers
|
||||
try:
|
||||
UUID(server)
|
||||
server = cs.servers.get(server)
|
||||
except ValueError:
|
||||
servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
|
||||
if not servers:
|
||||
module.fail_json(msg='No Server was matched by name, '
|
||||
'try using the Server ID instead')
|
||||
if len(servers) > 1:
|
||||
module.fail_json(msg='Multiple servers matched by name, '
|
||||
'try using the Server ID instead')
|
||||
|
||||
# We made it this far, grab the first and hopefully only server
|
||||
# in the list
|
||||
server = servers[0]
|
||||
return server
|
||||
|
||||
|
||||
def rax_find_loadbalancer(module, rax_module, loadbalancer):
|
||||
clb = rax_module.cloud_loadbalancers
|
||||
try:
|
||||
found = clb.get(loadbalancer)
|
||||
except:
|
||||
found = []
|
||||
for lb in clb.list():
|
||||
if loadbalancer == lb.name:
|
||||
found.append(lb)
|
||||
|
||||
if not found:
|
||||
module.fail_json(msg='No loadbalancer was matched')
|
||||
|
||||
if len(found) > 1:
|
||||
module.fail_json(msg='Multiple loadbalancers matched')
|
||||
|
||||
# We made it this far, grab the first and hopefully only item
|
||||
# in the list
|
||||
found = found[0]
|
||||
|
||||
return found
|
||||
|
||||
|
||||
def rax_argument_spec():
|
||||
return dict(
|
||||
api_key=dict(type='str', aliases=['password'], no_log=True),
|
||||
auth_endpoint=dict(type='str'),
|
||||
credentials=dict(type='str', aliases=['creds_file']),
|
||||
env=dict(type='str'),
|
||||
identity_type=dict(type='str', default='rackspace'),
|
||||
region=dict(type='str'),
|
||||
tenant_id=dict(type='str'),
|
||||
tenant_name=dict(type='str'),
|
||||
username=dict(type='str'),
|
||||
verify_ssl=dict(choices=BOOLEANS, type='bool'),
|
||||
)
|
||||
|
||||
|
||||
def rax_required_together():
|
||||
return [['api_key', 'username']]
|
||||
|
||||
|
||||
def setup_rax_module(module, rax_module, region_required=True):
|
||||
rax_module.USER_AGENT = 'ansible/%s %s' % (ANSIBLE_VERSION,
|
||||
rax_module.USER_AGENT)
|
||||
|
||||
api_key = module.params.get('api_key')
|
||||
auth_endpoint = module.params.get('auth_endpoint')
|
||||
credentials = module.params.get('credentials')
|
||||
env = module.params.get('env')
|
||||
identity_type = module.params.get('identity_type')
|
||||
region = module.params.get('region')
|
||||
tenant_id = module.params.get('tenant_id')
|
||||
tenant_name = module.params.get('tenant_name')
|
||||
username = module.params.get('username')
|
||||
verify_ssl = module.params.get('verify_ssl')
|
||||
|
||||
if env is not None:
|
||||
rax_module.set_environment(env)
|
||||
|
||||
rax_module.set_setting('identity_type', identity_type)
|
||||
if verify_ssl is not None:
|
||||
rax_module.set_setting('verify_ssl', verify_ssl)
|
||||
if auth_endpoint is not None:
|
||||
rax_module.set_setting('auth_endpoint', auth_endpoint)
|
||||
if tenant_id is not None:
|
||||
rax_module.set_setting('tenant_id', tenant_id)
|
||||
if tenant_name is not None:
|
||||
rax_module.set_setting('tenant_name', tenant_name)
|
||||
|
||||
try:
|
||||
username = username or os.environ.get('RAX_USERNAME')
|
||||
if not username:
|
||||
username = rax_module.get_setting('keyring_username')
|
||||
if username:
|
||||
api_key = 'USE_KEYRING'
|
||||
if not api_key:
|
||||
api_key = os.environ.get('RAX_API_KEY')
|
||||
credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
|
||||
os.environ.get('RAX_CREDS_FILE'))
|
||||
region = (region or os.environ.get('RAX_REGION') or
|
||||
rax_module.get_setting('region'))
|
||||
except KeyError, e:
|
||||
module.fail_json(msg='Unable to load %s' % e.message)
|
||||
|
||||
try:
|
||||
if api_key and username:
|
||||
if api_key == 'USE_KEYRING':
|
||||
rax_module.keyring_auth(username, region=region)
|
||||
else:
|
||||
rax_module.set_credentials(username, api_key=api_key,
|
||||
region=region)
|
||||
elif credentials:
|
||||
credentials = os.path.expanduser(credentials)
|
||||
rax_module.set_credential_file(credentials, region=region)
|
||||
else:
|
||||
raise Exception('No credentials supplied!')
|
||||
except Exception, e:
|
||||
module.fail_json(msg='%s' % e.message)
|
||||
|
||||
if region_required and region not in rax_module.regions:
|
||||
module.fail_json(msg='%s is not a valid region, must be one of: %s' %
|
||||
(region, ','.join(rax_module.regions)))
|
||||
|
||||
return rax_module
|
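# Illustrative end-to-end usage from a Rackspace module (not part of the
# original snippet; AnsibleModule and the pyrax import are assumed to be
# provided by the module that embeds this file):
#
#   argument_spec = rax_argument_spec()
#   argument_spec.update(dict(name=dict(required=True)))
#   module = AnsibleModule(argument_spec=argument_spec,
#                          required_together=rax_required_together())
#   pyrax = setup_rax_module(module, pyrax)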
@ -0,0 +1,280 @@
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), James Laska
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
import os
|
||||
import re
|
||||
import types
|
||||
import ConfigParser
|
||||
import shlex
|
||||
|
||||
|
||||
class RegistrationBase(object):
|
||||
def __init__(self, module, username=None, password=None):
|
||||
self.module = module
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
def configure(self):
|
||||
raise NotImplementedError("Must be implemented by a sub-class")
|
||||
|
||||
def enable(self):
|
||||
# Remove any existing redhat.repo
|
||||
redhat_repo = '/etc/yum.repos.d/redhat.repo'
|
||||
if os.path.isfile(redhat_repo):
|
||||
os.unlink(redhat_repo)
|
||||
|
||||
def register(self):
|
||||
raise NotImplementedError("Must be implemented by a sub-class")
|
||||
|
||||
def unregister(self):
|
||||
raise NotImplementedError("Must be implemented by a sub-class")
|
||||
|
||||
def unsubscribe(self):
|
||||
raise NotImplementedError("Must be implemented by a sub-class")
|
||||
|
||||
def update_plugin_conf(self, plugin, enabled=True):
|
||||
plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
|
||||
if os.path.isfile(plugin_conf):
|
||||
cfg = ConfigParser.ConfigParser()
|
||||
cfg.read([plugin_conf])
|
||||
if enabled:
|
||||
cfg.set('main', 'enabled', 1)
|
||||
else:
|
||||
cfg.set('main', 'enabled', 0)
|
||||
fd = open(plugin_conf, 'w')  # rewrite the plugin config with the updated 'enabled' value
|
||||
cfg.write(fd)
|
||||
fd.close()
|
||||
|
||||
def subscribe(self, **kwargs):
|
||||
raise NotImplementedError("Must be implemented by a sub-class")
|
||||
|
||||
|
||||
class Rhsm(RegistrationBase):
|
||||
def __init__(self, module, username=None, password=None):
|
||||
RegistrationBase.__init__(self, module, username, password)
|
||||
self.config = self._read_config()
|
||||
self.module = module
|
||||
|
||||
def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
|
||||
'''
|
||||
Load RHSM configuration from /etc/rhsm/rhsm.conf.
|
||||
Returns:
|
||||
* ConfigParser object
|
||||
'''
|
||||
|
||||
# Read RHSM defaults ...
|
||||
cp = ConfigParser.ConfigParser()
|
||||
cp.read(rhsm_conf)
|
||||
|
||||
# Add support for specifying a default value w/o having to standup some configuration
|
||||
# Yeah, I know this should be subclassed ... but, oh well
|
||||
def get_option_default(self, key, default=''):
|
||||
sect, opt = key.split('.', 1)
|
||||
if self.has_section(sect) and self.has_option(sect, opt):
|
||||
return self.get(sect, opt)
|
||||
else:
|
||||
return default
|
||||
|
||||
cp.get_option = types.MethodType(get_option_default, cp, ConfigParser.ConfigParser)
|
||||
|
||||
return cp
|
||||
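# Illustrative use of the get_option helper attached above (the option name
# and default value are assumptions, not taken from the original code):
#
#   config = self._read_config()
#   hostname = config.get_option('server.hostname', default='subscription.rhn.redhat.com')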
|
||||
def enable(self):
|
||||
'''
|
||||
Enable the system to receive updates from subscription-manager.
|
||||
This involves updating affected yum plugins and removing any
|
||||
conflicting yum repositories.
|
||||
'''
|
||||
RegistrationBase.enable(self)
|
||||
self.update_plugin_conf('rhnplugin', False)
|
||||
self.update_plugin_conf('subscription-manager', True)
|
||||
|
||||
def configure(self, **kwargs):
|
||||
'''
|
||||
Configure the system as directed for registration with RHN
|
||||
Raises:
|
||||
* Exception - if error occurs while running command
|
||||
'''
|
||||
args = ['subscription-manager', 'config']
|
||||
|
||||
# Pass supplied **kwargs as parameters to subscription-manager. Ignore
|
||||
# non-configuration parameters and replace '_' with '.'. For example,
|
||||
# 'server_hostname' becomes '--server.hostname'.
|
||||
for k,v in kwargs.items():
|
||||
if re.search(r'^(system|rhsm)_', k):
|
||||
args.append('--%s=%s' % (k.replace('_','.'), v))
|
||||
|
||||
self.module.run_command(args, check_rc=True)
|
||||
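# Illustrative mapping performed by configure() (the hostname is an assumed
# example value):
#
#   self.configure(server_hostname='subscription.example.com')
#   # runs: subscription-manager config --server.hostname=subscription.example.com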
|
||||
@property
|
||||
def is_registered(self):
|
||||
'''
|
||||
Determine whether the current system is registered.
|
||||
Returns:
|
||||
* Boolean - whether the current system is currently registered to
|
||||
RHN.
|
||||
'''
|
||||
# Quick version (intentionally disabled): check for the consumer certs directly
|
||||
if False:
|
||||
return os.path.isfile('/etc/pki/consumer/cert.pem') and \
|
||||
os.path.isfile('/etc/pki/consumer/key.pem')
|
||||
|
||||
args = ['subscription-manager', 'identity']
|
||||
rc, stdout, stderr = self.module.run_command(args, check_rc=False)
|
||||
if rc == 0:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def register(self, username, password, autosubscribe, activationkey):
|
||||
'''
|
||||
Register the current system to the provided RHN server
|
||||
Raises:
|
||||
* Exception - if error occurs while running command
|
||||
'''
|
||||
args = ['subscription-manager', 'register']
|
||||
|
||||
# Generate command arguments
|
||||
if activationkey:
|
||||
args.extend(['--activationkey', activationkey])
|
||||
else:
|
||||
if autosubscribe:
|
||||
args.append('--autosubscribe')
|
||||
if username:
|
||||
args.extend(['--username', username])
|
||||
if password:
|
||||
args.extend(['--password', password])
|
||||
|
||||
# Do the needful...
|
||||
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
|
||||
|
||||
def unsubscribe(self):
|
||||
'''
|
||||
Unsubscribe a system from all subscribed channels
|
||||
Raises:
|
||||
* Exception - if error occurs while running command
|
||||
'''
|
||||
args = ['subscription-manager', 'unsubscribe', '--all']
|
||||
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
|
||||
|
||||
def unregister(self):
|
||||
'''
|
||||
Unregister a currently registered system
|
||||
Raises:
|
||||
* Exception - if error occurs while running command
|
||||
'''
|
||||
args = ['subscription-manager', 'unregister']
|
||||
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
|
||||
|
||||
def subscribe(self, regexp):
|
||||
'''
|
||||
Subscribe current system to available pools matching the specified
|
||||
regular expression
|
||||
Raises:
|
||||
* Exception - if error occurs while running command
|
||||
'''
|
||||
|
||||
# Available pools ready for subscription
|
||||
available_pools = RhsmPools(self.module)
|
||||
|
||||
for pool in available_pools.filter(regexp):
|
||||
pool.subscribe()
|
||||
|
||||
|
||||
class RhsmPool(object):
|
||||
'''
|
||||
Convenience class for housing subscription information
|
||||
'''
|
||||
|
||||
def __init__(self, module, **kwargs):
|
||||
self.module = module
|
||||
for k,v in kwargs.items():
|
||||
setattr(self, k, v)
|
||||
|
||||
def __str__(self):
|
||||
return str(self.__getattribute__('_name'))
|
||||
|
||||
def subscribe(self):
|
||||
args = "subscription-manager subscribe --pool %s" % self.PoolId
|
||||
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
|
||||
if rc == 0:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
class RhsmPools(object):
|
||||
"""
|
||||
This class is used for manipulating pools subscriptions with RHSM
|
||||
"""
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.products = self._load_product_list()
|
||||
|
||||
def __iter__(self):
|
||||
return self.products.__iter__()
|
||||
|
||||
def _load_product_list(self):
|
||||
"""
|
||||
Loads list of all available pools for system in data structure
|
||||
"""
|
||||
args = "subscription-manager list --available"
|
||||
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
|
||||
|
||||
products = []
|
||||
for line in stdout.split('\n'):
|
||||
# Remove leading+trailing whitespace
|
||||
line = line.strip()
|
||||
# An empty line implies the end of an output group
|
||||
if len(line) == 0:
|
||||
continue
|
||||
# If a colon ':' is found, parse
|
||||
elif ':' in line:
|
||||
(key, value) = line.split(':',1)
|
||||
key = key.strip().replace(" ", "") # normalize key names by removing spaces
|
||||
value = value.strip()
|
||||
if key in ['ProductName', 'SubscriptionName']:
|
||||
# Remember the name for later processing
|
||||
products.append(RhsmPool(self.module, _name=value, key=value))
|
||||
elif products:
|
||||
# Associate value with most recently recorded product
|
||||
products[-1].__setattr__(key, value)
|
||||
# FIXME - log some warning?
|
||||
#else:
|
||||
# warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
|
||||
return products
|
||||
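# Illustrative fragment of the 'subscription-manager list --available' output
# that the parser above expects (the product and pool values are made up, and
# the exact field set varies between subscription-manager versions):
#
#   Product Name:      Example Enterprise Linux Server
#   Pool Id:           8a85f9843abcdef0013abcdef0123456
#   Quantity:          1
#   Expires:           2015-01-01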
|
||||
def filter(self, regexp='^$'):
|
||||
'''
|
||||
Return a list of RhsmPools whose name matches the provided regular expression
|
||||
'''
|
||||
r = re.compile(regexp)
|
||||
for product in self.products:
|
||||
if r.search(product._name):
|
||||
yield product
|
||||
|
@ -0,0 +1,201 @@
|
||||
# (c) 2014 James Cammarata, <jcammarata@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
def _get_quote_state(token, quote_char):
|
||||
'''
|
||||
the goal of this block is to determine if the quoted string
|
||||
is unterminated in which case it needs to be put back together
|
||||
'''
|
||||
# the char before the current one, used to see if
|
||||
# the current character is escaped
|
||||
prev_char = None
|
||||
for idx, cur_char in enumerate(token):
|
||||
if idx > 0:
|
||||
prev_char = token[idx-1]
|
||||
if cur_char in '"\'' and prev_char != '\\':
|
||||
if quote_char:
|
||||
if cur_char == quote_char:
|
||||
quote_char = None
|
||||
else:
|
||||
quote_char = cur_char
|
||||
return quote_char
|
||||
|
||||
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
|
||||
'''
|
||||
this function counts the number of opening/closing blocks for a
|
||||
given opening/closing type and adjusts the current depth for that
|
||||
block based on the difference
|
||||
'''
|
||||
num_open = token.count(open_token)
|
||||
num_close = token.count(close_token)
|
||||
if num_open != num_close:
|
||||
cur_depth += (num_open - num_close)
|
||||
if cur_depth < 0:
|
||||
cur_depth = 0
|
||||
return cur_depth
|
||||
|
||||
def split_args(args):
|
||||
'''
|
||||
Splits args on whitespace, but intelligently reassembles
|
||||
those that may have been split over a jinja2 block or quotes.
|
||||
|
||||
When used in a remote module, we won't ever have to be concerned about
|
||||
jinja2 blocks, however this function is/will be used in the
|
||||
core portions as well before the args are templated.
|
||||
|
||||
example input: a=b c="foo bar"
|
||||
example output: ['a=b', 'c="foo bar"']
|
||||
|
||||
Basically this is a variation shlex that has some more intelligence for
|
||||
how Ansible needs to use it.
|
||||
'''
|
||||
|
||||
# the list of params parsed out of the arg string
|
||||
# this is going to be the result value when we are done
|
||||
params = []
|
||||
|
||||
# here we encode the args, so we have a uniform charset to
|
||||
# work with, and split it into lines on newlines
|
||||
args = args.strip()
|
||||
try:
|
||||
args = args.encode('utf-8')
|
||||
do_decode = True
|
||||
except UnicodeDecodeError:
|
||||
do_decode = False
|
||||
items = args.split('\n')
|
||||
|
||||
# iterate over the tokens, and reassemble any that may have been
|
||||
# split on a space inside a jinja2 block.
|
||||
# e.g. if the tokens are "{{", "foo", "}}", these go together
|
||||
|
||||
# These variables are used
|
||||
# to keep track of the state of the parsing, since blocks and quotes
|
||||
# may be nested within each other.
|
||||
|
||||
quote_char = None
|
||||
inside_quotes = False
|
||||
print_depth = 0 # used to count nested jinja2 {{ }} blocks
|
||||
block_depth = 0 # used to count nested jinja2 {% %} blocks
|
||||
comment_depth = 0 # used to count nested jinja2 {# #} blocks
|
||||
|
||||
# now we loop over each split chunk, coalescing tokens if the white space
|
||||
# split occurred within quotes or a jinja2 block of some kind
|
||||
for itemidx,item in enumerate(items):
|
||||
|
||||
# we split on spaces and newlines separately, so that we
|
||||
# can tell which character we split on for reassembly
|
||||
# inside quotation characters
|
||||
tokens = item.strip().split(' ')
|
||||
|
||||
line_continuation = False
|
||||
for idx,token in enumerate(tokens):
|
||||
|
||||
# if we hit a line continuation character, but
|
||||
# we're not inside quotes, ignore it and continue
|
||||
# on to the next token while setting a flag
|
||||
if token == '\\' and not inside_quotes:
|
||||
line_continuation = True
|
||||
continue
|
||||
|
||||
# store the previous quoting state for checking later
|
||||
was_inside_quotes = inside_quotes
|
||||
quote_char = _get_quote_state(token, quote_char)
|
||||
inside_quotes = quote_char is not None
|
||||
|
||||
# multiple conditions may append a token to the list of params,
|
||||
# so we keep track with this flag to make sure it only happens once
|
||||
# append means add to the end of the list, don't append means concatenate
|
||||
# it to the end of the last token
|
||||
appended = False
|
||||
|
||||
# if we're inside quotes now, but weren't before, append the token
|
||||
# to the end of the list, since we'll tack on more to it later
|
||||
# otherwise, if we're inside any jinja2 block, inside quotes, or we were
|
||||
# inside quotes (but aren't now) concat this token to the last param
|
||||
if inside_quotes and not was_inside_quotes:
|
||||
params.append(token)
|
||||
appended = True
|
||||
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
|
||||
if idx == 0 and not inside_quotes and was_inside_quotes:
|
||||
params[-1] = "%s%s" % (params[-1], token)
|
||||
elif len(tokens) > 1:
|
||||
spacer = ''
|
||||
if idx > 0:
|
||||
spacer = ' '
|
||||
params[-1] = "%s%s%s" % (params[-1], spacer, token)
|
||||
else:
|
||||
spacer = ''
|
||||
if not params[-1].endswith('\n') and idx == 0:
|
||||
spacer = '\n'
|
||||
params[-1] = "%s%s%s" % (params[-1], spacer, token)
|
||||
appended = True
|
||||
|
||||
# if the number of paired block tags is not the same, the depth has changed, so we calculate that here
|
||||
# and may append the current token to the params (if we haven't previously done so)
|
||||
prev_print_depth = print_depth
|
||||
print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
|
||||
if print_depth != prev_print_depth and not appended:
|
||||
params.append(token)
|
||||
appended = True
|
||||
|
||||
prev_block_depth = block_depth
|
||||
block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
|
||||
if block_depth != prev_block_depth and not appended:
|
||||
params.append(token)
|
||||
appended = True
|
||||
|
||||
prev_comment_depth = comment_depth
|
||||
comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
|
||||
if comment_depth != prev_comment_depth and not appended:
|
||||
params.append(token)
|
||||
appended = True
|
||||
|
||||
# finally, if we're at zero depth for all blocks and not inside quotes, and have not
|
||||
# yet appended anything to the list of params, we do so now
|
||||
if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
|
||||
params.append(token)
|
||||
|
||||
# if this was the last token in the list, and we have more than
|
||||
# one item (meaning we split on newlines), add a newline back here
|
||||
# to preserve the original structure
|
||||
if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
|
||||
if not params[-1].endswith('\n') or item == '':
|
||||
params[-1] += '\n'
|
||||
|
||||
# always clear the line continuation flag
|
||||
line_continuation = False
|
||||
|
||||
# If we're done and things are not at zero depth or we're still inside quotes,
|
||||
# raise an error to indicate that the args were unbalanced
|
||||
if print_depth or block_depth or comment_depth or inside_quotes:
|
||||
raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")
|
||||
|
||||
# finally, we decode each param back to the unicode it was in the arg string
|
||||
if do_decode:
|
||||
params = [x.decode('utf-8') for x in params]
|
||||
|
||||
return params
|
||||
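# Illustrative behaviour of split_args() (the argument strings are made-up
# examples):
#
#   split_args('src=/tmp/foo dest="/tmp/some dir" mode=0644')
#   # -> ['src=/tmp/foo', 'dest="/tmp/some dir"', 'mode=0644']
#
#   split_args('msg={{ a + b }} count=1')
#   # -> ['msg={{ a + b }}', 'count=1']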
|
||||
def is_quoted(data):
|
||||
return len(data) > 0 and (data[0] == '"' and data[-1] == '"' or data[0] == "'" and data[-1] == "'")
|
||||
|
||||
def unquote(data):
|
||||
''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
|
||||
if is_quoted(data):
|
||||
return data[1:-1]
|
||||
return data
|
||||
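# Illustrative behaviour of is_quoted()/unquote():
#
#   unquote('"/tmp/some dir"')   # -> '/tmp/some dir'
#   unquote("plain_value")       # -> 'plain_value' (returned unchanged)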
|
@ -0,0 +1,456 @@
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
try:
|
||||
import urllib
|
||||
HAS_URLLIB = True
|
||||
except:
|
||||
HAS_URLLIB = False
|
||||
|
||||
try:
|
||||
import urllib2
|
||||
HAS_URLLIB2 = True
|
||||
except:
|
||||
HAS_URLLIB2 = False
|
||||
|
||||
try:
|
||||
import urlparse
|
||||
HAS_URLPARSE = True
|
||||
except:
|
||||
HAS_URLPARSE = False
|
||||
|
||||
try:
|
||||
import ssl
|
||||
HAS_SSL=True
|
||||
except:
|
||||
HAS_SSL=False
|
||||
|
||||
import httplib
|
||||
import os
|
||||
import re
|
||||
import socket
|
||||
import sys
import tempfile
|
||||
|
||||
|
||||
# This is a dummy cacert provided for Mac OS since you need at least 1
|
||||
# ca cert, regardless of validity, for Python on Mac OS to use the
|
||||
# keychain functionality in OpenSSL for validating SSL certificates.
|
||||
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
|
||||
DUMMY_CA_CERT = """-----BEGIN CERTIFICATE-----
|
||||
MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
|
||||
BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
|
||||
MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
|
||||
MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
|
||||
VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
|
||||
gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
|
||||
gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
|
||||
4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
|
||||
gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
|
||||
FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
|
||||
CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
|
||||
aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
|
||||
MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
|
||||
qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
|
||||
zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
|
||||
-----END CERTIFICATE-----
|
||||
"""
|
||||
|
||||
class CustomHTTPSConnection(httplib.HTTPSConnection):
|
||||
def connect(self):
|
||||
"Connect to a host on a given (SSL) port."
|
||||
|
||||
if hasattr(self, 'source_address'):
|
||||
sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
|
||||
else:
|
||||
sock = socket.create_connection((self.host, self.port), self.timeout)
|
||||
if self._tunnel_host:
|
||||
self.sock = sock
|
||||
self._tunnel()
|
||||
self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1)
|
||||
|
||||
class CustomHTTPSHandler(urllib2.HTTPSHandler):
|
||||
|
||||
def https_open(self, req):
|
||||
return self.do_open(CustomHTTPSConnection, req)
|
||||
|
||||
https_request = urllib2.AbstractHTTPHandler.do_request_
|
||||
|
||||
def generic_urlparse(parts):
|
||||
'''
|
||||
Returns a dictionary of url parts as parsed by urlparse,
|
||||
but accounts for the fact that older versions of that
|
||||
library do not support named attributes (ie. .netloc)
|
||||
'''
|
||||
generic_parts = dict()
|
||||
if hasattr(parts, 'netloc'):
|
||||
# urlparse is newer, just read the fields straight
|
||||
# from the parts object
|
||||
generic_parts['scheme'] = parts.scheme
|
||||
generic_parts['netloc'] = parts.netloc
|
||||
generic_parts['path'] = parts.path
|
||||
generic_parts['params'] = parts.params
|
||||
generic_parts['query'] = parts.query
|
||||
generic_parts['fragment'] = parts.fragment
|
||||
generic_parts['username'] = parts.username
|
||||
generic_parts['password'] = parts.password
|
||||
generic_parts['hostname'] = parts.hostname
|
||||
generic_parts['port'] = parts.port
|
||||
else:
|
||||
# we have to use indexes, and then parse out
|
||||
# the other parts not supported by indexing
|
||||
generic_parts['scheme'] = parts[0]
|
||||
generic_parts['netloc'] = parts[1]
|
||||
generic_parts['path'] = parts[2]
|
||||
generic_parts['params'] = parts[3]
|
||||
generic_parts['query'] = parts[4]
|
||||
generic_parts['fragment'] = parts[5]
|
||||
# get the username, password, etc.
|
||||
try:
|
||||
netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
|
||||
(auth, hostname, port) = netloc_re.match(parts[1]).groups()
|
||||
if port:
|
||||
# the capture group for the port will include the ':',
|
||||
# so remove it and convert the port to an integer
|
||||
port = int(port[1:])
|
||||
if auth:
|
||||
# the capture group above includes the @, so remove it
|
||||
# and then split it up based on the first ':' found
|
||||
auth = auth[:-1]
|
||||
username, password = auth.split(':', 1)
|
||||
generic_parts['username'] = username
|
||||
generic_parts['password'] = password
|
||||
generic_parts['hostname'] = hostname
|
||||
generic_parts['port'] = port
|
||||
except:
|
||||
generic_parts['username'] = None
|
||||
generic_parts['password'] = None
|
||||
generic_parts['hostname'] = None
|
||||
generic_parts['port'] = None
|
||||
return generic_parts
|
||||
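# Illustrative usage of generic_urlparse() (the URL is a made-up example):
#
#   parts = generic_urlparse(urlparse.urlparse('http://user:secret@proxy.example.com:3128/'))
#   parts.get('hostname')   # -> 'proxy.example.com'
#   parts.get('port')       # -> 3128
#   parts.get('username')   # -> 'user'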
|
||||
class RequestWithMethod(urllib2.Request):
|
||||
'''
|
||||
Workaround for using DELETE/PUT/etc with urllib2
|
||||
Originally contained in library/net_infrastructure/dnsmadeeasy
|
||||
'''
|
||||
|
||||
def __init__(self, url, method, data=None, headers={}):
|
||||
self._method = method
|
||||
urllib2.Request.__init__(self, url, data, headers)
|
||||
|
||||
def get_method(self):
|
||||
if self._method:
|
||||
return self._method
|
||||
else:
|
||||
return urllib2.Request.get_method(self)
|
||||
|
||||
|
||||
class SSLValidationHandler(urllib2.BaseHandler):
|
||||
'''
|
||||
A custom handler class for SSL validation.
|
||||
|
||||
Based on:
|
||||
http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
|
||||
http://techknack.net/python-urllib2-handlers/
|
||||
'''
|
||||
CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\nConnection: close\r\n"
|
||||
|
||||
def __init__(self, module, hostname, port):
|
||||
self.module = module
|
||||
self.hostname = hostname
|
||||
self.port = port
|
||||
|
||||
def get_ca_certs(self):
|
||||
# tries to find a valid CA cert in one of the
|
||||
# standard locations for the current distribution
|
||||
|
||||
ca_certs = []
|
||||
paths_checked = []
|
||||
platform = get_platform()
|
||||
distribution = get_distribution()
|
||||
|
||||
# build a list of paths to check for .crt/.pem files
|
||||
# based on the platform type
|
||||
paths_checked.append('/etc/ssl/certs')
|
||||
if platform == 'Linux':
|
||||
paths_checked.append('/etc/pki/ca-trust/extracted/pem')
|
||||
paths_checked.append('/etc/pki/tls/certs')
|
||||
paths_checked.append('/usr/share/ca-certificates/cacert.org')
|
||||
elif platform == 'FreeBSD':
|
||||
paths_checked.append('/usr/local/share/certs')
|
||||
elif platform == 'OpenBSD':
|
||||
paths_checked.append('/etc/ssl')
|
||||
elif platform == 'NetBSD':
|
||||
paths_checked.append('/etc/openssl/certs')
|
||||
elif platform == 'SunOS':
|
||||
paths_checked.append('/opt/local/etc/openssl/certs')
|
||||
|
||||
# fall back to a user-deployed cert in a standard
|
||||
# location if the OS platform one is not available
|
||||
paths_checked.append('/etc/ansible')
|
||||
|
||||
tmp_fd, tmp_path = tempfile.mkstemp()
|
||||
|
||||
# Write the dummy ca cert if we are running on Mac OS X
|
||||
if platform == 'Darwin':
|
||||
os.write(tmp_fd, DUMMY_CA_CERT)
|
||||
# Default Homebrew path for OpenSSL certs
|
||||
paths_checked.append('/usr/local/etc/openssl')
|
||||
|
||||
# for all of the paths, find any .crt or .pem files
|
||||
# and compile them into single temp file for use
|
||||
# in the ssl check to speed up the test
|
||||
for path in paths_checked:
|
||||
if os.path.exists(path) and os.path.isdir(path):
|
||||
dir_contents = os.listdir(path)
|
||||
for f in dir_contents:
|
||||
full_path = os.path.join(path, f)
|
||||
if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt','.pem'):
|
||||
try:
|
||||
cert_file = open(full_path, 'r')
|
||||
os.write(tmp_fd, cert_file.read())
|
||||
os.write(tmp_fd, '\n')
|
||||
cert_file.close()
|
||||
except:
|
||||
pass
|
||||
|
||||
return (tmp_path, paths_checked)
|
||||
|
||||
def validate_proxy_response(self, response, valid_codes=[200]):
|
||||
'''
|
||||
make sure we get back a valid code from the proxy
|
||||
'''
|
||||
try:
|
||||
(http_version, resp_code, msg) = re.match(r'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
|
||||
if int(resp_code) not in valid_codes:
|
||||
raise Exception
|
||||
except:
|
||||
self.module.fail_json(msg='Connection to proxy failed')
|
||||
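# Illustrative CONNECT exchange that validate_proxy_response() checks (the
# host and proxy names are assumptions):
#
#   client -> proxy:  CONNECT host.example.com:443 HTTP/1.0
#                     Connection: close
#
#   proxy -> client:  HTTP/1.0 200 Connection established
#
# A status code not listed in valid_codes (only 200 by default) makes
# validate_proxy_response() call fail_json().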
|
||||
def http_request(self, req):
|
||||
tmp_ca_cert_path, paths_checked = self.get_ca_certs()
|
||||
https_proxy = os.environ.get('https_proxy')
|
||||
try:
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
if https_proxy:
|
||||
proxy_parts = generic_urlparse(urlparse.urlparse(https_proxy))
|
||||
s.connect((proxy_parts.get('hostname'), proxy_parts.get('port')))
|
||||
if proxy_parts.get('scheme') == 'http':
|
||||
s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port))
|
||||
if proxy_parts.get('username'):
|
||||
credentials = "%s:%s" % (proxy_parts.get('username',''), proxy_parts.get('password',''))
|
||||
s.sendall('Proxy-Authorization: Basic %s\r\n' % credentials.encode('base64').strip())
|
||||
s.sendall('\r\n')
|
||||
connect_result = s.recv(4096)
|
||||
self.validate_proxy_response(connect_result)
|
||||
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
|
||||
else:
|
||||
self.module.fail_json(msg='Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
|
||||
else:
|
||||
s.connect((self.hostname, self.port))
|
||||
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
|
||||
# close the ssl connection
|
||||
#ssl_s.unwrap()
|
||||
s.close()
|
||||
except (ssl.SSLError, socket.error), e:
|
||||
# fail if we tried all of the certs but none worked
|
||||
if 'connection refused' in str(e).lower():
|
||||
self.module.fail_json(msg='Failed to connect to %s:%s.' % (self.hostname, self.port))
|
||||
else:
|
||||
self.module.fail_json(
|
||||
msg='Failed to validate the SSL certificate for %s:%s. ' % (self.hostname, self.port) + \
|
||||
'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \
|
||||
'Paths checked for this platform: %s' % ", ".join(paths_checked)
|
||||
)
|
||||
try:
|
||||
# cleanup the temp file created, don't worry
|
||||
# if it fails for some reason
|
||||
os.remove(tmp_ca_cert_path)
|
||||
except:
|
||||
pass
|
||||
|
||||
return req
|
||||
|
||||
https_request = http_request
|
||||
|
||||
|
||||
def url_argument_spec():
|
||||
'''
|
||||
Creates an argument spec that can be used with any module
|
||||
that will be requesting content via urllib/urllib2
|
||||
'''
|
||||
return dict(
|
||||
url = dict(),
|
||||
force = dict(default='no', aliases=['thirsty'], type='bool'),
|
||||
http_agent = dict(default='ansible-httpget'),
|
||||
use_proxy = dict(default='yes', type='bool'),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
url_username = dict(required=False),
|
||||
url_password = dict(required=False),
|
||||
)
|
||||
|
||||
|
||||
def fetch_url(module, url, data=None, headers=None, method=None,
|
||||
use_proxy=True, force=False, last_mod_time=None, timeout=10):
|
||||
'''
|
||||
Fetches a file from an HTTP/FTP server using urllib2
|
||||
'''
|
||||
|
||||
if not HAS_URLLIB:
|
||||
module.fail_json(msg='urllib is not installed')
|
||||
if not HAS_URLLIB2:
|
||||
module.fail_json(msg='urllib2 is not installed')
|
||||
elif not HAS_URLPARSE:
|
||||
module.fail_json(msg='urlparse is not installed')
|
||||
|
||||
r = None
|
||||
handlers = []
|
||||
info = dict(url=url)
|
||||
|
||||
distribution = get_distribution()
|
||||
# Get validate_certs from the module params
|
||||
validate_certs = module.params.get('validate_certs', True)
|
||||
|
||||
# FIXME: change the following to use the generic_urlparse function
|
||||
# to remove the indexed references for 'parsed'
|
||||
parsed = urlparse.urlparse(url)
|
||||
if parsed[0] == 'https':
|
||||
if not HAS_SSL and validate_certs:
|
||||
if distribution == 'Redhat':
|
||||
module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended. You can also install python-ssl from EPEL')
|
||||
else:
|
||||
module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended')
|
||||
|
||||
elif validate_certs:
|
||||
# do the cert validation
|
||||
netloc = parsed[1]
|
||||
if '@' in netloc:
|
||||
netloc = netloc.split('@', 1)[1]
|
||||
if ':' in netloc:
|
||||
hostname, port = netloc.split(':', 1)
|
||||
else:
|
||||
hostname = netloc
|
||||
port = 443
|
||||
# create the SSL validation handler and
|
||||
# add it to the list of handlers
|
||||
ssl_handler = SSLValidationHandler(module, hostname, port)
|
||||
handlers.append(ssl_handler)
|
||||
|
||||
if parsed[0] != 'ftp':
|
||||
username = module.params.get('url_username', '')
|
||||
if username:
|
||||
password = module.params.get('url_password', '')
|
||||
netloc = parsed[1]
|
||||
elif '@' in parsed[1]:
|
||||
credentials, netloc = parsed[1].split('@', 1)
|
||||
if ':' in credentials:
|
||||
username, password = credentials.split(':', 1)
|
||||
else:
|
||||
username = credentials
|
||||
password = ''
|
||||
|
||||
parsed = list(parsed)
|
||||
parsed[1] = netloc
|
||||
|
||||
# reconstruct url without credentials
|
||||
url = urlparse.urlunparse(parsed)
|
||||
|
||||
if username:
|
||||
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
|
||||
|
||||
# this creates a password manager
|
||||
passman.add_password(None, netloc, username, password)
|
||||
|
||||
# because we have put None at the start it will always
|
||||
# use this username/password combination for urls
|
||||
# for which `theurl` is a super-url
|
||||
authhandler = urllib2.HTTPBasicAuthHandler(passman)
|
||||
|
||||
# create the AuthHandler
|
||||
handlers.append(authhandler)
|
||||
|
||||
if not use_proxy:
|
||||
proxyhandler = urllib2.ProxyHandler({})
|
||||
handlers.append(proxyhandler)
|
||||
|
||||
# pre-2.6 versions of python cannot use the custom https
|
||||
# handler, since the socket class is lacking this method
|
||||
if hasattr(socket, 'create_connection'):
|
||||
handlers.append(CustomHTTPSHandler)
|
||||
|
||||
opener = urllib2.build_opener(*handlers)
|
||||
urllib2.install_opener(opener)
|
||||
|
||||
if method:
|
||||
if method.upper() not in ('OPTIONS','GET','HEAD','POST','PUT','DELETE','TRACE','CONNECT'):
|
||||
module.fail_json(msg='invalid HTTP request method; %s' % method.upper())
|
||||
request = RequestWithMethod(url, method.upper(), data)
|
||||
else:
|
||||
request = urllib2.Request(url, data)
|
||||
|
||||
# add the custom agent header, to help prevent issues
|
||||
# with sites that block the default urllib agent string
|
||||
request.add_header('User-agent', module.params.get('http_agent'))
|
||||
|
||||
# if we're ok with getting a 304, set the timestamp in the
|
||||
# header, otherwise make sure we don't get a cached copy
|
||||
if last_mod_time and not force:
|
||||
tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
|
||||
request.add_header('If-Modified-Since', tstamp)
|
||||
else:
|
||||
request.add_header('cache-control', 'no-cache')
|
||||
|
||||
# user defined headers now, which may override things we've set above
|
||||
if headers:
|
||||
if not isinstance(headers, dict):
|
||||
module.fail_json("headers provided to fetch_url() must be a dict")
|
||||
for header in headers:
|
||||
request.add_header(header, headers[header])
|
||||
|
||||
try:
|
||||
if sys.version_info < (2,6,0):
|
||||
# urlopen in python prior to 2.6.0 did not
|
||||
# have a timeout parameter
|
||||
r = urllib2.urlopen(request, None)
|
||||
else:
|
||||
r = urllib2.urlopen(request, None, timeout)
|
||||
info.update(r.info())
|
||||
info['url'] = r.geturl() # The URL goes in too, because of redirects.
|
||||
info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200))
|
||||
except urllib2.HTTPError, e:
|
||||
info.update(dict(msg=str(e), status=e.code))
|
||||
except urllib2.URLError, e:
|
||||
code = int(getattr(e, 'code', -1))
|
||||
info.update(dict(msg="Request failed: %s" % str(e), status=code))
|
||||
except socket.error, e:
|
||||
info.update(dict(msg="Connection failure: %s" % str(e), status=-1))
|
||||
except Exception, e:
|
||||
info.update(dict(msg="An unknown error occurred: %s" % str(e), status=-1))
|
||||
|
||||
return r, info
|
||||
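# Illustrative usage of url_argument_spec()/fetch_url() from a module (the
# URL and the AnsibleModule setup are assumptions, not part of this snippet):
#
#   argument_spec = url_argument_spec()
#   module = AnsibleModule(argument_spec=argument_spec)
#   response, info = fetch_url(module, 'https://example.com/api', method='GET')
#   if info['status'] != 200:
#       module.fail_json(msg='Request failed: %s' % info['msg'])
#   body = response.read()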
|
@ -1 +1 @@
|
||||
Subproject commit cb69744bcee4b4217d83b4a30006635ba69e2aa0
|
||||
Subproject commit c16601fffac87c941eb15263f24552e91641963d
|
@ -0,0 +1,341 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#############################################
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import re
import sys

from ansible import constants as C
from ansible import errors
|
||||
from ansible.inventory.group import Group
|
||||
from ansible.inventory.host import Host
|
||||
from ansible.inventory.aggregate import InventoryAggregateParser
|
||||
|
||||
class Inventory:
|
||||
'''
|
||||
Create hosts and groups from inventory
|
||||
|
||||
Retrieve the hosts and groups that ansible knows about from this class.
|
||||
|
||||
Retrieve raw variables (non-expanded) from the Group and Host classes
|
||||
returned from here.
|
||||
'''
|
||||
|
||||
def __init__(self, inventory_list=C.DEFAULT_HOST_LIST):
|
||||
'''
|
||||
:kwarg inventory_list: A list of inventory sources. This may be file
|
||||
names which will be parsed as ini-like files, executable scripts
|
||||
which return inventory data as json, directories of both of the above,
|
||||
or hostnames.
|
||||
:kwarg vault_password: Password to use if any of the inventory sources
|
||||
are in an ansible vault
|
||||
'''
|
||||
|
||||
self._restricted_to = None
|
||||
self._filter_pattern = None
|
||||
|
||||
parser = InventoryAggregateParser(inventory_list)
|
||||
parser.parse()
|
||||
|
||||
self._basedir = parser.basedir
|
||||
self._hosts = parser.hosts
|
||||
self._groups = parser.groups
|
||||
|
||||
def get_hosts(self):
|
||||
'''
|
||||
Return the list of hosts, after filtering based on any set pattern
|
||||
and restricting the results based on the set host restrictions.
|
||||
'''
|
||||
|
||||
if self._filter_pattern:
|
||||
hosts = self._filter_hosts()
|
||||
else:
|
||||
hosts = self._hosts[:]
|
||||
|
||||
if self._restricted_to is not None:
|
||||
# this will preserve the order of hosts after intersecting them
|
||||
res_set = set(hosts).intersection(self._restricted_to)
|
||||
return [h for h in hosts if h in res_set]
|
||||
else:
|
||||
return hosts[:]
|
||||
|
||||
def get_groups(self):
|
||||
'''
|
||||
Retrieve the Group objects known to the Inventory
|
||||
'''
|
||||
|
||||
return self._groups[:]
|
||||
|
||||
def get_host(self, hostname):
|
||||
'''
|
||||
Retrieve the Host object for a hostname
|
||||
'''
|
||||
|
||||
for host in self._hosts:
|
||||
if host.name == hostname:
|
||||
return host
|
||||
|
||||
return None
|
||||
|
||||
def get_group(self, groupname):
|
||||
'''
|
||||
Retrieve the Group object for a groupname
|
||||
'''
|
||||
|
||||
for group in self._groups:
|
||||
if group.name == groupname:
|
||||
return group
|
||||
|
||||
return None
|
||||
|
||||
def add_group(self, group):
|
||||
'''
|
||||
Add a new group to the inventory
|
||||
'''
|
||||
|
||||
if group not in self._groups:
|
||||
self._groups.append(group)
|
||||
|
||||
def set_filter_pattern(self, pattern='all'):
|
||||
'''
|
||||
Sets a pattern upon which hosts/groups will be filtered.
|
||||
This pattern can contain logical groupings such as unions,
|
||||
intersections and negations using special syntax.
|
||||
'''
|
||||
|
||||
self._filter_pattern = pattern
|
||||
|
||||
def set_host_restriction(self, restriction):
|
||||
'''
|
||||
Restrict operations to hosts in the given list
|
||||
'''
|
||||
|
||||
assert isinstance(restriction, list)
|
||||
self._restricted_to = restriction[:]
|
||||
|
||||
def remove_host_restriction(self):
|
||||
'''
|
||||
Remove the restriction on hosts, if any.
|
||||
'''
|
||||
|
||||
self._restricted_to = None
|
||||
|
||||
def _filter_hosts(self):
|
||||
"""
|
||||
Limits inventory results to a subset of inventory that matches a given
|
||||
list of patterns, for example to select the part of a host selection that also
|
||||
belongs to a certain geographic group or numeric slice.
|
||||
|
||||
Corresponds to --limit parameter to ansible-playbook
|
||||
|
||||
:arg patterns: The filter pattern set via set_filter_pattern(). If this is None it
|
||||
clears the subset. Multiple patterns may be specified in a
|
||||
semicolon- or colon-separated string.
|
||||
"""
|
||||
|
||||
hosts = []
|
||||
|
||||
pattern_regular = []
|
||||
pattern_intersection = []
|
||||
pattern_exclude = []
|
||||
|
||||
patterns = self._filter_pattern.replace(";",":").split(":")
|
||||
for p in patterns:
|
||||
if p.startswith("!"):
|
||||
pattern_exclude.append(p)
|
||||
elif p.startswith("&"):
|
||||
pattern_intersection.append(p)
|
||||
elif p:
|
||||
pattern_regular.append(p)
|
||||
|
||||
# if no regular pattern was given, hence only exclude and/or intersection
|
||||
# make that magically work
|
||||
if pattern_regular == []:
|
||||
pattern_regular = ['all']
|
||||
|
||||
# when applying the host selectors, run those without the "&" or "!"
|
||||
# first, then the &s, then the !s.
|
||||
patterns = pattern_regular + pattern_intersection + pattern_exclude
|
||||
|
||||
for p in patterns:
|
||||
intersect = False
|
||||
negate = False
|
||||
if p.startswith('&'):
|
||||
intersect = True
|
||||
elif p.startswith('!'):
|
||||
p = p[1:]
|
||||
negate = True
|
||||
|
||||
target = self._resolve_pattern(p)
|
||||
if isinstance(target, Host):
|
||||
if negate and target in hosts:
|
||||
# remove it
|
||||
hosts.remove(target)
|
||||
elif target not in hosts:
|
||||
# for both union and intersections, we just append it
|
||||
hosts.append(target)
|
||||
else:
|
||||
if intersect:
|
||||
hosts = [ h for h in hosts if h in target ]
|
||||
elif negate:
|
||||
hosts = [ h for h in hosts if h not in target ]
|
||||
else:
|
||||
to_append = [ h for h in target if h.name not in [ y.name for y in hosts ] ]
|
||||
hosts.extend(to_append)
|
||||
|
||||
return hosts
|
||||
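# Illustrative filter pattern semantics handled above (the group names are
# made-up examples):
#
#   'webservers:dbservers'   union of both groups
#   'webservers:&atlanta'    hosts in webservers that are also in atlanta
#   'webservers:!atlanta'    hosts in webservers that are not in atlanta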
|
||||
def _resolve_pattern(self, pattern):
|
||||
target = self.get_host(pattern)
|
||||
if target:
|
||||
return target
|
||||
else:
|
||||
(name, enumeration_details) = self._enumeration_info(pattern)
|
||||
hpat = self._hosts_in_unenumerated_pattern(name)
|
||||
result = self._apply_ranges(pattern, hpat)
|
||||
return result
|
||||
|
||||
def _enumeration_info(self, pattern):
|
||||
"""
|
||||
returns (pattern, limits) taking a regular pattern and finding out
|
||||
which parts of it correspond to start/stop offsets. limits is
|
||||
a tuple of (start, stop) or None
|
||||
"""
|
||||
|
||||
# Do not parse regexes for enumeration info
|
||||
if pattern.startswith('~'):
|
||||
return (pattern, None)
|
||||
|
||||
# The regex used to match on the range, which can be [x] or [x-y].
|
||||
pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$")
|
||||
m = pattern_re.match(pattern)
|
||||
if m:
|
||||
(target, first, last, rest) = m.groups()
|
||||
first = int(first)
|
||||
if last:
|
||||
if first < 0:
|
||||
raise errors.AnsibleError("invalid range: negative indices cannot be used as the first item in a range")
|
||||
last = int(last)
|
||||
else:
|
||||
last = first
|
||||
return (target, (first, last))
|
||||
else:
|
||||
return (pattern, None)
|
||||
|
||||
def _apply_ranges(self, pat, hosts):
|
||||
"""
|
||||
given a pattern like foo, that matches hosts, return all of hosts
|
||||
given a pattern like foo[0-5], where foo matches hosts, return hosts[0:5] (the first five hosts)
|
||||
"""
|
||||
|
||||
# If there are no hosts to select from, just return the
|
||||
# empty set. This prevents trying to do selections on an empty set.
|
||||
# issue#6258
|
||||
if not hosts:
|
||||
return hosts
|
||||
|
||||
(loose_pattern, limits) = self._enumeration_info(pat)
|
||||
if not limits:
|
||||
return hosts
|
||||
|
||||
(left, right) = limits
|
||||
|
||||
if left == '':
|
||||
left = 0
|
||||
if right == '':
|
||||
right = 0
|
||||
left=int(left)
|
||||
right=int(right)
|
||||
try:
|
||||
if left != right:
|
||||
return hosts[left:right]
|
||||
else:
|
||||
return [ hosts[left] ]
|
||||
except IndexError:
|
||||
raise errors.AnsibleError("no hosts matching the pattern '%s' were found" % pat)
|
||||
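# Illustrative range handling for the methods above (the group name is a
# made-up example):
#
#   'webservers[0]'    -> [hosts[0]], the first matching host
#   'webservers[0-3]'  -> hosts[0:3], the first three matching hosts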
|
||||
def _hosts_in_unenumerated_pattern(self, pattern):
|
||||
""" Get all host names matching the pattern """
|
||||
|
||||
results = []
|
||||
hosts = []
|
||||
hostnames = set()
|
||||
|
||||
# ignore any negative checks here, this is handled elsewhere
|
||||
pattern = pattern.replace("!","").replace("&", "")
|
||||
|
||||
def __append_host_to_results(host):
|
||||
if host not in results and host.name not in hostnames:
|
||||
hostnames.add(host.name)
|
||||
results.append(host)
|
||||
|
||||
groups = self.get_groups()
|
||||
for group in groups:
|
||||
if pattern == 'all':
|
||||
for host in group.get_hosts():
|
||||
__append_host_to_results(host)
|
||||
else:
|
||||
if self._match(group.name, pattern):
|
||||
for host in group.get_hosts():
|
||||
__append_host_to_results(host)
|
||||
else:
|
||||
matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)
|
||||
for host in matching_hosts:
|
||||
__append_host_to_results(host)
|
||||
|
||||
if pattern in ["localhost", "127.0.0.1"] and len(results) == 0:
|
||||
new_host = self._create_implicit_localhost(pattern)
|
||||
results.append(new_host)
|
||||
return results
|
||||
|
||||
def _create_implicit_localhost(self, pattern):
|
||||
new_host = Host(pattern)
|
||||
new_host._connection = 'local'
|
||||
new_host.set_variable("ansible_python_interpreter", sys.executable)
|
||||
ungrouped = self.get_group("ungrouped")
|
||||
if ungrouped is None:
|
||||
self.add_group(Group('ungrouped'))
|
||||
ungrouped = self.get_group('ungrouped')
|
||||
self.get_group('all').add_child_group(ungrouped)
|
||||
ungrouped.add_host(new_host)
|
||||
return new_host
|
||||
|
||||
def is_file(self):
|
||||
'''
|
||||
Did inventory come from a file?
|
||||
|
||||
:returns: True if the inventory is file based, False otherwise
|
||||
'''
|
||||
pass
|
||||
|
||||
def src(self):
|
||||
'''
|
||||
What's the complete path to the inventory file?
|
||||
|
||||
:returns: Complete path to the inventory file. None if inventory is
|
||||
not file-based
|
||||
'''
|
||||
pass
|
||||
|
||||
def basedir(self):
|
||||
'''
|
||||
        The directory from which the inventory was read.
|
||||
'''
|
||||
|
||||
return self._basedir
|
||||
|
@ -0,0 +1,51 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
class Host:
|
||||
def __init__(self, name):
|
||||
self._name = name
|
||||
self._connection = None
|
||||
self._ipv4_address = ''
|
||||
self._ipv6_address = ''
|
||||
self._port = 22
|
||||
self._vars = dict()
|
||||
|
||||
def __repr__(self):
|
||||
return self.get_name()
|
||||
|
||||
def get_name(self):
|
||||
return self._name
|
||||
|
||||
def get_groups(self):
|
||||
return []
|
||||
|
||||
def set_variable(self, name, value):
|
||||
''' sets a variable for this host '''
|
||||
|
||||
self._vars[name] = value
|
||||
|
||||
def get_vars(self):
|
||||
''' returns all variables for this host '''
|
||||
|
||||
all_vars = self._vars.copy()
|
||||
all_vars.update(dict(inventory_hostname=self._name))
|
||||
return all_vars
|
||||
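    # Illustrative example (not from the original source):
    #   Host('db1').get_vars() -> {'inventory_hostname': 'db1'}
    # plus whatever has been stored previously via set_variable().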
|
@ -0,0 +1,26 @@
|
||||
# FIXME: header
|
||||
|
||||
try:
|
||||
import json
|
||||
except ImportError:
|
||||
import simplejson as json
|
||||
|
||||
def jsonify(result, format=False):
|
||||
    ''' format JSON output (compressed or uncompressed) '''
|
||||
|
||||
if result is None:
|
||||
return "{}"
|
||||
result2 = result.copy()
|
||||
for key, value in result2.items():
|
||||
if type(value) is str:
|
||||
result2[key] = value.decode('utf-8', 'ignore')
|
||||
|
||||
indent = None
|
||||
if format:
|
||||
indent = 4
|
||||
|
||||
try:
|
||||
return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
|
||||
except UnicodeDecodeError:
|
||||
return json.dumps(result2, sort_keys=True, indent=indent)
|
||||
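# Illustrative usage of jsonify() (hypothetical values, not from the original source):
#   jsonify({'changed': False})               -> '{"changed": false}'
#   jsonify({'changed': False}, format=True)  -> the same document pretty-printed
#                                                with a 4-space indent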
|
@ -1,55 +0,0 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from errors import AnsibleError
|
||||
from ansible.utils import list_union
|
||||
|
||||
class Tag:
|
||||
def __init__(self, tags=[]):
|
||||
assert isinstance(tags, list)
|
||||
self._tags = tags
|
||||
|
||||
def push(self, tags):
|
||||
if not isinstance(tags, list):
|
||||
tags = [ tags ]
|
||||
for tag in tags:
|
||||
if not isinstance(tag, basestring):
|
||||
tag = str(tag)
|
||||
if tag not in self._tags:
|
||||
self._tags.append(tag)
|
||||
|
||||
def get_tags(self):
|
||||
return self._tags
|
||||
|
||||
def merge(self, tags):
|
||||
# returns a union of the tags, which can be a string,
|
||||
# a list of strings, or another Tag() class
|
||||
if isinstance(tags, basestring):
|
||||
tags = Tag([tags])
|
||||
elif isinstance(tags, list):
|
||||
tags = Tag(tags)
|
||||
elif not isinstance(tags, Tag):
|
||||
raise AnsibleError('expected a Tag() instance, instead got %s' % type(tags))
|
||||
return utils.list_union(self._tags, tags.get_tags())
|
||||
|
||||
def matches(self, tag):
|
||||
return tag in self._tags
|
||||
|
@ -0,0 +1,46 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.playbook.attribute import FieldAttribute
|
||||
|
||||
class Taggable:
|
||||
_tags = FieldAttribute(isa='list', default=[])
|
||||
|
||||
def __init__(self):
|
||||
super(Taggable, self).__init__()
|
||||
|
||||
def get_tags(self):
|
||||
return self._tags[:]
|
||||
|
||||
def evaluate_tags(self, only_tags, skip_tags):
|
||||
        my_tags = set(self.get_tags())
|
||||
|
||||
if skip_tags:
|
||||
skipped_tags = my_tags.intersection(skip_tags)
|
||||
if len(skipped_tags) > 0:
|
||||
return False
|
||||
|
||||
matched_tags = my_tags.intersection(only_tags)
|
||||
if len(matched_tags) > 0 or 'all' in only_tags:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
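    # Illustrative example of evaluate_tags() (assuming only_tags/skip_tags are
    # sets, as built by ConnectionInformation), with a task tagged ['deploy', 'web']:
    #   evaluate_tags({'web'}, set())      -> True   (tag matched)
    #   evaluate_tags({'all'}, {'deploy'}) -> False  (tag explicitly skipped)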
|
@ -0,0 +1,159 @@
|
||||
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
# Stephen Fromm <sfromm@gmail.com>
|
||||
# Brian Coca <briancoca+dev@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import pipes
|
||||
import shutil
|
||||
import tempfile
|
||||
import base64
|
||||
import re
|
||||
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.utils.hashing import checksum_s
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
|
||||
TRANSFERS_FILES = True
|
||||
|
||||
def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None):
|
||||
''' assemble a file from a directory of fragments '''
|
||||
|
||||
tmpfd, temp_path = tempfile.mkstemp()
|
||||
tmp = os.fdopen(tmpfd,'w')
|
||||
delimit_me = False
|
||||
add_newline = False
|
||||
|
||||
for f in sorted(os.listdir(src_path)):
|
||||
if compiled_regexp and not compiled_regexp.search(f):
|
||||
continue
|
||||
fragment = "%s/%s" % (src_path, f)
|
||||
if not os.path.isfile(fragment):
|
||||
continue
|
||||
fragment_content = file(fragment).read()
|
||||
|
||||
# always put a newline between fragments if the previous fragment didn't end with a newline.
|
||||
if add_newline:
|
||||
tmp.write('\n')
|
||||
|
||||
# delimiters should only appear between fragments
|
||||
if delimit_me:
|
||||
if delimiter:
|
||||
# un-escape anything like newlines
|
||||
delimiter = delimiter.decode('unicode-escape')
|
||||
tmp.write(delimiter)
|
||||
# always make sure there's a newline after the
|
||||
# delimiter, so lines don't run together
|
||||
if delimiter[-1] != '\n':
|
||||
tmp.write('\n')
|
||||
|
||||
tmp.write(fragment_content)
|
||||
delimit_me = True
|
||||
if fragment_content.endswith('\n'):
|
||||
add_newline = False
|
||||
else:
|
||||
add_newline = True
|
||||
|
||||
tmp.close()
|
||||
return temp_path
|
||||
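    # Rough usage sketch (illustrative paths, not from the original source):
    #   path = self._assemble_from_fragments('/etc/app/conf.d', delimiter='# ----')
    # concatenates the fragment files in sorted filename order into a local temp
    # file, writing the delimiter (followed by a newline) between fragments, and
    # returns the temp file path for the checksum comparison in run() below.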
|
||||
def run(self, tmp=None, task_vars=dict()):
|
||||
|
||||
src = self._task.args.get('src', None)
|
||||
dest = self._task.args.get('dest', None)
|
||||
delimiter = self._task.args.get('delimiter', None)
|
||||
# FIXME: boolean needs to be moved out of utils
|
||||
#remote_src = utils.boolean(options.get('remote_src', 'yes'))
|
||||
remote_src = self._task.args.get('remote_src', 'yes')
|
||||
regexp = self._task.args.get('regexp', None)
|
||||
|
||||
if src is None or dest is None:
|
||||
return dict(failed=True, msg="src and dest are required")
|
||||
|
||||
# FIXME: this should be boolean, hard-coded to yes for testing
|
||||
if remote_src == 'yes':
|
||||
return self._execute_module(tmp=tmp)
|
||||
# FIXME: we don't do inject anymore, so not sure where the original
|
||||
# file stuff is going to end up at this time
|
||||
#elif '_original_file' in inject:
|
||||
# src = utils.path_dwim_relative(inject['_original_file'], 'files', src, self.runner.basedir)
|
||||
else:
|
||||
# the source is local, so expand it here
|
||||
src = os.path.expanduser(src)
|
||||
|
||||
_re = None
|
||||
if regexp is not None:
|
||||
_re = re.compile(regexp)
|
||||
|
||||
# Does all work assembling the file
|
||||
path = self._assemble_from_fragments(src, delimiter, _re)
|
||||
|
||||
path_checksum = checksum_s(path)
|
||||
dest = self._remote_expand_user(dest, tmp)
|
||||
remote_checksum = self._remote_checksum(tmp, dest)
|
||||
|
||||
if path_checksum != remote_checksum:
|
||||
resultant = file(path).read()
|
||||
# FIXME: diff needs to be moved somewhere else
|
||||
#if self.runner.diff:
|
||||
# dest_result = self._execute_module(module_name='slurp', module_args=dict(path=dest), tmp=tmp, persist_files=True)
|
||||
# if 'content' in dest_result:
|
||||
# dest_contents = dest_result['content']
|
||||
# if dest_result['encoding'] == 'base64':
|
||||
# dest_contents = base64.b64decode(dest_contents)
|
||||
# else:
|
||||
# raise Exception("unknown encoding, failed: %s" % dest_result)
|
||||
xfered = self._transfer_data('src', resultant)
|
||||
|
||||
# fix file permissions when the copy is done as a different user
|
||||
            if (self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root'):
|
||||
self._remote_chmod('a+r', xfered, tmp)
|
||||
|
||||
# run the copy module
|
||||
|
||||
new_module_args = self._task.args.copy()
|
||||
new_module_args.update(
|
||||
dict(
|
||||
src=xfered,
|
||||
dest=dest,
|
||||
original_basename=os.path.basename(src),
|
||||
)
|
||||
)
|
||||
|
||||
# FIXME: checkmode stuff
|
||||
#if self.runner.noop_on_check(inject):
|
||||
# return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=src, after=resultant))
|
||||
#else:
|
||||
# res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject)
|
||||
# res.diff = dict(after=resultant)
|
||||
# return res
|
||||
res = self._execute_module(module_name='copy', module_args=new_module_args, tmp=tmp)
|
||||
#res.diff = dict(after=resultant)
|
||||
return res
|
||||
else:
|
||||
new_module_args = self._task.args.copy()
|
||||
new_module_args.update(
|
||||
dict(
|
||||
|
||||
dest=dest,
|
||||
original_basename=os.path.basename(src),
|
||||
)
|
||||
)
|
||||
|
||||
return self._execute_module(module_name='file', module_args=new_module_args, tmp=tmp)
|
@ -0,0 +1,54 @@
|
||||
# Copyright 2012, Dag Wieers <dag@wieers.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.action import ActionBase
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
    ''' Assert that given expressions evaluate to true, failing with an optional custom message '''
|
||||
|
||||
TRANSFERS_FILES = False
|
||||
|
||||
def run(self, tmp=None, task_vars=dict()):
|
||||
|
||||
# note: the fail module does not need to pay attention to check mode
|
||||
# it always runs.
|
||||
|
||||
msg = None
|
||||
if 'msg' in self._task.args:
|
||||
msg = self._task.args['msg']
|
||||
|
||||
        if 'that' not in self._task.args:
|
||||
raise AnsibleError('conditional required in "that" string')
|
||||
|
||||
for that in self._task.args['that']:
|
||||
self._task.when = [ that ]
|
||||
test_result = self._task.evaluate_conditional(all_vars=task_vars)
|
||||
if not test_result:
|
||||
result = dict(
|
||||
failed = True,
|
||||
evaluated_to = test_result,
|
||||
assertion = that,
|
||||
)
|
||||
|
||||
if msg:
|
||||
result['msg'] = msg
|
||||
|
||||
return result
|
||||
|
||||
return dict(changed=False, msg='all assertions passed')
|
||||
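    # Illustrative playbook snippet exercising the loop above (hypothetical facts):
    #   - assert:
    #       that:
    #         - ansible_os_family == 'Debian'
    #         - item_count|int > 0
    #       msg: "unexpected host configuration"
    # Each entry in 'that' is evaluated as a task conditional; the first one that
    # fails is returned with failed=True, the assertion text and the optional msg.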
|
@ -0,0 +1,384 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
import pipes
|
||||
import stat
|
||||
import tempfile
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.utils.boolean import boolean
|
||||
from ansible.utils.hashing import checksum
|
||||
|
||||
## fixes https://github.com/ansible/ansible/issues/3518
|
||||
# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
|
||||
|
||||
import sys
|
||||
reload(sys)
|
||||
sys.setdefaultencoding("utf8")
|
||||
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
|
||||
def run(self, tmp=None, task_vars=dict()):
|
||||
''' handler for file transfer operations '''
|
||||
|
||||
source = self._task.args.get('src', None)
|
||||
content = self._task.args.get('content', None)
|
||||
dest = self._task.args.get('dest', None)
|
||||
raw = boolean(self._task.args.get('raw', 'no'))
|
||||
force = boolean(self._task.args.get('force', 'yes'))
|
||||
|
||||
# content with newlines is going to be escaped to safely load in yaml
|
||||
# now we need to unescape it so that the newlines are evaluated properly
|
||||
# when writing the file to disk
|
||||
if content:
|
||||
if isinstance(content, unicode):
|
||||
try:
|
||||
content = content.decode('unicode-escape')
|
||||
except UnicodeDecodeError:
|
||||
pass
|
||||
|
||||
# FIXME: first available file needs to be reworked somehow...
|
||||
#if (source is None and content is None and not 'first_available_file' in inject) or dest is None:
|
||||
# result=dict(failed=True, msg="src (or content) and dest are required")
|
||||
# return ReturnData(conn=conn, result=result)
|
||||
#elif (source is not None or 'first_available_file' in inject) and content is not None:
|
||||
# result=dict(failed=True, msg="src and content are mutually exclusive")
|
||||
# return ReturnData(conn=conn, result=result)
|
||||
|
||||
# Check if the source ends with a "/"
|
||||
source_trailing_slash = False
|
||||
if source:
|
||||
source_trailing_slash = source.endswith("/")
|
||||
|
||||
# Define content_tempfile in case we set it after finding content populated.
|
||||
content_tempfile = None
|
||||
|
||||
# If content is defined make a temp file and write the content into it.
|
||||
if content is not None:
|
||||
try:
|
||||
# If content comes to us as a dict it should be decoded json.
|
||||
# We need to encode it back into a string to write it out.
|
||||
if type(content) is dict:
|
||||
content_tempfile = self._create_content_tempfile(json.dumps(content))
|
||||
else:
|
||||
content_tempfile = self._create_content_tempfile(content)
|
||||
source = content_tempfile
|
||||
except Exception, err:
|
||||
result = dict(failed=True, msg="could not write content temp file: %s" % err)
|
||||
                return result
|
||||
###############################################################################################
|
||||
# FIXME: first_available_file needs to be reworked?
|
||||
###############################################################################################
|
||||
# if we have first_available_file in our vars
|
||||
# look up the files and use the first one we find as src
|
||||
#elif 'first_available_file' in inject:
|
||||
# found = False
|
||||
# for fn in inject.get('first_available_file'):
|
||||
# fn_orig = fn
|
||||
# fnt = template.template(self.runner.basedir, fn, inject)
|
||||
# fnd = utils.path_dwim(self.runner.basedir, fnt)
|
||||
# if not os.path.exists(fnd) and '_original_file' in inject:
|
||||
# fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False)
|
||||
# if os.path.exists(fnd):
|
||||
# source = fnd
|
||||
# found = True
|
||||
# break
|
||||
# if not found:
|
||||
# results = dict(failed=True, msg="could not find src in first_available_file list")
|
||||
# return ReturnData(conn=conn, result=results)
|
||||
###############################################################################################
|
||||
else:
|
||||
# FIXME: templating needs to be worked out still
|
||||
#source = template.template(self.runner.basedir, source, inject)
|
||||
# FIXME: original_file stuff needs to be reworked - most likely
|
||||
# simply checking to see if the task has a role and using
|
||||
# using the role path as the dwim target and basedir would work
|
||||
#if '_original_file' in inject:
|
||||
# source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
|
||||
#else:
|
||||
# source = utils.path_dwim(self.runner.basedir, source)
|
||||
source = self._loader.path_dwim(source)
|
||||
|
||||
# A list of source file tuples (full_path, relative_path) which will try to copy to the destination
|
||||
source_files = []
|
||||
|
||||
# If source is a directory populate our list else source is a file and translate it to a tuple.
|
||||
if os.path.isdir(source):
|
||||
            # Work out how many leading characters to strip from each walked path to get the relative path.
|
||||
if source_trailing_slash:
|
||||
sz = len(source) + 1
|
||||
else:
|
||||
sz = len(source.rsplit('/', 1)[0]) + 1
|
||||
|
||||
# Walk the directory and append the file tuples to source_files.
|
||||
for base_path, sub_folders, files in os.walk(source):
|
||||
for file in files:
|
||||
full_path = os.path.join(base_path, file)
|
||||
rel_path = full_path[sz:]
|
||||
source_files.append((full_path, rel_path))
|
||||
|
||||
# If it's recursive copy, destination is always a dir,
|
||||
# explicitly mark it so (note - copy module relies on this).
|
||||
if not self._shell.path_has_trailing_slash(dest):
|
||||
dest = self._shell.join_path(dest, '')
|
||||
else:
|
||||
source_files.append((source, os.path.basename(source)))
|
||||
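        # Illustrative note on the relative-path handling above (assuming
        # path_dwim() normalised away any trailing slash): with src='/tmp/files/'
        # a file '/tmp/files/sub/a.conf' is copied to '<dest>/sub/a.conf', while
        # with src='/tmp/files' it ends up at '<dest>/files/sub/a.conf'.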
|
||||
changed = False
|
||||
diffs = []
|
||||
module_result = {"changed": False}
|
||||
|
||||
# A register for if we executed a module.
|
||||
# Used to cut down on command calls when not recursive.
|
||||
module_executed = False
|
||||
|
||||
# Tell _execute_module to delete the file if there is one file.
|
||||
delete_remote_tmp = (len(source_files) == 1)
|
||||
|
||||
# If this is a recursive action create a tmp path that we can share as the _exec_module create is too late.
|
||||
if not delete_remote_tmp:
|
||||
if tmp is None or "-tmp-" not in tmp:
|
||||
tmp = self._make_tmp_path()
|
||||
|
||||
# expand any user home dir specifier
|
||||
dest = self._remote_expand_user(dest, tmp)
|
||||
|
||||
for source_full, source_rel in source_files:
|
||||
# Generate a hash of the local file.
|
||||
local_checksum = checksum(source_full)
|
||||
|
||||
# If local_checksum is not defined we can't find the file so we should fail out.
|
||||
if local_checksum is None:
|
||||
return dict(failed=True, msg="could not find src=%s" % source_full)
|
||||
|
||||
# This is kind of optimization - if user told us destination is
|
||||
# dir, do path manipulation right away, otherwise we still check
|
||||
# for dest being a dir via remote call below.
|
||||
if self._shell.path_has_trailing_slash(dest):
|
||||
dest_file = self._shell.join_path(dest, source_rel)
|
||||
else:
|
||||
dest_file = self._shell.join_path(dest)
|
||||
|
||||
# Attempt to get the remote checksum
|
||||
remote_checksum = self._remote_checksum(tmp, dest_file)
|
||||
|
||||
if remote_checksum == '3':
|
||||
# The remote_checksum was executed on a directory.
|
||||
if content is not None:
|
||||
# If source was defined as content remove the temporary file and fail out.
|
||||
self._remove_tempfile_if_content_defined(content, content_tempfile)
|
||||
return dict(failed=True, msg="can not use content with a dir as dest")
|
||||
else:
|
||||
# Append the relative source location to the destination and retry remote_checksum
|
||||
dest_file = self._shell.join_path(dest, source_rel)
|
||||
remote_checksum = self._remote_checksum(tmp, dest_file)
|
||||
|
||||
if remote_checksum != '1' and not force:
|
||||
                # The remote file already exists and force is not enabled, so skip it.
|
||||
continue
|
||||
|
||||
if local_checksum != remote_checksum:
|
||||
# The checksums don't match and we will change or error out.
|
||||
changed = True
|
||||
|
||||
# Create a tmp path if missing only if this is not recursive.
|
||||
# If this is recursive we already have a tmp path.
|
||||
if delete_remote_tmp:
|
||||
if tmp is None or "-tmp-" not in tmp:
|
||||
tmp = self._make_tmp_path()
|
||||
|
||||
# FIXME: runner shouldn't have the diff option there
|
||||
#if self.runner.diff and not raw:
|
||||
# diff = self._get_diff_data(tmp, dest_file, source_full)
|
||||
#else:
|
||||
# diff = {}
|
||||
diff = {}
|
||||
|
||||
# FIXME: noop stuff
|
||||
#if self.runner.noop_on_check(inject):
|
||||
# self._remove_tempfile_if_content_defined(content, content_tempfile)
|
||||
# diffs.append(diff)
|
||||
# changed = True
|
||||
# module_result = dict(changed=True)
|
||||
# continue
|
||||
|
||||
# Define a remote directory that we will copy the file to.
|
||||
tmp_src = tmp + 'source'
|
||||
|
||||
if not raw:
|
||||
self._connection.put_file(source_full, tmp_src)
|
||||
else:
|
||||
self._connection.put_file(source_full, dest_file)
|
||||
|
||||
# We have copied the file remotely and no longer require our content_tempfile
|
||||
self._remove_tempfile_if_content_defined(content, content_tempfile)
|
||||
|
||||
# fix file permissions when the copy is done as a different user
|
||||
if (self._connection_info.sudo and self._connection_info.sudo_user != 'root' or self._connection_info.su and self._connection_info.su_user != 'root') and not raw:
|
||||
self._remote_chmod('a+r', tmp_src, tmp)
|
||||
|
||||
if raw:
|
||||
# Continue to next iteration if raw is defined.
|
||||
continue
|
||||
|
||||
# Run the copy module
|
||||
|
||||
# src and dest here come after original and override them
|
||||
# we pass dest only to make sure it includes trailing slash in case of recursive copy
|
||||
new_module_args = self._task.args.copy()
|
||||
new_module_args.update(
|
||||
dict(
|
||||
src=tmp_src,
|
||||
dest=dest,
|
||||
original_basename=source_rel,
|
||||
)
|
||||
)
|
||||
|
||||
# FIXME: checkmode and no_log stuff
|
||||
#if self.runner.noop_on_check(inject):
|
||||
# new_module_args['CHECKMODE'] = True
|
||||
#if self.runner.no_log:
|
||||
# new_module_args['NO_LOG'] = True
|
||||
|
||||
module_return = self._execute_module(module_name='copy', module_args=new_module_args, tmp=tmp, delete_remote_tmp=delete_remote_tmp)
|
||||
module_executed = True
|
||||
|
||||
else:
|
||||
# no need to transfer the file, already correct hash, but still need to call
|
||||
# the file module in case we want to change attributes
|
||||
self._remove_tempfile_if_content_defined(content, content_tempfile)
|
||||
|
||||
if raw:
|
||||
# Continue to next iteration if raw is defined.
|
||||
# self._remove_tmp_path(tmp)
|
||||
continue
|
||||
|
||||
# Build temporary module_args.
|
||||
new_module_args = self._task.args.copy()
|
||||
new_module_args.update(
|
||||
dict(
|
||||
src=source_rel,
|
||||
dest=dest,
|
||||
original_basename=source_rel
|
||||
)
|
||||
)
|
||||
# FIXME: checkmode and no_log stuff
|
||||
#if self.runner.noop_on_check(inject):
|
||||
# new_module_args['CHECKMODE'] = True
|
||||
#if self.runner.no_log:
|
||||
# new_module_args['NO_LOG'] = True
|
||||
|
||||
# Execute the file module.
|
||||
module_return = self._execute_module(module_name='file', module_args=new_module_args, tmp=tmp, delete_remote_tmp=delete_remote_tmp)
|
||||
module_executed = True
|
||||
|
||||
if not module_return.get('checksum'):
|
||||
module_return['checksum'] = local_checksum
|
||||
if module_return.get('failed') == True:
|
||||
return module_return
|
||||
if module_return.get('changed') == True:
|
||||
changed = True
|
||||
|
||||
# Delete tmp path if we were recursive or if we did not execute a module.
|
||||
if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) \
|
||||
or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed):
|
||||
self._remove_tmp_path(tmp)
|
||||
|
||||
# the file module returns the file path as 'path', but
|
||||
# the copy module uses 'dest', so add it if it's not there
|
||||
if 'path' in module_return and 'dest' not in module_return:
|
||||
module_return['dest'] = module_return['path']
|
||||
|
||||
# TODO: Support detailed status/diff for multiple files
|
||||
if len(source_files) == 1:
|
||||
result = module_return
|
||||
else:
|
||||
result = dict(dest=dest, src=source, changed=changed)
|
||||
|
||||
# FIXME: move diffs into the result?
|
||||
#if len(diffs) == 1:
|
||||
# return ReturnData(conn=conn, result=result, diff=diffs[0])
|
||||
#else:
|
||||
# return ReturnData(conn=conn, result=result)
|
||||
|
||||
return result
|
||||
|
||||
def _create_content_tempfile(self, content):
|
||||
''' Create a tempfile containing defined content '''
|
||||
fd, content_tempfile = tempfile.mkstemp()
|
||||
f = os.fdopen(fd, 'w')
|
||||
try:
|
||||
f.write(content)
|
||||
except Exception, err:
|
||||
os.remove(content_tempfile)
|
||||
raise Exception(err)
|
||||
finally:
|
||||
f.close()
|
||||
return content_tempfile
|
||||
|
||||
def _get_diff_data(self, tmp, destination, source):
|
||||
peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), persist_files=True)
|
||||
if 'failed' in peek_result and peek_result['failed'] or peek_result.get('rc', 0) != 0:
|
||||
return {}
|
||||
|
||||
diff = {}
|
||||
if peek_result['state'] == 'absent':
|
||||
diff['before'] = ''
|
||||
elif peek_result['appears_binary']:
|
||||
diff['dst_binary'] = 1
|
||||
# FIXME: this should not be in utils..
|
||||
#elif peek_result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF:
|
||||
# diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
|
||||
else:
|
||||
dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), tmp=tmp, persist_files=True)
|
||||
if 'content' in dest_result:
|
||||
dest_contents = dest_result['content']
|
||||
if dest_result['encoding'] == 'base64':
|
||||
dest_contents = base64.b64decode(dest_contents)
|
||||
else:
|
||||
raise Exception("unknown encoding, failed: %s" % dest_result)
|
||||
diff['before_header'] = destination
|
||||
diff['before'] = dest_contents
|
||||
|
||||
src = open(source)
|
||||
src_contents = src.read(8192)
|
||||
st = os.stat(source)
|
||||
if "\x00" in src_contents:
|
||||
diff['src_binary'] = 1
|
||||
# FIXME: this should not be in utils
|
||||
#elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF:
|
||||
# diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
|
||||
else:
|
||||
src.seek(0)
|
||||
diff['after_header'] = source
|
||||
diff['after'] = src.read()
|
||||
|
||||
return diff
|
||||
|
||||
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
|
||||
if content is not None:
|
||||
os.remove(content_tempfile)
|
||||
|
@ -0,0 +1,46 @@
|
||||
# Copyright 2012, Dag Wieers <dag@wieers.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.utils.boolean import boolean
|
||||
from ansible.template import Templar
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
''' Print statements during execution '''
|
||||
|
||||
TRANSFERS_FILES = False
|
||||
|
||||
def run(self, tmp=None, task_vars=dict()):
|
||||
|
||||
if 'msg' in self._task.args:
|
||||
if 'fail' in self._task.args and boolean(self._task.args['fail']):
|
||||
result = dict(failed=True, msg=self._task.args['msg'])
|
||||
else:
|
||||
result = dict(msg=self._task.args['msg'])
|
||||
# FIXME: move the LOOKUP_REGEX somewhere else
|
||||
elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']):
|
||||
templar = Templar(variables=task_vars)
|
||||
results = templar.template(self._task.args['var'], convert_bare=True)
|
||||
result = dict()
|
||||
result[self._task.args['var']] = results
|
||||
else:
|
||||
result = dict(msg='here we are')
|
||||
|
||||
# force flag to make debug output module always verbose
|
||||
result['verbose_always'] = True
|
||||
|
||||
return result
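    # Illustrative tasks exercising the branches above (hypothetical values):
    #   - debug: msg="deploying to {{ inventory_hostname }}"  -> returns that msg
    #   - debug: var=ansible_os_family                         -> templates the bare
    #                                                             variable name and
    #                                                             returns its value
    #   - debug:                                               -> returns msg='here we are'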
|
@ -0,0 +1,48 @@
|
||||
# (c) 2013-2014, Benno Joy <benno@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
|
||||
from types import NoneType
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.parsing import DataLoader
|
||||
from ansible.plugins.action import ActionBase
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
|
||||
TRANSFERS_FILES = False
|
||||
|
||||
def run(self, tmp=None, task_vars=dict()):
|
||||
|
||||
source = self._task.args.get('_raw_params')
|
||||
|
||||
if self._task._role:
|
||||
source = self._loader.path_dwim_relative(self._task._role.get('_role_path',''), 'vars', source)
|
||||
else:
|
||||
source = self._loader.path_dwim(source)
|
||||
|
||||
if os.path.exists(source):
|
||||
data = self._loader.load_from_file(source)
|
||||
if data is None:
|
||||
data = {}
|
||||
if not isinstance(data, dict):
|
||||
raise AnsibleError("%s must be stored as a dictionary/hash" % source)
|
||||
return dict(ansible_facts=data)
|
||||
else:
|
||||
return dict(failed=True, msg="Source file not found.", file=source)
|
||||
|
@ -0,0 +1,40 @@
|
||||
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
|
||||
def run(self, tmp=None, task_vars=dict()):
|
||||
|
||||
# FIXME: a lot of this should pretty much go away with module
|
||||
# args being stored within the task being run itself
|
||||
|
||||
#if self.runner.noop_on_check(inject):
|
||||
# if module_name in [ 'shell', 'command' ]:
|
||||
# return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for %s' % module_name))
|
||||
# # else let the module parsing code decide, though this will only be allowed for AnsibleModuleCommon using
|
||||
# # python modules for now
|
||||
# module_args += " CHECKMODE=True"
|
||||
|
||||
#if self.runner.no_log:
|
||||
# module_args += " NO_LOG=True"
|
||||
|
||||
#vv("REMOTE_MODULE %s %s" % (module_name, module_args), host=conn.host)
|
||||
        return self._execute_module(tmp=tmp)
|
||||
|
||||
|
@ -0,0 +1,120 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
|
||||
'''
|
||||
This is the default callback interface, which simply prints messages
|
||||
to stdout when new callback events are received.
|
||||
'''
|
||||
|
||||
def _print_banner(self, msg, color=None):
|
||||
'''
|
||||
Prints a header-looking line with stars taking up to 80 columns
|
||||
of width (3 columns, minimum)
|
||||
'''
|
||||
msg = msg.strip()
|
||||
star_len = (80 - len(msg))
|
||||
if star_len < 0:
|
||||
star_len = 3
|
||||
stars = "*" * star_len
|
||||
self._display.display("\n%s %s" % (msg, stars), color=color)
|
||||
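    # For example (illustrative): _print_banner("PLAY [webservers]") displays the
    # message followed by enough '*' characters to pad the line to roughly 80
    # columns, but never fewer than 3 stars for very long messages.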
|
||||
def on_any(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
def runner_on_failed(self, task, result, ignore_errors=False):
|
||||
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), result._result), color='red')
|
||||
|
||||
def runner_on_ok(self, task, result):
|
||||
msg = "ok: [%s]" % result._host.get_name()
|
||||
if self._display._verbosity > 0 or 'verbose_always' in result._result:
|
||||
if 'verbose_always' in result._result:
|
||||
del result._result['verbose_always']
|
||||
msg += " => %s" % result._result
|
||||
self._display.display(msg, color='green')
|
||||
|
||||
def runner_on_skipped(self, task, result):
|
||||
msg = "SKIPPED: [%s]" % result._host.get_name()
|
||||
if self._display._verbosity > 0 or 'verbose_always' in result._result:
|
||||
if 'verbose_always' in result._result:
|
||||
del result._result['verbose_always']
|
||||
msg += " => %s" % result._result
|
||||
self._display.display(msg)
|
||||
|
||||
def runner_on_unreachable(self, task, result):
|
||||
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), result._result), color='red')
|
||||
|
||||
def runner_on_no_hosts(self, task):
|
||||
pass
|
||||
|
||||
def runner_on_async_poll(self, host, res, jid, clock):
|
||||
pass
|
||||
|
||||
def runner_on_async_ok(self, host, res, jid):
|
||||
pass
|
||||
|
||||
def runner_on_async_failed(self, host, res, jid):
|
||||
pass
|
||||
|
||||
def playbook_on_start(self):
|
||||
pass
|
||||
|
||||
def playbook_on_notify(self, host, handler):
|
||||
pass
|
||||
|
||||
def playbook_on_no_hosts_matched(self):
|
||||
pass
|
||||
|
||||
def playbook_on_no_hosts_remaining(self):
|
||||
self._print_banner("NO MORE HOSTS LEFT")
|
||||
|
||||
def playbook_on_task_start(self, name, is_conditional):
|
||||
self._print_banner("TASK [%s]" % name.strip())
|
||||
|
||||
def playbook_on_cleanup_task_start(self, name):
|
||||
self._print_banner("CLEANUP TASK [%s]" % name.strip())
|
||||
|
||||
def playbook_on_handler_task_start(self, name):
|
||||
self._print_banner("RUNNING HANDLER [%s]" % name.strip())
|
||||
|
||||
def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
|
||||
pass
|
||||
|
||||
def playbook_on_setup(self):
|
||||
pass
|
||||
|
||||
def playbook_on_import_for_host(self, host, imported_file):
|
||||
pass
|
||||
|
||||
def playbook_on_not_import_for_host(self, host, missing_file):
|
||||
pass
|
||||
|
||||
def playbook_on_play_start(self, name):
|
||||
self._print_banner("PLAY [%s]" % name.strip())
|
||||
|
||||
def playbook_on_stats(self, stats):
|
||||
pass
|
||||
|
@ -0,0 +1,111 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
|
||||
'''
|
||||
This is the default callback interface, which simply prints messages
|
||||
to stdout when new callback events are received.
|
||||
'''
|
||||
|
||||
def _print_banner(self, msg):
|
||||
'''
|
||||
Prints a header-looking line with stars taking up to 80 columns
|
||||
of width (3 columns, minimum)
|
||||
'''
|
||||
msg = msg.strip()
|
||||
star_len = (80 - len(msg))
|
||||
if star_len < 0:
|
||||
star_len = 3
|
||||
stars = "*" * star_len
|
||||
self._display.display("\n%s %s\n" % (msg, stars))
|
||||
|
||||
def on_any(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
def runner_on_failed(self, task, result, ignore_errors=False):
|
||||
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), result._result), color='red')
|
||||
|
||||
def runner_on_ok(self, task, result):
|
||||
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), json.dumps(result._result, indent=4)), color='green')
|
||||
|
||||
def runner_on_skipped(self, task, result):
|
||||
pass
|
||||
|
||||
def runner_on_unreachable(self, task, result):
|
||||
self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow')
|
||||
|
||||
def runner_on_no_hosts(self, task):
|
||||
pass
|
||||
|
||||
def runner_on_async_poll(self, host, res, jid, clock):
|
||||
pass
|
||||
|
||||
def runner_on_async_ok(self, host, res, jid):
|
||||
pass
|
||||
|
||||
def runner_on_async_failed(self, host, res, jid):
|
||||
pass
|
||||
|
||||
def playbook_on_start(self):
|
||||
pass
|
||||
|
||||
def playbook_on_notify(self, host, handler):
|
||||
pass
|
||||
|
||||
def playbook_on_no_hosts_matched(self):
|
||||
pass
|
||||
|
||||
def playbook_on_no_hosts_remaining(self):
|
||||
pass
|
||||
|
||||
def playbook_on_task_start(self, name, is_conditional):
|
||||
pass
|
||||
|
||||
def playbook_on_cleanup_task_start(self, name):
|
||||
pass
|
||||
|
||||
def playbook_on_handler_task_start(self, name):
|
||||
pass
|
||||
|
||||
def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
|
||||
pass
|
||||
|
||||
def playbook_on_setup(self):
|
||||
pass
|
||||
|
||||
def playbook_on_import_for_host(self, host, imported_file):
|
||||
pass
|
||||
|
||||
def playbook_on_not_import_for_host(self, host, missing_file):
|
||||
pass
|
||||
|
||||
def playbook_on_play_start(self, name):
|
||||
pass
|
||||
|
||||
def playbook_on_stats(self, stats):
|
||||
pass
|
||||
|
@ -0,0 +1,371 @@
|
||||
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import json
|
||||
import os
|
||||
import base64
|
||||
import socket
|
||||
import struct
|
||||
import time
|
||||
from ansible.callbacks import vvv, vvvv
|
||||
from ansible.errors import AnsibleError, AnsibleFileNotFound
|
||||
from ansible.runner.connection_plugins.ssh import Connection as SSHConnection
|
||||
from ansible.runner.connection_plugins.paramiko_ssh import Connection as ParamikoConnection
|
||||
from ansible import utils
|
||||
from ansible import constants
|
||||
|
||||
# the chunk size to read and send, assuming mtu 1500 and
|
||||
# leaving room for base64 (+33%) encoding and header (8 bytes)
|
||||
# ((1400-8)/4)*3 = 1044
|
||||
# which leaves room for the TCP/IP header. We set this to a
|
||||
# multiple of the value to speed up file reads.
|
||||
CHUNK_SIZE=1044*20
|
||||
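# Sanity check of the arithmetic above (illustrative): 1400 payload bytes minus
# the 8 byte length header leaves 1392; base64 turns every 3 raw bytes into 4
# encoded characters, so (1392 / 4) * 3 == 1044 raw bytes per MTU-sized frame,
# and CHUNK_SIZE batches 20 of those per read.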
|
||||
class Connection(object):
|
||||
''' raw socket accelerated connection '''
|
||||
|
||||
def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
|
||||
|
||||
self.runner = runner
|
||||
self.host = host
|
||||
self.context = None
|
||||
self.conn = None
|
||||
self.user = user
|
||||
self.key = utils.key_for_hostname(host)
|
||||
self.port = port[0]
|
||||
self.accport = port[1]
|
||||
self.is_connected = False
|
||||
self.has_pipelining = False
|
||||
|
||||
if not self.port:
|
||||
self.port = constants.DEFAULT_REMOTE_PORT
|
||||
elif not isinstance(self.port, int):
|
||||
self.port = int(self.port)
|
||||
|
||||
if not self.accport:
|
||||
self.accport = constants.ACCELERATE_PORT
|
||||
elif not isinstance(self.accport, int):
|
||||
self.accport = int(self.accport)
|
||||
|
||||
if self.runner.original_transport == "paramiko":
|
||||
self.ssh = ParamikoConnection(
|
||||
runner=self.runner,
|
||||
host=self.host,
|
||||
port=self.port,
|
||||
user=self.user,
|
||||
password=password,
|
||||
private_key_file=private_key_file
|
||||
)
|
||||
else:
|
||||
self.ssh = SSHConnection(
|
||||
runner=self.runner,
|
||||
host=self.host,
|
||||
port=self.port,
|
||||
user=self.user,
|
||||
password=password,
|
||||
private_key_file=private_key_file
|
||||
)
|
||||
|
||||
if not getattr(self.ssh, 'shell', None):
|
||||
self.ssh.shell = utils.plugins.shell_loader.get('sh')
|
||||
|
||||
# attempt to work around shared-memory funness
|
||||
if getattr(self.runner, 'aes_keys', None):
|
||||
utils.AES_KEYS = self.runner.aes_keys
|
||||
|
||||
def _execute_accelerate_module(self):
|
||||
args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % (
|
||||
base64.b64encode(self.key.__str__()),
|
||||
str(self.accport),
|
||||
constants.ACCELERATE_DAEMON_TIMEOUT,
|
||||
int(utils.VERBOSITY),
|
||||
self.runner.accelerate_ipv6,
|
||||
)
|
||||
if constants.ACCELERATE_MULTI_KEY:
|
||||
args += " multi_key=yes"
|
||||
inject = dict(password=self.key)
|
||||
if getattr(self.runner, 'accelerate_inventory_host', False):
|
||||
inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.runner.accelerate_inventory_host))
|
||||
else:
|
||||
inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
|
||||
vvvv("attempting to start up the accelerate daemon...")
|
||||
self.ssh.connect()
|
||||
tmp_path = self.runner._make_tmp_path(self.ssh)
|
||||
return self.runner._execute_module(self.ssh, tmp_path, 'accelerate', args, inject=inject)
|
||||
|
||||
def connect(self, allow_ssh=True):
|
||||
''' activates the connection object '''
|
||||
|
||||
try:
|
||||
if not self.is_connected:
|
||||
wrong_user = False
|
||||
tries = 3
|
||||
self.conn = socket.socket()
|
||||
self.conn.settimeout(constants.ACCELERATE_CONNECT_TIMEOUT)
|
||||
vvvv("attempting connection to %s via the accelerated port %d" % (self.host,self.accport))
|
||||
while tries > 0:
|
||||
try:
|
||||
self.conn.connect((self.host,self.accport))
|
||||
break
|
||||
except socket.error:
|
||||
vvvv("connection to %s failed, retrying..." % self.host)
|
||||
time.sleep(0.1)
|
||||
tries -= 1
|
||||
if tries == 0:
|
||||
vvv("Could not connect via the accelerated connection, exceeded # of tries")
|
||||
raise AnsibleError("FAILED")
|
||||
elif wrong_user:
|
||||
vvv("Restarting daemon with a different remote_user")
|
||||
raise AnsibleError("WRONG_USER")
|
||||
|
||||
self.conn.settimeout(constants.ACCELERATE_TIMEOUT)
|
||||
if not self.validate_user():
|
||||
# the accelerated daemon was started with a
|
||||
# different remote_user. The above command
|
||||
# should have caused the accelerate daemon to
|
||||
# shutdown, so we'll reconnect.
|
||||
wrong_user = True
|
||||
|
||||
except AnsibleError, e:
|
||||
if allow_ssh:
|
||||
if "WRONG_USER" in e:
|
||||
vvv("Switching users, waiting for the daemon on %s to shutdown completely..." % self.host)
|
||||
time.sleep(5)
|
||||
vvv("Falling back to ssh to startup accelerated mode")
|
||||
res = self._execute_accelerate_module()
|
||||
if not res.is_successful():
|
||||
raise AnsibleError("Failed to launch the accelerated daemon on %s (reason: %s)" % (self.host,res.result.get('msg')))
|
||||
return self.connect(allow_ssh=False)
|
||||
else:
|
||||
raise AnsibleError("Failed to connect to %s:%s" % (self.host,self.accport))
|
||||
self.is_connected = True
|
||||
return self
|
||||
|
||||
def send_data(self, data):
|
||||
packed_len = struct.pack('!Q',len(data))
|
||||
return self.conn.sendall(packed_len + data)
|
||||
|
||||
def recv_data(self):
|
||||
header_len = 8 # size of a packed unsigned long long
|
||||
data = b""
|
||||
try:
|
||||
vvvv("%s: in recv_data(), waiting for the header" % self.host)
|
||||
while len(data) < header_len:
|
||||
d = self.conn.recv(header_len - len(data))
|
||||
if not d:
|
||||
vvvv("%s: received nothing, bailing out" % self.host)
|
||||
return None
|
||||
data += d
|
||||
vvvv("%s: got the header, unpacking" % self.host)
|
||||
data_len = struct.unpack('!Q',data[:header_len])[0]
|
||||
data = data[header_len:]
|
||||
vvvv("%s: data received so far (expecting %d): %d" % (self.host,data_len,len(data)))
|
||||
while len(data) < data_len:
|
||||
d = self.conn.recv(data_len - len(data))
|
||||
if not d:
|
||||
vvvv("%s: received nothing, bailing out" % self.host)
|
||||
return None
|
||||
vvvv("%s: received %d bytes" % (self.host, len(d)))
|
||||
data += d
|
||||
vvvv("%s: received all of the data, returning" % self.host)
|
||||
return data
|
||||
except socket.timeout:
|
||||
raise AnsibleError("timed out while waiting to receive data")
|
||||
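    # Wire format used by send_data()/recv_data() above (sketch): every message
    # is an 8-byte big-endian unsigned length (struct '!Q') followed by that many
    # bytes of payload, which the callers encrypt/decrypt with utils.encrypt()
    # and utils.decrypt(), e.g.
    #   struct.pack('!Q', len(payload)) + payload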
|
||||
def validate_user(self):
|
||||
'''
|
||||
Checks the remote uid of the accelerated daemon vs. the
|
||||
one specified for this play and will cause the accel
|
||||
daemon to exit if they don't match
|
||||
'''
|
||||
|
||||
vvvv("%s: sending request for validate_user" % self.host)
|
||||
data = dict(
|
||||
mode='validate_user',
|
||||
username=self.user,
|
||||
)
|
||||
data = utils.jsonify(data)
|
||||
data = utils.encrypt(self.key, data)
|
||||
if self.send_data(data):
|
||||
raise AnsibleError("Failed to send command to %s" % self.host)
|
||||
|
||||
vvvv("%s: waiting for validate_user response" % self.host)
|
||||
while True:
|
||||
# we loop here while waiting for the response, because a
|
||||
# long running command may cause us to receive keepalive packets
|
||||
# ({"pong":"true"}) rather than the response we want.
|
||||
response = self.recv_data()
|
||||
if not response:
|
||||
raise AnsibleError("Failed to get a response from %s" % self.host)
|
||||
response = utils.decrypt(self.key, response)
|
||||
response = utils.parse_json(response)
|
||||
if "pong" in response:
|
||||
# it's a keepalive, go back to waiting
|
||||
vvvv("%s: received a keepalive packet" % self.host)
|
||||
continue
|
||||
else:
|
||||
vvvv("%s: received the validate_user response: %s" % (self.host, response))
|
||||
break
|
||||
|
||||
if response.get('failed'):
|
||||
return False
|
||||
else:
|
||||
return response.get('rc') == 0
|
||||
|
||||
def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
|
||||
''' run a command on the remote host '''
|
||||
|
||||
if su or su_user:
|
||||
raise AnsibleError("Internal Error: this module does not support running commands via su")
|
||||
|
||||
if in_data:
|
||||
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
|
||||
|
||||
if executable == "":
|
||||
executable = constants.DEFAULT_EXECUTABLE
|
||||
|
||||
if self.runner.sudo and sudoable and sudo_user:
|
||||
cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd)
|
||||
|
||||
vvv("EXEC COMMAND %s" % cmd)
|
||||
|
||||
data = dict(
|
||||
mode='command',
|
||||
cmd=cmd,
|
||||
            tmp_path=tmp_path,
            executable=executable,
        )
        data = utils.jsonify(data)
        data = utils.encrypt(self.key, data)
        if self.send_data(data):
            raise AnsibleError("Failed to send command to %s" % self.host)

        while True:
            # we loop here while waiting for the response, because a
            # long running command may cause us to receive keepalive packets
            # ({"pong":"true"}) rather than the response we want.
            response = self.recv_data()
            if not response:
                raise AnsibleError("Failed to get a response from %s" % self.host)
            response = utils.decrypt(self.key, response)
            response = utils.parse_json(response)
            if "pong" in response:
                # it's a keepalive, go back to waiting
                vvvv("%s: received a keepalive packet" % self.host)
                continue
            else:
                vvvv("%s: received the response" % self.host)
                break

        return (response.get('rc', None), '', response.get('stdout', ''), response.get('stderr', ''))

    def put_file(self, in_path, out_path):

        ''' transfer a file from local to remote '''
        vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)

        if not os.path.exists(in_path):
            raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)

        fd = file(in_path, 'rb')
        fstat = os.stat(in_path)
        try:
            vvv("PUT file is %d bytes" % fstat.st_size)
            last = False
            while fd.tell() <= fstat.st_size and not last:
                vvvv("file position currently %ld, file size is %ld" % (fd.tell(), fstat.st_size))
                data = fd.read(CHUNK_SIZE)
                if fd.tell() >= fstat.st_size:
                    last = True
                data = dict(mode='put', data=base64.b64encode(data), out_path=out_path, last=last)
                if self.runner.sudo:
                    data['user'] = self.runner.sudo_user
                data = utils.jsonify(data)
                data = utils.encrypt(self.key, data)

                if self.send_data(data):
                    raise AnsibleError("failed to send the file to %s" % self.host)

                response = self.recv_data()
                if not response:
                    raise AnsibleError("Failed to get a response from %s" % self.host)
                response = utils.decrypt(self.key, response)
                response = utils.parse_json(response)

                if response.get('failed', False):
                    raise AnsibleError("failed to put the file in the requested location")
        finally:
            fd.close()
            vvvv("waiting for final response after PUT")
            response = self.recv_data()
            if not response:
                raise AnsibleError("Failed to get a response from %s" % self.host)
            response = utils.decrypt(self.key, response)
            response = utils.parse_json(response)

            if response.get('failed', False):
                raise AnsibleError("failed to put the file in the requested location")

    def fetch_file(self, in_path, out_path):
        ''' save a remote file to the specified path '''
        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)

        data = dict(mode='fetch', in_path=in_path)
        data = utils.jsonify(data)
        data = utils.encrypt(self.key, data)
        if self.send_data(data):
            raise AnsibleError("failed to initiate the file fetch with %s" % self.host)

        fh = open(out_path, "w")
        try:
            bytes = 0
            while True:
                response = self.recv_data()
                if not response:
                    raise AnsibleError("Failed to get a response from %s" % self.host)
                response = utils.decrypt(self.key, response)
                response = utils.parse_json(response)
                if response.get('failed', False):
                    raise AnsibleError("Error during file fetch, aborting")
                out = base64.b64decode(response['data'])
                fh.write(out)
                bytes += len(out)
                # send an empty response back to signify we
                # received the last chunk without errors
                data = utils.jsonify(dict())
                data = utils.encrypt(self.key, data)
                if self.send_data(data):
                    raise AnsibleError("failed to send ack during file fetch")
                if response.get('last', False):
                    break
        finally:
            # we don't currently care about this final response,
            # we just receive it and drop it. It may be used at some
            # point in the future or we may just have the put/fetch
            # operations not send back a final response at all
            response = self.recv_data()
            vvv("FETCH wrote %d bytes to %s" % (bytes, out_path))
            fh.close()

    def close(self):
        ''' terminate the connection '''
        # Be a good citizen
        try:
            self.conn.close()
        except:
            pass
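
The keepalive handling in exec_command above is easy to miss; a hedged sketch of the same receive loop factored into a helper (the _recv_response name is hypothetical; recv_data, utils.decrypt/parse_json, vvvv and AnsibleError are the names already used in this file):

    def _recv_response(self):
        # hypothetical helper: drain {"pong": ...} keepalives until the real reply arrives
        while True:
            response = self.recv_data()
            if not response:
                raise AnsibleError("Failed to get a response from %s" % self.host)
            response = utils.parse_json(utils.decrypt(self.key, response))
            if "pong" in response:
                vvvv("%s: received a keepalive packet" % self.host)
                continue
            return response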
@ -0,0 +1,130 @@
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import distutils.spawn
import traceback
import os
import shutil
import subprocess
from ansible import errors
from ansible import utils
from ansible.callbacks import vvv

class Connection(object):
    ''' Local chroot based connections '''

    def __init__(self, runner, host, port, *args, **kwargs):
        self.chroot = host
        self.has_pipelining = False

        if os.geteuid() != 0:
            raise errors.AnsibleError("chroot connection requires running as root")

        # we're running as root on the local system so do some
        # trivial checks for ensuring 'host' is actually a chroot'able dir
        if not os.path.isdir(self.chroot):
            raise errors.AnsibleError("%s is not a directory" % self.chroot)

        chrootsh = os.path.join(self.chroot, 'bin/sh')
        if not utils.is_executable(chrootsh):
            raise errors.AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)

        self.chroot_cmd = distutils.spawn.find_executable('chroot')
        if not self.chroot_cmd:
            raise errors.AnsibleError("chroot command not found in PATH")

        self.runner = runner
        self.host = host
        # port is unused, since this is local
        self.port = port

    def connect(self, port=None):
        ''' connect to the chroot; nothing to do here '''

        vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)

        return self

    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
        ''' run a command on the chroot '''

        if su or su_user:
            raise errors.AnsibleError("Internal Error: this module does not support running commands via su")

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # We enter chroot as root so sudo stuff can be ignored

        if executable:
            local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
        else:
            local_cmd = '%s "%s" %s' % (self.chroot_cmd, self.chroot, cmd)

        vvv("EXEC %s" % (local_cmd), host=self.chroot)
        p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        stdout, stderr = p.communicate()
        return (p.returncode, '', stdout, stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to chroot '''

        if not out_path.startswith(os.path.sep):
            out_path = os.path.join(os.path.sep, out_path)
        normpath = os.path.normpath(out_path)
        out_path = os.path.join(self.chroot, normpath[1:])

        vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
        if not os.path.exists(in_path):
            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        try:
            shutil.copyfile(in_path, out_path)
        except shutil.Error:
            traceback.print_exc()
            raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
        except IOError:
            traceback.print_exc()
            raise errors.AnsibleError("failed to transfer file to %s" % out_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from chroot to local '''

        if not in_path.startswith(os.path.sep):
            in_path = os.path.join(os.path.sep, in_path)
        normpath = os.path.normpath(in_path)
        in_path = os.path.join(self.chroot, normpath[1:])

        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
        if not os.path.exists(in_path):
            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        try:
            shutil.copyfile(in_path, out_path)
        except shutil.Error:
            traceback.print_exc()
            raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
        except IOError:
            traceback.print_exc()
            raise errors.AnsibleError("failed to transfer file to %s" % out_path)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
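
The put_file/fetch_file path handling above re-roots a remote path inside the chroot tree; a standalone sketch of that mapping (the helper name and example paths are hypothetical, the logic mirrors the inline code):

import os

def map_into_chroot(chroot, path):
    # force the path absolute, normalize it, then re-root it under the chroot
    if not path.startswith(os.path.sep):
        path = os.path.join(os.path.sep, path)
    normpath = os.path.normpath(path)
    return os.path.join(chroot, normpath[1:])

# e.g. map_into_chroot('/srv/wheezy', 'etc/motd') -> '/srv/wheezy/etc/motd'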
@ -0,0 +1,151 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import json
import os
import base64
from ansible.callbacks import vvv
from ansible import utils
from ansible import errors
from ansible import constants

HAVE_ZMQ=False

try:
    import zmq
    HAVE_ZMQ=True
except ImportError:
    pass

class Connection(object):
    ''' ZeroMQ accelerated connection '''

    def __init__(self, runner, host, port, *args, **kwargs):

        self.runner = runner
        self.has_pipelining = False

        # attempt to work around shared-memory funness
        if getattr(self.runner, 'aes_keys', None):
            utils.AES_KEYS = self.runner.aes_keys

        self.host = host
        self.key = utils.key_for_hostname(host)
        self.context = None
        self.socket = None

        if port is None:
            self.port = constants.ZEROMQ_PORT
        else:
            self.port = port

    def connect(self):
        ''' activates the connection object '''

        if not HAVE_ZMQ:
            raise errors.AnsibleError("zmq is not installed")

        # this is rough/temporary and will likely be optimized later ...
        self.context = zmq.Context()
        socket = self.context.socket(zmq.REQ)
        addr = "tcp://%s:%s" % (self.host, self.port)
        socket.connect(addr)
        self.socket = socket

        return self

    def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=None):
        ''' run a command on the remote host '''

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        vvv("EXEC COMMAND %s" % cmd)

        if (self.runner.sudo and sudoable) or (self.runner.su and su):
            raise errors.AnsibleError(
                "When using fireball, do not specify sudo or su to run your tasks. " +
                "Instead sudo the fireball action with sudo. " +
                "Task will communicate with the fireball already running in sudo mode."
            )

        data = dict(
            mode='command',
            cmd=cmd,
            tmp_path=tmp_path,
            executable=executable,
        )
        data = utils.jsonify(data)
        data = utils.encrypt(self.key, data)
        self.socket.send(data)

        response = self.socket.recv()
        response = utils.decrypt(self.key, response)
        response = utils.parse_json(response)

        return (response.get('rc', None), '', response.get('stdout', ''), response.get('stderr', ''))

    def put_file(self, in_path, out_path):

        ''' transfer a file from local to remote '''
        vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)

        if not os.path.exists(in_path):
            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        data = file(in_path).read()
        data = base64.b64encode(data)

        data = dict(mode='put', data=data, out_path=out_path)
        # TODO: support chunked file transfer
        data = utils.jsonify(data)
        data = utils.encrypt(self.key, data)
        self.socket.send(data)

        response = self.socket.recv()
        response = utils.decrypt(self.key, response)
        response = utils.parse_json(response)

        # no meaningful response needed for this

    def fetch_file(self, in_path, out_path):
        ''' save a remote file to the specified path '''
        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)

        data = dict(mode='fetch', in_path=in_path)
        data = utils.jsonify(data)
        data = utils.encrypt(self.key, data)
        self.socket.send(data)

        response = self.socket.recv()
        response = utils.decrypt(self.key, response)
        response = utils.parse_json(response)
        response = response['data']
        response = base64.b64decode(response)

        fh = open(out_path, "w")
        fh.write(response)
        fh.close()

    def close(self):
        ''' terminate the connection '''
        # Be a good citizen
        try:
            self.socket.close()
            self.context.term()
        except:
            pass
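
Each operation above is the same REQ/REP round trip: jsonify, encrypt with the per-host AES key, send, then decrypt and parse the reply. A condensed sketch of that round trip (the _request helper is hypothetical; the utils functions are the ones already imported in this file):

    def _request(self, payload):
        # hypothetical condensation of the repeated send/recv pattern above
        msg = utils.encrypt(self.key, utils.jsonify(payload))
        self.socket.send(msg)
        reply = self.socket.recv()
        return utils.parse_json(utils.decrypt(self.key, reply))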
@ -0,0 +1,99 @@
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# ---
# The func transport permits using Ansible over func. For people who have
# already set up func and wish to try Ansible, this permits moving gradually
# to Ansible without having to completely redo the network setup.

HAVE_FUNC=False
try:
    import func.overlord.client as fc
    HAVE_FUNC=True
except ImportError:
    pass

import os
from ansible.callbacks import vvv
from ansible import errors
import tempfile
import shutil


class Connection(object):
    ''' Func-based connections '''

    def __init__(self, runner, host, port, *args, **kwargs):
        self.runner = runner
        self.host = host
        self.has_pipelining = False
        # port is unused, everything goes over func
        self.port = port

    def connect(self, port=None):
        if not HAVE_FUNC:
            raise errors.AnsibleError("func is not installed")

        self.client = fc.Client(self.host)
        return self

    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False,
                     executable='/bin/sh', in_data=None, su=None, su_user=None):
        ''' run a command on the remote minion '''

        if su or su_user:
            raise errors.AnsibleError("Internal Error: this module does not support running commands via su")

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        vvv("EXEC %s" % (cmd), host=self.host)
        p = self.client.command.run(cmd)[self.host]
        return (p[0], '', p[1], p[2])

    def _normalize_path(self, path, prefix):
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to remote '''

        out_path = self._normalize_path(out_path, '/')
        vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
        self.client.local.copyfile.send(in_path, out_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from remote to local '''

        in_path = self._normalize_path(in_path, '/')
        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
        # need to use a tmp dir due to a difference in semantics: getfile
        # takes a directory as destination, while fetch_file takes a file
        # directly
        tmpdir = tempfile.mkdtemp(prefix="func_ansible")
        self.client.local.getfile.get(in_path, tmpdir)
        shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)),
                    out_path)
        shutil.rmtree(tmpdir)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
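
For illustration, the exec_command return mirrors func's (rc, stdout, stderr) result tuple; a hedged sketch of driving this transport directly (the minion name is hypothetical, and func must be installed for connect() to succeed):

# hypothetical standalone use; inside Ansible the runner framework drives this
conn = Connection(runner=None, host="minion.example.com", port=None).connect()
rc, _, out, err = conn.exec_command("uptime", tmp_path="/tmp")
print rc, out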
@ -0,0 +1,151 @@
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import distutils.spawn
import traceback
import os
import shutil
import subprocess
from ansible import errors
from ansible.callbacks import vvv

class Connection(object):
    ''' Local BSD jail based connections '''

    def _search_executable(self, executable):
        cmd = distutils.spawn.find_executable(executable)
        if not cmd:
            raise errors.AnsibleError("%s command not found in PATH" % executable)
        return cmd

    def list_jails(self):
        p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        stdout, stderr = p.communicate()

        return stdout.split()

    def get_jail_path(self):
        p = subprocess.Popen([self.jls_cmd, '-j', self.jail, '-q', 'path'],
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        stdout, stderr = p.communicate()
        # remove the trailing \n
        return stdout[:-1]

    def __init__(self, runner, host, port, *args, **kwargs):
        self.jail = host
        self.runner = runner
        self.host = host
        self.has_pipelining = False

        if os.geteuid() != 0:
            raise errors.AnsibleError("jail connection requires running as root")

        self.jls_cmd = self._search_executable('jls')
        self.jexec_cmd = self._search_executable('jexec')

        if self.jail not in self.list_jails():
            raise errors.AnsibleError("incorrect jail name %s" % self.jail)

        self.host = host
        # port is unused, since this is local
        self.port = port

    def connect(self, port=None):
        ''' connect to the jail; nothing to do here '''

        vvv("THIS IS A LOCAL JAIL DIR", host=self.jail)

        return self

    # to be modified
    def _generate_cmd(self, executable, cmd):
        if executable:
            local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd]
        else:
            local_cmd = '%s "%s" %s' % (self.jexec_cmd, self.jail, cmd)
        return local_cmd

    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
        ''' run a command in the jail '''

        if su or su_user:
            raise errors.AnsibleError("Internal Error: this module does not support running commands via su")

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # We enter the jail as root so sudo stuff can be ignored
        local_cmd = self._generate_cmd(executable, cmd)

        vvv("EXEC %s" % (local_cmd), host=self.jail)
        p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        stdout, stderr = p.communicate()
        return (p.returncode, '', stdout, stderr)

    def _normalize_path(self, path, prefix):
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def _copy_file(self, in_path, out_path):
        if not os.path.exists(in_path):
            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        try:
            shutil.copyfile(in_path, out_path)
        except shutil.Error:
            traceback.print_exc()
            raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
        except IOError:
            traceback.print_exc()
            raise errors.AnsibleError("failed to transfer file to %s" % out_path)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to the jail '''

        out_path = self._normalize_path(out_path, self.get_jail_path())
        vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)

        self._copy_file(in_path, out_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from the jail to local '''

        in_path = self._normalize_path(in_path, self.get_jail_path())
        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)

        self._copy_file(in_path, out_path)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
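
get_jail_path shells out to jls to find the jail's root on the host filesystem; a small hedged equivalent outside the class (the jail_root name and jail name are hypothetical):

import subprocess

def jail_root(jail_name):
    # `jls -j <name> -q path` prints the jail's root directory plus a newline
    out = subprocess.check_output(['jls', '-j', jail_name, '-q', 'path'])
    return out.rstrip('\n')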
@ -0,0 +1,127 @@
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import distutils.spawn
import os
import subprocess
from ansible import errors
from ansible.callbacks import vvv

class Connection(object):
    ''' Local lxc based connections '''

    def _search_executable(self, executable):
        cmd = distutils.spawn.find_executable(executable)
        if not cmd:
            raise errors.AnsibleError("%s command not found in PATH" % executable)
        return cmd

    def _check_domain(self, domain):
        p = subprocess.Popen([self.cmd, '-q', '-c', 'lxc:///', 'dominfo', domain],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()
        if p.returncode:
            raise errors.AnsibleError("%s is not a lxc defined in libvirt" % domain)

    def __init__(self, runner, host, port, *args, **kwargs):
        self.lxc = host

        self.cmd = self._search_executable('virsh')

        self._check_domain(host)

        self.runner = runner
        self.host = host
        # port is unused, since this is local
        self.port = port

    def connect(self, port=None):
        ''' connect to the lxc; nothing to do here '''

        vvv("THIS IS A LOCAL LXC DIR", host=self.lxc)

        return self

    def _generate_cmd(self, executable, cmd):
        if executable:
            local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable, '-c', cmd]
        else:
            local_cmd = '%s -q -c lxc:/// lxc-enter-namespace %s -- %s' % (self.cmd, self.lxc, cmd)
        return local_cmd

    def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
        ''' run a command in the lxc container '''

        if su or su_user:
            raise errors.AnsibleError("Internal Error: this module does not support running commands via su")

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # We enter lxc as root so sudo stuff can be ignored
        local_cmd = self._generate_cmd(executable, cmd)

        vvv("EXEC %s" % (local_cmd), host=self.lxc)
        p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        stdout, stderr = p.communicate()
        return (p.returncode, '', stdout, stderr)

    def _normalize_path(self, path, prefix):
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to lxc '''

        out_path = self._normalize_path(out_path, '/')
        vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc)

        local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/tee', out_path]
        vvv("EXEC %s" % (local_cmd), host=self.lxc)

        p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate(open(in_path, 'rb').read())

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from lxc to local '''

        in_path = self._normalize_path(in_path, '/')
        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc)

        local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/cat', in_path]
        vvv("EXEC %s" % (local_cmd), host=self.lxc)

        p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        open(out_path, 'wb').write(stdout)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
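
put_file above streams the file through /bin/tee inside the container's namespace instead of writing to its filesystem directly; an isolated sketch of that pipe (the helper and container name are hypothetical, the virsh invocation mirrors the code above):

import subprocess

def copy_into_container(container, src, dest):
    # pipe the local file into /bin/tee running inside the container namespace
    cmd = ['virsh', '-q', '-c', 'lxc:///', 'lxc-enter-namespace',
           container, '--', '/bin/tee', dest]
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p.communicate(open(src, 'rb').read())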
@ -0,0 +1,138 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import traceback
import os
import pipes
import shutil
import subprocess
import select
import fcntl

from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.plugins.connections import ConnectionBase

from ansible.utils.debug import debug

class Connection(ConnectionBase):
    ''' Local based connections '''

    def get_transport(self):
        ''' used to identify this connection object '''
        return 'local'

    def connect(self, port=None):
        ''' connect to the local host; nothing to do here '''
        return self

    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
        ''' run a command on the local host '''

        debug("in local.exec_command()")
        # su requires to be run from a terminal, and therefore isn't supported here (yet?)
        if su or su_user:
            raise AnsibleError("Internal Error: this module does not support running commands via su")

        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

        # FIXME: su/sudo stuff needs to be generalized
        #if not self.runner.sudo or not sudoable:
        #    if executable:
        #        local_cmd = executable.split() + ['-c', cmd]
        #    else:
        #        local_cmd = cmd
        #else:
        #    local_cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd)
        if executable:
            local_cmd = executable.split() + ['-c', cmd]
        else:
            local_cmd = cmd

        executable = executable.split()[0] if executable else None

        self._display.vvv("%s EXEC %s" % (self._host, local_cmd))
        # FIXME: cwd= needs to be set to the basedir of the playbook
        debug("opening command with Popen()")
        p = subprocess.Popen(
            local_cmd,
            shell=isinstance(local_cmd, basestring),
            executable=executable, #cwd=...
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        debug("done running command with Popen()")

        # FIXME: more su/sudo stuff
        #if self.runner.sudo and sudoable and self.runner.sudo_pass:
        #    fcntl.fcntl(p.stdout, fcntl.F_SETFL,
        #                fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
        #    fcntl.fcntl(p.stderr, fcntl.F_SETFL,
        #                fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
        #    sudo_output = ''
        #    while not sudo_output.endswith(prompt) and success_key not in sudo_output:
        #        rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
        #                                      [p.stdout, p.stderr], self.runner.timeout)
        #        if p.stdout in rfd:
        #            chunk = p.stdout.read()
        #        elif p.stderr in rfd:
        #            chunk = p.stderr.read()
        #        else:
        #            stdout, stderr = p.communicate()
        #            raise AnsibleError('timeout waiting for sudo password prompt:\n' + sudo_output)
        #        if not chunk:
        #            stdout, stderr = p.communicate()
        #            raise AnsibleError('sudo output closed while waiting for password prompt:\n' + sudo_output)
        #        sudo_output += chunk
        #    if success_key not in sudo_output:
        #        p.stdin.write(self.runner.sudo_pass + '\n')
        #    fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
        #    fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        debug("getting output with communicate()")
        stdout, stderr = p.communicate()
        debug("done communicating")

        debug("done with local.exec_command()")
        return (p.returncode, '', stdout, stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to local '''

        #vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
        self._display.vvv("%s PUT %s TO %s" % (self._host, in_path, out_path))
        if not os.path.exists(in_path):
            raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        try:
            shutil.copyfile(in_path, out_path)
        except shutil.Error:
            traceback.print_exc()
            raise AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
        except IOError:
            traceback.print_exc()
            raise AnsibleError("failed to transfer file to %s" % out_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from local to local -- for compatibility '''

        #vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
        self._display.vvv("%s FETCH %s TO %s" % (self._host, in_path, out_path))
        self.put_file(in_path, out_path)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
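
The dispatch in exec_command runs an argv list without a shell when an executable is given, and falls back to a raw string through the shell otherwise; a hedged standalone condensation of that logic (the run_local name is hypothetical):

import subprocess

def run_local(cmd, executable='/bin/sh'):
    # argv list + explicit interpreter when an executable is given,
    # raw string through the shell otherwise
    if executable:
        local_cmd = executable.split() + ['-c', cmd]
    else:
        local_cmd = cmd
    p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    return (p.returncode, stdout, stderr)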
@ -0,0 +1,417 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.


# ---
# The paramiko transport is provided because many distributions, in particular EL6 and before
# do not support ControlPersist in their SSH implementations. This is needed on the Ansible
# control machine to be reasonably efficient with connections. Thus paramiko is faster
# for most users on these platforms. Users with ControlPersist capability can consider
# using -c ssh or configuring the transport in ansible.cfg.

import warnings
import os
import pipes
import socket
import random
import logging
import tempfile
import traceback
import fcntl
import re
import sys
from termios import tcflush, TCIFLUSH
from binascii import hexlify
from ansible.callbacks import vvv
from ansible import errors
from ansible import utils
from ansible import constants as C

AUTHENTICITY_MSG="""
paramiko: The authenticity of host '%s' can't be established.
The %s key fingerprint is %s.
Are you sure you want to continue connecting (yes/no)?
"""

# prevent paramiko warning noise -- see http://stackoverflow.com/questions/3920502/
HAVE_PARAMIKO=False
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    try:
        import paramiko
        HAVE_PARAMIKO=True
        logging.getLogger("paramiko").setLevel(logging.WARNING)
    except ImportError:
        pass

class MyAddPolicy(object):
    """
    Based on AutoAddPolicy in paramiko so we can determine when keys are added
    and also prompt for input.

    Policy for automatically adding the hostname and new host key to the
    local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
    """

    def __init__(self, runner):
        self.runner = runner

    def missing_host_key(self, client, hostname, key):

        if C.HOST_KEY_CHECKING:

            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)

            old_stdin = sys.stdin
            sys.stdin = self.runner._new_stdin
            fingerprint = hexlify(key.get_fingerprint())
            ktype = key.get_name()

            # clear out any premature input on sys.stdin
            tcflush(sys.stdin, TCIFLUSH)

            inp = raw_input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))
            sys.stdin = old_stdin
            if inp not in ['yes', 'y', '']:
                fcntl.flock(self.runner.output_lockfile, fcntl.LOCK_UN)
                fcntl.flock(self.runner.process_lockfile, fcntl.LOCK_UN)
                raise errors.AnsibleError("host connection rejected by user")

            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)


        key._added_by_ansible_this_time = True

        # existing implementation below:
        client._host_keys.add(hostname, key.get_name(), key)

        # host keys are actually saved in close() function below
        # in order to control ordering.


# keep connection objects on a per host basis to avoid repeated attempts to reconnect

SSH_CONNECTION_CACHE = {}
SFTP_CONNECTION_CACHE = {}

class Connection(object):
    ''' SSH based connections with Paramiko '''

    def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):

        self.ssh = None
        self.sftp = None
        self.runner = runner
        self.host = host
        self.port = port or 22
        self.user = user
        self.password = password
        self.private_key_file = private_key_file
        self.has_pipelining = False

    def _cache_key(self):
        return "%s__%s__" % (self.host, self.user)

    def connect(self):
        cache_key = self._cache_key()
        if cache_key in SSH_CONNECTION_CACHE:
            self.ssh = SSH_CONNECTION_CACHE[cache_key]
        else:
            self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
        return self

    def _connect_uncached(self):
        ''' activates the connection object '''

        if not HAVE_PARAMIKO:
            raise errors.AnsibleError("paramiko is not installed")

        vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self.user, self.port, self.host), host=self.host)

        ssh = paramiko.SSHClient()

        self.keyfile = os.path.expanduser("~/.ssh/known_hosts")

        if C.HOST_KEY_CHECKING:
            ssh.load_system_host_keys()

        ssh.set_missing_host_key_policy(MyAddPolicy(self.runner))

        allow_agent = True

        if self.password is not None:
            allow_agent = False

        try:

            if self.private_key_file:
                key_filename = os.path.expanduser(self.private_key_file)
            elif self.runner.private_key_file:
                key_filename = os.path.expanduser(self.runner.private_key_file)
            else:
                key_filename = None
            ssh.connect(self.host, username=self.user, allow_agent=allow_agent, look_for_keys=True,
                        key_filename=key_filename, password=self.password,
                        timeout=self.runner.timeout, port=self.port)

        except Exception, e:

            msg = str(e)
            if "PID check failed" in msg:
                raise errors.AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
            elif "Private key file is encrypted" in msg:
                msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
                    self.user, self.host, self.port, msg)
                raise errors.AnsibleConnectionFailed(msg)
            else:
                raise errors.AnsibleConnectionFailed(msg)

        return ssh

    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
        ''' run a command on the remote host '''

        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")

        bufsize = 4096

        try:

            self.ssh.get_transport().set_keepalive(5)
            chan = self.ssh.get_transport().open_session()

        except Exception, e:

            msg = "Failed to open session"
            if len(str(e)) > 0:
                msg += ": %s" % str(e)
            raise errors.AnsibleConnectionFailed(msg)

        no_prompt_out = ''
        no_prompt_err = ''
        if not (self.runner.sudo and sudoable) and not (self.runner.su and su):

            if executable:
                quoted_command = executable + ' -c ' + pipes.quote(cmd)
            else:
                quoted_command = cmd
            vvv("EXEC %s" % quoted_command, host=self.host)
            chan.exec_command(quoted_command)

        else:

            # sudo usually requires a PTY (cf. requiretty option), therefore
            # we give it one by default (pty=True in ansible.cfg), and we try
            # to initialise from the calling environment
            if C.PARAMIKO_PTY:
                chan.get_pty(term=os.getenv('TERM', 'vt100'),
                             width=int(os.getenv('COLUMNS', 0)),
                             height=int(os.getenv('LINES', 0)))
            if self.runner.sudo or sudoable:
                shcmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd)
            elif self.runner.su or su:
                shcmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd)

            vvv("EXEC %s" % shcmd, host=self.host)
            sudo_output = ''

            try:

                chan.exec_command(shcmd)

                if self.runner.sudo_pass or self.runner.su_pass:

                    while True:

                        if success_key in sudo_output or \
                            (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
                            (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)):
                            break
                        chunk = chan.recv(bufsize)

                        if not chunk:
                            if 'unknown user' in sudo_output:
                                raise errors.AnsibleError(
                                    'user %s does not exist' % sudo_user)
                            else:
                                raise errors.AnsibleError('ssh connection ' +
                                    'closed waiting for password prompt')
                        sudo_output += chunk

                    if success_key not in sudo_output:

                        if sudoable:
                            chan.sendall(self.runner.sudo_pass + '\n')
                        elif su:
                            chan.sendall(self.runner.su_pass + '\n')
                    else:
                        no_prompt_out += sudo_output
                        no_prompt_err += sudo_output

            except socket.timeout:

                raise errors.AnsibleError('ssh timed out waiting for sudo.\n' + sudo_output)

        stdout = ''.join(chan.makefile('rb', bufsize))
        stderr = ''.join(chan.makefile_stderr('rb', bufsize))

        return (chan.recv_exit_status(), '', no_prompt_out + stdout, no_prompt_err + stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to remote '''

        vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)

        if not os.path.exists(in_path):
            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)

        try:
            self.sftp = self.ssh.open_sftp()
        except Exception, e:
            raise errors.AnsibleError("failed to open a SFTP connection (%s)" % e)

        try:
            self.sftp.put(in_path, out_path)
        except IOError:
            raise errors.AnsibleError("failed to transfer file to %s" % out_path)

    def _connect_sftp(self):

        cache_key = "%s__%s__" % (self.host, self.user)
        if cache_key in SFTP_CONNECTION_CACHE:
            return SFTP_CONNECTION_CACHE[cache_key]
        else:
            result = SFTP_CONNECTION_CACHE[cache_key] = self.connect().ssh.open_sftp()
            return result

    def fetch_file(self, in_path, out_path):
        ''' save a remote file to the specified path '''

        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)

        try:
            self.sftp = self._connect_sftp()
        except Exception, e:
            raise errors.AnsibleError("failed to open a SFTP connection (%s)" % e)

        try:
            self.sftp.get(in_path, out_path)
        except IOError:
            raise errors.AnsibleError("failed to transfer file from %s" % in_path)

    def _any_keys_added(self):

        added_any = False
        for hostname, keys in self.ssh._host_keys.iteritems():
            for keytype, key in keys.iteritems():
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if added_this_time:
                    return True
        return False

    def _save_ssh_host_keys(self, filename):
        '''
        not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
        don't complain about it :)
        '''

        if not self._any_keys_added():
            return False

        path = os.path.expanduser("~/.ssh")
        if not os.path.exists(path):
            os.makedirs(path)

        f = open(filename, 'w')

        for hostname, keys in self.ssh._host_keys.iteritems():

            for keytype, key in keys.iteritems():

                # was f.write
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if not added_this_time:
                    f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))

        for hostname, keys in self.ssh._host_keys.iteritems():

            for keytype, key in keys.iteritems():
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if added_this_time:
                    f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))

        f.close()

    def close(self):
        ''' terminate the connection '''

        cache_key = self._cache_key()
        SSH_CONNECTION_CACHE.pop(cache_key, None)
        SFTP_CONNECTION_CACHE.pop(cache_key, None)

        if self.sftp is not None:
            self.sftp.close()

        if C.HOST_KEY_CHECKING and C.PARAMIKO_RECORD_HOST_KEYS and self._any_keys_added():

            # add any new SSH host keys -- warning -- this could be slow
            lockfile = self.keyfile.replace("known_hosts", ".known_hosts.lock")
            dirname = os.path.dirname(self.keyfile)
            if not os.path.exists(dirname):
                os.makedirs(dirname)

            KEY_LOCK = open(lockfile, 'w')
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)

            try:
                # just in case any were added recently

                self.ssh.load_system_host_keys()
                self.ssh._host_keys.update(self.ssh._system_host_keys)

                # gather information about the current key file, so
                # we can ensure the new file has the correct mode/owner

                key_dir = os.path.dirname(self.keyfile)
                key_stat = os.stat(self.keyfile)

                # Save the new keys to a temporary file and move it into place
                # rather than rewriting the file. We set delete=False because
                # the file will be moved into place rather than cleaned up.

                tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
                os.chmod(tmp_keyfile.name, key_stat.st_mode & 07777)
                os.chown(tmp_keyfile.name, key_stat.st_uid, key_stat.st_gid)

                self._save_ssh_host_keys(tmp_keyfile.name)
                tmp_keyfile.close()

                os.rename(tmp_keyfile.name, self.keyfile)

            except:

                # unable to save keys, including scenario when key was invalid
                # and caught earlier
                traceback.print_exc()
                pass
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)

        self.ssh.close()
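
SSH_CONNECTION_CACHE and SFTP_CONNECTION_CACHE memoize one client per (host, user) pair so repeated tasks reuse the transport; a reduced sketch of the same idea (names hypothetical):

_CLIENT_CACHE = {}

def cached_client(host, user, factory):
    # reduction of the connect()/_cache_key() pattern above
    key = "%s__%s__" % (host, user)
    if key not in _CLIENT_CACHE:
        _CLIENT_CACHE[key] = factory()
    return _CLIENT_CACHE[key]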
@ -0,0 +1,487 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

import os
import re
import subprocess
import shlex
import pipes
import random
import select
import fcntl
import hmac
import pwd
import gettext
import pty
from hashlib import sha1

from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.plugins.connections import ConnectionBase

class Connection(ConnectionBase):
    ''' ssh based connections '''

    def __init__(self, host, connection_info, *args, **kwargs):
        super(Connection, self).__init__(host, connection_info)

        # SSH connection specific init stuff
        self.HASHED_KEY_MAGIC = "|1|"
        self._has_pipelining = True

        # FIXME: move the lockfile locations to ActionBase?
        #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
        #self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp', mode=0700)
        self._cp_dir = '/tmp'
        #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)

    def get_transport(self):
        ''' used to identify this connection object from other classes '''
        return 'ssh'

    def connect(self):
        ''' connect to the remote host '''

        self._display.vvv("ESTABLISH CONNECTION FOR USER: %s" % self._connection_info.remote_user, host=self._host)

        self._common_args = []
        extra_args = C.ANSIBLE_SSH_ARGS
        if extra_args is not None:
            # make sure there is no empty string added as this can produce weird errors
            self._common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
        else:
            self._common_args += [
                "-o", "ControlMaster=auto",
                "-o", "ControlPersist=60s",
                "-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir)),
            ]

        cp_in_use = False
        cp_path_set = False
        for arg in self._common_args:
            if "ControlPersist" in arg:
                cp_in_use = True
            if "ControlPath" in arg:
                cp_path_set = True

        if cp_in_use and not cp_path_set:
            self._common_args += ["-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir))]

        if not C.HOST_KEY_CHECKING:
            self._common_args += ["-o", "StrictHostKeyChecking=no"]

        if self._connection_info.port is not None:
            self._common_args += ["-o", "Port=%d" % (self._connection_info.port)]
        #if self.private_key_file is not None:
        #    self._common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.private_key_file)]
        #elif self.runner.private_key_file is not None:
        #    self._common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.runner.private_key_file)]
        if self._connection_info.password:
            self._common_args += ["-o", "GSSAPIAuthentication=no",
                                  "-o", "PubkeyAuthentication=no"]
        else:
            self._common_args += ["-o", "KbdInteractiveAuthentication=no",
                                  "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
                                  "-o", "PasswordAuthentication=no"]
        if self._connection_info.remote_user != pwd.getpwuid(os.geteuid())[0]:
            self._common_args += ["-o", "User=" + self._connection_info.remote_user]
        # FIXME: figure out where this goes
        #self._common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout]
        self._common_args += ["-o", "ConnectTimeout=15"]

        return self

    def _run(self, cmd, indata):
        if indata:
            # do not use a pseudo-pty
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdin = p.stdin
        else:
            # try to use a pseudo-pty
            try:
                # Make sure stdin is a proper (pseudo) pty to avoid tcgetattr errors
                master, slave = pty.openpty()
                p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                stdin = os.fdopen(master, 'w', 0)
                os.close(slave)
            except:
                p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                stdin = p.stdin

        return (p, stdin)

    def _password_cmd(self):
        if self._connection_info.password:
            try:
                p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                p.communicate()
            except OSError:
                raise AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
            (self.rfd, self.wfd) = os.pipe()
            return ["sshpass", "-d%d" % self.rfd]
        return []

    def _send_password(self):
        if self._connection_info.password:
            os.close(self.rfd)
            os.write(self.wfd, "%s\n" % self._connection_info.password)
            os.close(self.wfd)

    def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None):
        fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
        fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
        # We can't use p.communicate here because the ControlMaster may have stdout open as well
        stdout = ''
        stderr = ''
        rpipes = [p.stdout, p.stderr]
        if indata:
            try:
                stdin.write(indata)
                stdin.close()
            except:
                raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
        # Read stdout/stderr from process
        while True:
            rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)

            # FIXME: su/sudo stuff
            # fail early if the sudo/su password is wrong
            #if self.runner.sudo and sudoable:
            #    if self.runner.sudo_pass:
            #        incorrect_password = gettext.dgettext(
            #            "sudo", "Sorry, try again.")
            #        if stdout.endswith("%s\r\n%s" % (incorrect_password,
            #                prompt)):
            #            raise AnsibleError('Incorrect sudo password')
            #
            #        if stdout.endswith(prompt):
            #            raise AnsibleError('Missing sudo password')
            #
            #if self.runner.su and su and self.runner.su_pass:
            #    incorrect_password = gettext.dgettext(
            #        "su", "Sorry")
            #    if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
            #        raise AnsibleError('Incorrect su password')

            if p.stdout in rfd:
                dat = os.read(p.stdout.fileno(), 9000)
                stdout += dat
                if dat == '':
                    rpipes.remove(p.stdout)
            if p.stderr in rfd:
                dat = os.read(p.stderr.fileno(), 9000)
                stderr += dat
                if dat == '':
                    rpipes.remove(p.stderr)
            # only break out if no pipes are left to read or
            # the pipes are completely read and
            # the process is terminated
            if (not rpipes or not rfd) and p.poll() is not None:
                break
            # No pipes are left to read but process is not yet terminated
            # Only then it is safe to wait for the process to be finished
            # NOTE: Actually p.poll() is always None here if rpipes is empty
            elif not rpipes and p.poll() == None:
                p.wait()
                # The process is terminated. Since no pipes to read from are
                # left, there is no need to call select() again.
                break
        # close stdin after process is terminated and stdout/stderr are read
        # completely (see also issue #848)
        stdin.close()
        return (p.returncode, stdout, stderr)

    def not_in_host_file(self, host):
        if 'USER' in os.environ:
            user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
        else:
            user_host_file = "~/.ssh/known_hosts"
        user_host_file = os.path.expanduser(user_host_file)

        host_file_list = []
        host_file_list.append(user_host_file)
        host_file_list.append("/etc/ssh/ssh_known_hosts")
        host_file_list.append("/etc/ssh/ssh_known_hosts2")

        hfiles_not_found = 0
        for hf in host_file_list:
            if not os.path.exists(hf):
                hfiles_not_found += 1
                continue
            try:
                host_fh = open(hf)
            except IOError, e:
                hfiles_not_found += 1
                continue
            else:
                data = host_fh.read()
                host_fh.close()

            for line in data.split("\n"):
                if line is None or " " not in line:
                    continue
                tokens = line.split()
                if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
                    # this is a hashed known host entry
                    try:
                        (kn_salt, kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|", 2)
                        hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
                        hash.update(host)
                        if hash.digest() == kn_host.decode('base64'):
                            return False
                    except:
                        # invalid hashed host key, skip it
                        continue
                else:
                    # standard host file entry
                    if host in tokens[0]:
                        return False

        if (hfiles_not_found == len(host_file_list)):
            self._display.vvv("EXEC previous known host file not found for %s" % host)
        return True
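
not_in_host_file recognizes hashed known_hosts entries (the |1| prefix) by re-computing the HMAC-SHA1 of the hostname with the stored salt; a self-contained sketch of that check (the helper name and sample entry are hypothetical, the logic mirrors the loop above):

import hmac
from hashlib import sha1

HASHED_KEY_MAGIC = "|1|"

def matches_hashed_entry(entry_token, host):
    # entry_token looks like "|1|<base64 salt>|<base64 digest>"
    if not entry_token.startswith(HASHED_KEY_MAGIC):
        return False
    kn_salt, kn_host = entry_token[len(HASHED_KEY_MAGIC):].split("|", 2)
    digest = hmac.new(kn_salt.decode('base64'), host, digestmod=sha1).digest()
    return digest == kn_host.decode('base64')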
def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None, sudoable=False):
|
||||
''' run a command on the remote host '''
|
||||
|
||||
ssh_cmd = self._password_cmd()
|
||||
ssh_cmd += ["ssh", "-C"]
|
||||
if not in_data:
|
||||
# we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
|
||||
# inside a tty automatically invokes the python interactive-mode but the modules are not
|
||||
# compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
|
||||
ssh_cmd += ["-tt"]
|
||||
# FIXME: verbosity needs to move, most likely into connection info or
|
||||
# whatever other context we pass around instead of runner objects
|
||||
#if utils.VERBOSITY > 3:
|
||||
# ssh_cmd += ["-vvv"]
|
||||
#else:
|
||||
# ssh_cmd += ["-q"]
|
||||
ssh_cmd += ["-q"]
|
||||
ssh_cmd += self._common_args
|
||||
|
||||
#if self._ipv6:
|
||||
# ssh_cmd += ['-6']
|
||||
ssh_cmd += [self._host.ipv4_address]
|
||||
|
||||
if not (self._connection_info.sudo or self._connection_info.su) or not sudoable:
|
||||
prompt = None
|
||||
if executable:
|
||||
ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
|
||||
else:
|
||||
ssh_cmd.append(cmd)
|
||||
elif self._connection_info.su and self._connection_info.su_user:
|
||||
su_cmd, prompt, success_key = self._connection_info.make_su_cmd(executable, cmd)
|
||||
ssh_cmd.append(su_cmd)
|
||||
else:
|
||||
# FIXME: hard-coded sudo_exe here
|
||||
sudo_cmd, prompt, success_key = self._connection_info.make_sudo_cmd('/usr/bin/sudo', executable, cmd)
|
||||
ssh_cmd.append(sudo_cmd)
|
||||
|
||||
self._display.vvv("EXEC %s" % ' '.join(ssh_cmd), host=self._host)
|
||||
|
||||
not_in_host_file = self.not_in_host_file(self._host.get_name())
|
||||
|
||||
# FIXME: move the locations of these lock files, same as init above
|
||||
#if C.HOST_KEY_CHECKING and not_in_host_file:
|
||||
# # lock around the initial SSH connectivity so the user prompt about whether to add
|
||||
# # the host to known hosts is not intermingled with multiprocess output.
|
||||
# fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
|
||||
# fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
|
||||
|
||||
# create process
|
||||
(p, stdin) = self._run(ssh_cmd, in_data)
|
||||
|
||||
self._send_password()
|
||||
|
||||
no_prompt_out = ''
|
||||
no_prompt_err = ''
|
||||
# FIXME: su/sudo stuff
|
||||
#if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \
|
||||
# (self.runner.su and su and self.runner.su_pass):
|
||||
# # several cases are handled for sudo privileges with password
|
||||
# # * NOPASSWD (tty & no-tty): detect success_key on stdout
|
||||
# # * without NOPASSWD:
|
||||
# # * detect prompt on stdout (tty)
|
||||
# # * detect prompt on stderr (no-tty)
|
||||
# fcntl.fcntl(p.stdout, fcntl.F_SETFL,
|
||||
# fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
|
||||
# fcntl.fcntl(p.stderr, fcntl.F_SETFL,
|
||||
# fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
|
||||
# sudo_output = ''
|
||||
# sudo_errput = ''
|
||||
#
|
||||
# while True:
|
||||
# if success_key in sudo_output or \
|
||||
# (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
|
||||
# (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)):
|
||||
# break
|
||||
self._display.vvv("EXEC %s" % ' '.join(ssh_cmd), host=self._host)
|
||||
|
||||
not_in_host_file = self.not_in_host_file(self._host.get_name())
|
||||
|
||||
# FIXME: file locations
|
||||
#if C.HOST_KEY_CHECKING and not_in_host_file:
|
||||
# # lock around the initial SSH connectivity so the user prompt about whether to add
|
||||
# # the host to known hosts is not intermingled with multiprocess output.
|
||||
# fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
|
||||
# fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
|
||||
|
||||
# create process
|
||||
(p, stdin) = self._run(ssh_cmd, in_data)
|
||||
|
||||
self._send_password()
|
||||
|
||||
no_prompt_out = ''
|
||||
no_prompt_err = ''
|
||||
# FIXME: su/sudo stuff
|
||||
#if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \
|
||||
# (self.runner.su and su and self.runner.su_pass):
|
||||
# # several cases are handled for sudo privileges with password
|
||||
# # * NOPASSWD (tty & no-tty): detect success_key on stdout
|
||||
# # * without NOPASSWD:
|
||||
# # * detect prompt on stdout (tty)
|
||||
# # * detect prompt on stderr (no-tty)
|
||||
# fcntl.fcntl(p.stdout, fcntl.F_SETFL,
|
||||
# fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
|
||||
# fcntl.fcntl(p.stderr, fcntl.F_SETFL,
|
||||
# fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
|
||||
# sudo_output = ''
|
||||
# sudo_errput = ''
|
||||
#
|
||||
# while True:
|
||||
# if success_key in sudo_output or \
|
||||
# (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
|
||||
# (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)):
|
||||
# break
|
||||
#
|
||||
# rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
|
||||
# [p.stdout], self.runner.timeout)
|
||||
# if p.stderr in rfd:
|
||||
# chunk = p.stderr.read()
|
||||
# if not chunk:
|
||||
# raise AnsibleError('ssh connection closed waiting for sudo or su password prompt')
|
||||
# sudo_errput += chunk
|
||||
# incorrect_password = gettext.dgettext(
|
||||
# "sudo", "Sorry, try again.")
|
||||
# if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
|
||||
# raise AnsibleError('Incorrect sudo password')
|
||||
# elif sudo_errput.endswith(prompt):
|
||||
# stdin.write(self.runner.sudo_pass + '\n')
|
||||
#
|
||||
# if p.stdout in rfd:
|
||||
# chunk = p.stdout.read()
|
||||
# if not chunk:
|
||||
# raise AnsibleError('ssh connection closed waiting for sudo or su password prompt')
|
||||
# sudo_output += chunk
|
||||
#
|
||||
# if not rfd:
|
||||
# # timeout. wrap up process communication
|
||||
# stdout = p.communicate()
|
||||
# raise AnsibleError('ssh connection error waiting for sudo or su password prompt')
|
||||
#
|
||||
# if success_key not in sudo_output:
|
||||
# if sudoable:
|
||||
# stdin.write(self.runner.sudo_pass + '\n')
|
||||
# elif su:
|
||||
# stdin.write(self.runner.su_pass + '\n')
|
||||
# else:
|
||||
# no_prompt_out += sudo_output
|
||||
# no_prompt_err += sudo_errput
|
||||
|
||||
#(returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt)
|
||||
(returncode, stdout, stderr) = self._communicate(p, stdin, in_data, prompt=prompt)
|
||||
|
||||
#if C.HOST_KEY_CHECKING and not_in_host_file:
|
||||
# # lock around the initial SSH connectivity so the user prompt about whether to add
|
||||
# # the host to known hosts is not intermingled with multiprocess output.
|
||||
# fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
|
||||
# fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
|
||||
controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr
|
||||
|
||||
if C.HOST_KEY_CHECKING:
|
||||
if ssh_cmd[0] == "sshpass" and p.returncode == 6:
|
||||
raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
|
||||
|
||||
if p.returncode != 0 and controlpersisterror:
|
||||
raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
|
||||
# FIXME: module name isn't in runner
|
||||
#if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
|
||||
if p.returncode == 255 and in_data:
|
||||
raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
|
||||
|
||||
return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr)
|
||||
|
||||
def put_file(self, in_path, out_path):
|
||||
''' transfer a file from local to remote '''
|
||||
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._host)
|
||||
if not os.path.exists(in_path):
|
||||
raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
|
||||
cmd = self._password_cmd()
|
||||
|
||||
# FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH
|
||||
host = self._host.ipv4_address
|
||||
#if self._ipv6:
|
||||
# host = '[%s]' % host
|
||||
|
||||
if C.DEFAULT_SCP_IF_SSH:
|
||||
cmd += ["scp"] + self._common_args
|
||||
cmd += [in_path,host + ":" + pipes.quote(out_path)]
|
||||
indata = None
|
||||
else:
|
||||
cmd += ["sftp"] + self._common_args + [host]
|
||||
indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path))
|
||||
|
||||
(p, stdin) = self._run(cmd, indata)
|
||||
|
||||
self._send_password()
|
||||
|
||||
(returncode, stdout, stderr) = self._communicate(p, stdin, indata)
|
||||
|
||||
if returncode != 0:
|
||||
raise AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))
|
||||
|
||||
def fetch_file(self, in_path, out_path):
|
||||
''' fetch a file from remote to local '''
|
||||
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._host)
|
||||
cmd = self._password_cmd()
|
||||
|
||||
# FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH
|
||||
host = self._host.ipv4_address
|
||||
#if self._ipv6:
|
||||
# host = '[%s]' % self._host
|
||||
|
||||
if C.DEFAULT_SCP_IF_SSH:
|
||||
cmd += ["scp"] + self._common_args
|
||||
cmd += [host + ":" + in_path, out_path]
|
||||
indata = None
|
||||
else:
|
||||
cmd += ["sftp"] + self._common_args + [host]
|
||||
indata = "get %s %s\n" % (in_path, out_path)
|
||||
|
||||
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
self._send_password()
|
||||
stdout, stderr = p.communicate(indata)
|
||||
|
||||
if p.returncode != 0:
|
||||
raise AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr))
|
||||
|
||||
def close(self):
|
||||
''' not applicable since we're executing openssh binaries '''
|
||||
pass
|
||||
|
@ -0,0 +1,258 @@
|
||||
# (c) 2014, Chris Church <chris@ninemoreminutes.com>
|
||||
#
|
||||
# This file is part of Ansible.
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import imp
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import traceback
|
||||
import urlparse
|
||||
from ansible import errors
|
||||
from ansible import utils
|
||||
from ansible.callbacks import vvv, vvvv, verbose
|
||||
from ansible.runner.shell_plugins import powershell
|
||||
|
||||
try:
|
||||
from winrm import Response
|
||||
from winrm.exceptions import WinRMTransportError
|
||||
from winrm.protocol import Protocol
|
||||
except ImportError:
|
||||
raise errors.AnsibleError("winrm is not installed")
|
||||
|
||||
_winrm_cache = {
|
||||
# 'user:pwhash@host:port': <protocol instance>
|
||||
}
|
||||
|
||||
def vvvvv(msg, host=None):
|
||||
verbose(msg, host=host, caplevel=4)
|
||||
|
||||
class Connection(object):
|
||||
'''WinRM connections over HTTP/HTTPS.'''
|
||||
|
||||
def __init__(self, runner, host, port, user, password, *args, **kwargs):
|
||||
self.runner = runner
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.user = user
|
||||
self.password = password
|
||||
self.has_pipelining = False
|
||||
self.default_shell = 'powershell'
|
||||
self.default_suffixes = ['.ps1', '']
|
||||
self.protocol = None
|
||||
self.shell_id = None
|
||||
self.delegate = None
|
||||
|
||||
def _winrm_connect(self):
|
||||
'''
|
||||
Establish a WinRM connection over HTTP/HTTPS.
|
||||
'''
|
||||
port = self.port or 5986
|
||||
vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \
|
||||
(self.user, port, self.host), host=self.host)
|
||||
netloc = '%s:%d' % (self.host, port)
|
||||
cache_key = '%s:%s@%s:%d' % (self.user, hashlib.md5(self.password).hexdigest(), self.host, port)
|
||||
if cache_key in _winrm_cache:
|
||||
vvvv('WINRM REUSE EXISTING CONNECTION: %s' % cache_key, host=self.host)
|
||||
return _winrm_cache[cache_key]
|
||||
transport_schemes = [('plaintext', 'https'), ('plaintext', 'http')] # FIXME: ssl/kerberos
|
||||
if port == 5985:
|
||||
transport_schemes = reversed(transport_schemes)
|
||||
exc = None
|
||||
for transport, scheme in transport_schemes:
|
||||
endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', ''))
|
||||
vvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint),
|
||||
host=self.host)
|
||||
protocol = Protocol(endpoint, transport=transport,
|
||||
username=self.user, password=self.password)
|
||||
try:
|
||||
protocol.send_message('')
|
||||
_winrm_cache[cache_key] = protocol
|
||||
return protocol
|
||||
except WinRMTransportError, exc:
|
||||
err_msg = str(exc)
|
||||
if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I):
|
||||
raise errors.AnsibleError("the connection attempt timed out")
|
||||
m = re.search(r'Code\s+?(\d{3})', err_msg)
|
||||
if m:
|
||||
code = int(m.groups()[0])
|
||||
if code == 401:
|
||||
raise errors.AnsibleError("the username/password specified for this server was incorrect")
|
||||
elif code == 411:
|
||||
_winrm_cache[cache_key] = protocol
|
||||
return protocol
|
||||
vvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self.host)
|
||||
continue
|
||||
if exc:
|
||||
raise errors.AnsibleError(str(exc))
|
||||
|
||||
def _winrm_exec(self, command, args=(), from_exec=False):
|
||||
if from_exec:
|
||||
vvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
|
||||
else:
|
||||
vvvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
|
||||
if not self.protocol:
|
||||
self.protocol = self._winrm_connect()
|
||||
if not self.shell_id:
|
||||
self.shell_id = self.protocol.open_shell()
|
||||
command_id = None
|
||||
try:
|
||||
command_id = self.protocol.run_command(self.shell_id, command, args)
|
||||
response = Response(self.protocol.get_command_output(self.shell_id, command_id))
|
||||
if from_exec:
|
||||
vvvv('WINRM RESULT %r' % response, host=self.host)
|
||||
else:
|
||||
vvvvv('WINRM RESULT %r' % response, host=self.host)
|
||||
vvvvv('WINRM STDOUT %s' % response.std_out, host=self.host)
|
||||
vvvvv('WINRM STDERR %s' % response.std_err, host=self.host)
|
||||
return response
|
||||
finally:
|
||||
if command_id:
|
||||
self.protocol.cleanup_command(self.shell_id, command_id)
|
||||
|
||||
def connect(self):
|
||||
if not self.protocol:
|
||||
self.protocol = self._winrm_connect()
|
||||
return self
|
||||
|
||||
def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable=None, in_data=None, su=None, su_user=None):
|
||||
cmd = cmd.encode('utf-8')
|
||||
cmd_parts = shlex.split(cmd, posix=False)
|
||||
if '-EncodedCommand' in cmd_parts:
|
||||
encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
|
||||
decoded_cmd = base64.b64decode(encoded_cmd)
|
||||
vvv("EXEC %s" % decoded_cmd, host=self.host)
|
||||
else:
|
||||
vvv("EXEC %s" % cmd, host=self.host)
|
||||
# For script/raw support.
|
||||
if cmd_parts and cmd_parts[0].lower().endswith('.ps1'):
|
||||
script = powershell._build_file_cmd(cmd_parts)
|
||||
cmd_parts = powershell._encode_script(script, as_list=True)
|
||||
try:
|
||||
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
|
||||
except Exception, e:
|
||||
traceback.print_exc()
|
||||
raise errors.AnsibleError("failed to exec cmd %s" % cmd)
|
||||
return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8'))
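The -EncodedCommand branch above relies on PowerShell's convention that an encoded command is the base64 form of UTF-16LE text. A rough standalone sketch of that encoding (not the actual powershell shell-plugin helper used here):

import base64

def encode_powershell(script):
    # powershell.exe -EncodedCommand expects base64 over UTF-16LE text
    return base64.b64encode(script.encode('utf-16-le'))

encoded = encode_powershell('Get-ChildItem C:\\')   # hypothetical command
# which would then be run as: powershell.exe -NoProfile -EncodedCommand <encoded>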
|
||||
|
||||
def put_file(self, in_path, out_path):
|
||||
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
|
||||
if not os.path.exists(in_path):
|
||||
raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
|
||||
with open(in_path) as in_file:
|
||||
in_size = os.path.getsize(in_path)
|
||||
script_template = '''
|
||||
$s = [System.IO.File]::OpenWrite("%s");
|
||||
[void]$s.Seek(%d, [System.IO.SeekOrigin]::Begin);
|
||||
$b = [System.Convert]::FromBase64String("%s");
|
||||
[void]$s.Write($b, 0, $b.length);
|
||||
[void]$s.SetLength(%d);
|
||||
[void]$s.Close();
|
||||
'''
|
||||
# Determine max size of data we can pass per command.
|
||||
script = script_template % (powershell._escape(out_path), in_size, '', in_size)
|
||||
cmd = powershell._encode_script(script)
|
||||
# Encode script with no data, subtract its length from 8190 (max
|
||||
# windows command length), divide by 2.67 (UTF16LE base64 command
|
||||
# encoding), then by 1.35 again (data base64 encoding).
|
||||
buffer_size = int(((8190 - len(cmd)) / 2.67) / 1.35)
|
||||
for offset in xrange(0, in_size, buffer_size):
|
||||
try:
|
||||
out_data = in_file.read(buffer_size)
|
||||
if offset == 0:
|
||||
if out_data.lower().startswith('#!powershell') and not out_path.lower().endswith('.ps1'):
|
||||
out_path = out_path + '.ps1'
|
||||
b64_data = base64.b64encode(out_data)
|
||||
script = script_template % (powershell._escape(out_path), offset, b64_data, in_size)
|
||||
vvvv("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self.host)
|
||||
cmd_parts = powershell._encode_script(script, as_list=True)
|
||||
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
|
||||
if result.status_code != 0:
|
||||
raise IOError(result.std_err.encode('utf-8'))
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
raise errors.AnsibleError("failed to transfer file to %s" % out_path)
|
||||
|
||||
def fetch_file(self, in_path, out_path):
|
||||
out_path = out_path.replace('\\', '/')
|
||||
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
|
||||
buffer_size = 2**20 # 1MB chunks
|
||||
if not os.path.exists(os.path.dirname(out_path)):
|
||||
os.makedirs(os.path.dirname(out_path))
|
||||
out_file = None
|
||||
try:
|
||||
offset = 0
|
||||
while True:
|
||||
try:
|
||||
script = '''
|
||||
If (Test-Path -PathType Leaf "%(path)s")
|
||||
{
|
||||
$stream = [System.IO.File]::OpenRead("%(path)s");
|
||||
$stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null;
|
||||
$buffer = New-Object Byte[] %(buffer_size)d;
|
||||
$bytesRead = $stream.Read($buffer, 0, %(buffer_size)d);
|
||||
$bytes = $buffer[0..($bytesRead-1)];
|
||||
[System.Convert]::ToBase64String($bytes);
|
||||
$stream.Close() | Out-Null;
|
||||
}
|
||||
ElseIf (Test-Path -PathType Container "%(path)s")
|
||||
{
|
||||
Write-Host "[DIR]";
|
||||
}
|
||||
Else
|
||||
{
|
||||
Write-Error "%(path)s does not exist";
|
||||
Exit 1;
|
||||
}
|
||||
''' % dict(buffer_size=buffer_size, path=powershell._escape(in_path), offset=offset)
|
||||
vvvv("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self.host)
|
||||
cmd_parts = powershell._encode_script(script, as_list=True)
|
||||
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
|
||||
if result.status_code != 0:
|
||||
raise IOError(result.std_err.encode('utf-8'))
|
||||
if result.std_out.strip() == '[DIR]':
|
||||
data = None
|
||||
else:
|
||||
data = base64.b64decode(result.std_out.strip())
|
||||
if data is None:
|
||||
if not os.path.exists(out_path):
|
||||
os.makedirs(out_path)
|
||||
break
|
||||
else:
|
||||
if not out_file:
|
||||
# If out_path is a directory and we're expecting a file, bail out now.
|
||||
if os.path.isdir(out_path):
|
||||
break
|
||||
out_file = open(out_path, 'wb')
|
||||
out_file.write(data)
|
||||
if len(data) < buffer_size:
|
||||
break
|
||||
offset += len(data)
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
raise errors.AnsibleError("failed to transfer file to %s" % out_path)
|
||||
finally:
|
||||
if out_file:
|
||||
out_file.close()
|
||||
|
||||
def close(self):
|
||||
if self.protocol and self.shell_id:
|
||||
self.protocol.close_shell(self.shell_id)
|
||||
self.shell_id = None
|
@ -0,0 +1,323 @@
|
||||
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import base64
|
||||
import json
|
||||
import os.path
|
||||
import yaml
|
||||
import types
|
||||
import pipes
|
||||
import glob
|
||||
import re
|
||||
import collections
|
||||
import operator as py_operator
|
||||
from distutils.version import LooseVersion, StrictVersion
|
||||
from random import SystemRandom, shuffle
|
||||
from jinja2.filters import environmentfilter
|
||||
|
||||
from ansible import errors
|
||||
from ansible.utils.hashing import md5s, checksum_s
|
||||
|
||||
def to_nice_yaml(*a, **kw):
|
||||
'''Make verbose, human readable yaml'''
|
||||
return yaml.safe_dump(*a, indent=4, allow_unicode=True, default_flow_style=False, **kw)
|
||||
|
||||
def to_json(a, *args, **kw):
|
||||
''' Convert the value to JSON '''
|
||||
return json.dumps(a, *args, **kw)
|
||||
|
||||
def to_nice_json(a, *args, **kw):
|
||||
'''Make verbose, human readable JSON'''
|
||||
return json.dumps(a, indent=4, sort_keys=True, *args, **kw)
|
||||
|
||||
def failed(*a, **kw):
|
||||
''' Test if task result yields failed '''
|
||||
item = a[0]
|
||||
if type(item) != dict:
|
||||
raise errors.AnsibleFilterError("|failed expects a dictionary")
|
||||
rc = item.get('rc',0)
|
||||
failed = item.get('failed',False)
|
||||
if rc != 0 or failed:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def success(*a, **kw):
|
||||
''' Test if task result yields success '''
|
||||
return not failed(*a, **kw)
|
||||
|
||||
def changed(*a, **kw):
|
||||
''' Test if task result yields changed '''
|
||||
item = a[0]
|
||||
if type(item) != dict:
|
||||
raise errors.AnsibleFilterError("|changed expects a dictionary")
|
||||
if not 'changed' in item:
|
||||
changed = False
|
||||
if ('results' in item # some modules return a 'results' key
|
||||
and type(item['results']) == list
|
||||
and type(item['results'][0]) == dict):
|
||||
for result in item['results']:
|
||||
changed = changed or result.get('changed', False)
|
||||
else:
|
||||
changed = item.get('changed', False)
|
||||
return changed
|
||||
|
||||
def skipped(*a, **kw):
|
||||
''' Test if task result yields skipped '''
|
||||
item = a[0]
|
||||
if type(item) != dict:
|
||||
raise errors.AnsibleFilterError("|skipped expects a dictionary")
|
||||
skipped = item.get('skipped', False)
|
||||
return skipped
|
||||
|
||||
def mandatory(a):
|
||||
''' Make a variable mandatory '''
|
||||
try:
|
||||
a
|
||||
except NameError:
|
||||
raise errors.AnsibleFilterError('Mandatory variable not defined.')
|
||||
else:
|
||||
return a
|
||||
|
||||
def bool(a):
|
||||
''' return a bool for the arg '''
|
||||
if a is None or type(a) == bool:
|
||||
return a
|
||||
if type(a) in types.StringTypes:
|
||||
a = a.lower()
|
||||
if a in ['yes', 'on', '1', 'true', 1]:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def quote(a):
|
||||
''' return its argument quoted for shell usage '''
|
||||
return pipes.quote(a)
|
||||
|
||||
def fileglob(pathname):
|
||||
''' return list of matched files for glob '''
|
||||
return glob.glob(pathname)
|
||||
|
||||
def regex(value='', pattern='', ignorecase=False, match_type='search'):
|
||||
''' Expose `re` as a boolean filter using the `search` method by default.
|
||||
This is likely only useful for `search` and `match` which already
|
||||
have their own filters.
|
||||
'''
|
||||
if ignorecase:
|
||||
flags = re.I
|
||||
else:
|
||||
flags = 0
|
||||
_re = re.compile(pattern, flags=flags)
|
||||
_bool = __builtins__.get('bool')
|
||||
return _bool(getattr(_re, match_type, 'search')(value))
|
||||
|
||||
def match(value, pattern='', ignorecase=False):
|
||||
''' Perform a `re.match` returning a boolean '''
|
||||
return regex(value, pattern, ignorecase, 'match')
|
||||
|
||||
def search(value, pattern='', ignorecase=False):
|
||||
''' Perform a `re.search` returning a boolean '''
|
||||
return regex(value, pattern, ignorecase, 'search')
|
||||
|
||||
def regex_replace(value='', pattern='', replacement='', ignorecase=False):
|
||||
''' Perform a `re.sub` returning a string '''
|
||||
|
||||
if not isinstance(value, basestring):
|
||||
value = str(value)
|
||||
|
||||
if ignorecase:
|
||||
flags = re.I
|
||||
else:
|
||||
flags = 0
|
||||
_re = re.compile(pattern, flags=flags)
|
||||
return _re.sub(replacement, value)
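As a quick illustration of the three regex filters above when called as plain functions (the strings and patterns are made up):

search('ansible-2.x', r'\d+\.x')                                      # True
match('2.x-release', r'\d+\.x')                                       # True
regex_replace('backup-2015-01-01.tgz', r'\d{4}-\d{2}-\d{2}', 'DATE')  # 'backup-DATE.tgz'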
|
||||
|
||||
def unique(a):
|
||||
if isinstance(a,collections.Hashable):
|
||||
c = set(a)
|
||||
else:
|
||||
c = []
|
||||
for x in a:
|
||||
if x not in c:
|
||||
c.append(x)
|
||||
return c
|
||||
|
||||
def intersect(a, b):
|
||||
if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
|
||||
c = set(a) & set(b)
|
||||
else:
|
||||
c = unique(filter(lambda x: x in b, a))
|
||||
return c
|
||||
|
||||
def difference(a, b):
|
||||
if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
|
||||
c = set(a) - set(b)
|
||||
else:
|
||||
c = unique(filter(lambda x: x not in b, a))
|
||||
return c
|
||||
|
||||
def symmetric_difference(a, b):
|
||||
if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
|
||||
c = set(a) ^ set(b)
|
||||
else:
|
||||
c = unique(filter(lambda x: x not in intersect(a,b), union(a,b)))
|
||||
return c
|
||||
|
||||
def union(a, b):
|
||||
if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
|
||||
c = set(a) | set(b)
|
||||
else:
|
||||
c = unique(a + b)
|
||||
return c
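For example, on plain lists (which are not hashable) the set-theory filters above take the order-preserving fallback branch, so under Python 2 semantics, matching the rest of this module:

union([1, 2, 3], [3, 4])              # [1, 2, 3, 4]
intersect(['a', 'b'], ['b', 'c'])     # ['b']
difference([1, 2, 3], [2])            # [1, 3]
symmetric_difference([1, 2], [2, 3])  # [1, 3]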
|
||||
|
||||
def min(a):
|
||||
_min = __builtins__.get('min')
|
||||
return _min(a);
|
||||
|
||||
def max(a):
|
||||
_max = __builtins__.get('max')
|
||||
return _max(a);
|
||||
|
||||
def version_compare(value, version, operator='eq', strict=False):
|
||||
''' Perform a version comparison on a value '''
|
||||
op_map = {
|
||||
'==': 'eq', '=': 'eq', 'eq': 'eq',
|
||||
'<': 'lt', 'lt': 'lt',
|
||||
'<=': 'le', 'le': 'le',
|
||||
'>': 'gt', 'gt': 'gt',
|
||||
'>=': 'ge', 'ge': 'ge',
|
||||
'!=': 'ne', '<>': 'ne', 'ne': 'ne'
|
||||
}
|
||||
|
||||
if strict:
|
||||
Version = StrictVersion
|
||||
else:
|
||||
Version = LooseVersion
|
||||
|
||||
if operator in op_map:
|
||||
operator = op_map[operator]
|
||||
else:
|
||||
raise errors.AnsibleFilterError('Invalid operator type')
|
||||
|
||||
try:
|
||||
method = getattr(py_operator, operator)
|
||||
return method(Version(str(value)), Version(str(version)))
|
||||
except Exception, e:
|
||||
raise errors.AnsibleFilterError('Version comparison: %s' % e)
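A couple of illustrative calls to the filter above (the version strings are made up; LooseVersion is used unless strict=True):

version_compare('2.7.10', '2.7', '>=')  # True
version_compare('1.0', '1.0.1', 'lt')   # True
version_compare('1.0', '1.0', '!=')     # False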
|
||||
|
||||
@environmentfilter
|
||||
def rand(environment, end, start=None, step=None):
|
||||
r = SystemRandom()
|
||||
if isinstance(end, (int, long)):
|
||||
if not start:
|
||||
start = 0
|
||||
if not step:
|
||||
step = 1
|
||||
return r.randrange(start, end, step)
|
||||
elif hasattr(end, '__iter__'):
|
||||
if start or step:
|
||||
raise errors.AnsibleFilterError('start and step can only be used with integer values')
|
||||
return r.choice(end)
|
||||
else:
|
||||
raise errors.AnsibleFilterError('random can only be used on sequences and integers')
|
||||
|
||||
def randomize_list(mylist):
|
||||
try:
|
||||
mylist = list(mylist)
|
||||
shuffle(mylist)
|
||||
except:
|
||||
pass
|
||||
return mylist
|
||||
|
||||
class FilterModule(object):
|
||||
''' Ansible core jinja2 filters '''
|
||||
|
||||
def filters(self):
|
||||
return {
|
||||
# base 64
|
||||
'b64decode': base64.b64decode,
|
||||
'b64encode': base64.b64encode,
|
||||
|
||||
# json
|
||||
'to_json': to_json,
|
||||
'to_nice_json': to_nice_json,
|
||||
'from_json': json.loads,
|
||||
|
||||
# yaml
|
||||
'to_yaml': yaml.safe_dump,
|
||||
'to_nice_yaml': to_nice_yaml,
|
||||
'from_yaml': yaml.safe_load,
|
||||
|
||||
# path
|
||||
'basename': os.path.basename,
|
||||
'dirname': os.path.dirname,
|
||||
'expanduser': os.path.expanduser,
|
||||
'realpath': os.path.realpath,
|
||||
'relpath': os.path.relpath,
|
||||
|
||||
# failure testing
|
||||
'failed' : failed,
|
||||
'success' : success,
|
||||
|
||||
# changed testing
|
||||
'changed' : changed,
|
||||
|
||||
# skip testing
|
||||
'skipped' : skipped,
|
||||
|
||||
# variable existence
|
||||
'mandatory': mandatory,
|
||||
|
||||
# value as boolean
|
||||
'bool': bool,
|
||||
|
||||
# quote string for shell usage
|
||||
'quote': quote,
|
||||
|
||||
# hash filters
|
||||
# md5 hex digest of string
|
||||
'md5': md5s,
|
||||
# sha1 hex digest of string
'sha1': checksum_s,
# checksum of string as used by ansible for checksumming files
|
||||
'checksum': checksum_s,
|
||||
|
||||
# file glob
|
||||
'fileglob': fileglob,
|
||||
|
||||
# regex
|
||||
'match': match,
|
||||
'search': search,
|
||||
'regex': regex,
|
||||
'regex_replace': regex_replace,
|
||||
|
||||
# list
|
||||
'unique' : unique,
|
||||
'intersect': intersect,
|
||||
'difference': difference,
|
||||
'symmetric_difference': symmetric_difference,
|
||||
'union': union,
|
||||
'min' : min,
|
||||
'max' : max,
|
||||
|
||||
# version comparison
|
||||
'version_compare': version_compare,
|
||||
|
||||
# random stuff
|
||||
'random': rand,
|
||||
'shuffle': randomize_list,
|
||||
}
|
@ -1,82 +0,0 @@
|
||||
# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from ansible import utils, errors
|
||||
import os
|
||||
import codecs
|
||||
import csv
|
||||
|
||||
class LookupModule(object):
|
||||
|
||||
def __init__(self, basedir=None, **kwargs):
|
||||
self.basedir = basedir
|
||||
|
||||
def read_csv(self, filename, key, delimiter, dflt=None, col=1):
|
||||
|
||||
try:
|
||||
f = codecs.open(filename, 'r', encoding='utf-8')
|
||||
creader = csv.reader(f, delimiter=delimiter)
|
||||
|
||||
for row in creader:
|
||||
if row[0] == key:
|
||||
return row[int(col)]
|
||||
except Exception, e:
|
||||
raise errors.AnsibleError("csvfile: %s" % str(e))
|
||||
|
||||
return dflt
|
||||
|
||||
def run(self, terms, inject=None, **kwargs):
|
||||
|
||||
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
|
||||
|
||||
if isinstance(terms, basestring):
|
||||
terms = [ terms ]
|
||||
|
||||
ret = []
|
||||
for term in terms:
|
||||
params = term.split()
|
||||
key = params[0]
|
||||
|
||||
paramvals = {
|
||||
'file' : 'ansible.csv',
|
||||
'default' : None,
|
||||
'delimiter' : "TAB",
|
||||
'col' : "1", # column to return
|
||||
}
|
||||
|
||||
# parameters specified?
|
||||
try:
|
||||
for param in params[1:]:
|
||||
name, value = param.split('=')
|
||||
assert(name in paramvals)
|
||||
paramvals[name] = value
|
||||
except (ValueError, AssertionError), e:
|
||||
raise errors.AnsibleError(e)
|
||||
|
||||
if paramvals['delimiter'] == 'TAB':
|
||||
paramvals['delimiter'] = "\t"
|
||||
|
||||
path = utils.path_dwim(self.basedir, paramvals['file'])
|
||||
|
||||
var = self.read_csv(path, key, paramvals['delimiter'], paramvals['default'], paramvals['col'])
|
||||
if var is not None:
|
||||
if type(var) is list:
|
||||
for v in var:
|
||||
ret.append(v)
|
||||
else:
|
||||
ret.append(var)
|
||||
return ret
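The term format accepted by run() above is a key followed by optional name=value pairs; a hypothetical term parses like this:

term = 'fruit file=prices.csv delimiter=, col=2 default=n/a'   # hypothetical values
params = term.split()
key = params[0]                                # 'fruit'
opts = dict(p.split('=') for p in params[1:])
# {'file': 'prices.csv', 'delimiter': ',', 'col': '2', 'default': 'n/a'}
# read_csv() then returns column 2 of the row whose first field equals 'fruit'.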
|
@ -1,39 +0,0 @@
|
||||
# (c) 2014, Kent R. Spillner <kspillner@acm.org>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from ansible.utils import safe_eval
|
||||
import ansible.utils as utils
|
||||
import ansible.errors as errors
|
||||
|
||||
def flatten_hash_to_list(terms):
|
||||
ret = []
|
||||
for key in terms:
|
||||
ret.append({'key': key, 'value': terms[key]})
|
||||
return ret
|
||||
|
||||
class LookupModule(object):
|
||||
|
||||
def __init__(self, basedir=None, **kwargs):
|
||||
self.basedir = basedir
|
||||
|
||||
def run(self, terms, inject=None, **kwargs):
|
||||
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
|
||||
|
||||
if not isinstance(terms, dict):
|
||||
raise errors.AnsibleError("with_dict expects a dict")
|
||||
|
||||
return flatten_hash_to_list(terms)
|
@ -1,68 +0,0 @@
|
||||
# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from ansible import utils, errors
|
||||
import os
|
||||
HAVE_DNS=False
|
||||
try:
|
||||
import dns.resolver
|
||||
from dns.exception import DNSException
|
||||
HAVE_DNS=True
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
# ==============================================================
|
||||
# DNSTXT: DNS TXT records
|
||||
#
|
||||
# key=domainname
|
||||
# TODO: configurable resolver IPs
|
||||
# --------------------------------------------------------------
|
||||
|
||||
class LookupModule(object):
|
||||
|
||||
def __init__(self, basedir=None, **kwargs):
|
||||
self.basedir = basedir
|
||||
|
||||
if HAVE_DNS == False:
|
||||
raise errors.AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")
|
||||
|
||||
def run(self, terms, inject=None, **kwargs):
|
||||
|
||||
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
|
||||
|
||||
if isinstance(terms, basestring):
|
||||
terms = [ terms ]
|
||||
|
||||
ret = []
|
||||
for term in terms:
|
||||
domain = term.split()[0]
|
||||
string = []
|
||||
try:
|
||||
answers = dns.resolver.query(domain, 'TXT')
|
||||
for rdata in answers:
|
||||
s = rdata.to_text()
|
||||
string.append(s[1:-1]) # Strip outside quotes on TXT rdata
|
||||
|
||||
except dns.resolver.NXDOMAIN:
|
||||
string = 'NXDOMAIN'
|
||||
except dns.resolver.Timeout:
|
||||
string = ''
|
||||
except dns.exception.DNSException, e:
|
||||
raise errors.AnsibleError("dns.resolver unhandled exception", e)
|
||||
|
||||
ret.append(''.join(string))
|
||||
return ret
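The lookup above boils down to a TXT query through dnspython; a minimal standalone equivalent (the domain is only an example, and dnspython must be installed):

import dns.resolver

answers = dns.resolver.query('example.com', 'TXT')
txt = ''.join(rdata.to_text()[1:-1] for rdata in answers)   # strip the surrounding quotes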
|
@ -1,78 +0,0 @@
|
||||
# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from ansible import utils
|
||||
import os
|
||||
import urllib2
|
||||
try:
|
||||
import json
|
||||
except ImportError:
|
||||
import simplejson as json
|
||||
|
||||
# this can be made configurable, but should not use ansible.cfg
|
||||
ANSIBLE_ETCD_URL = 'http://127.0.0.1:4001'
|
||||
if os.getenv('ANSIBLE_ETCD_URL') is not None:
|
||||
ANSIBLE_ETCD_URL = os.environ['ANSIBLE_ETCD_URL']
|
||||
|
||||
class etcd():
|
||||
def __init__(self, url=ANSIBLE_ETCD_URL):
|
||||
self.url = url
|
||||
self.baseurl = '%s/v1/keys' % (self.url)
|
||||
|
||||
def get(self, key):
|
||||
url = "%s/%s" % (self.baseurl, key)
|
||||
|
||||
data = None
|
||||
value = ""
|
||||
try:
|
||||
r = urllib2.urlopen(url)
|
||||
data = r.read()
|
||||
except:
|
||||
return value
|
||||
|
||||
try:
|
||||
# {"action":"get","key":"/name","value":"Jane Jolie","index":5}
|
||||
item = json.loads(data)
|
||||
if 'value' in item:
|
||||
value = item['value']
|
||||
if 'errorCode' in item:
|
||||
value = "ENOENT"
|
||||
except:
|
||||
raise
|
||||
|
||||
return value
|
||||
|
||||
class LookupModule(object):
|
||||
|
||||
def __init__(self, basedir=None, **kwargs):
|
||||
self.basedir = basedir
|
||||
self.etcd = etcd()
|
||||
|
||||
def run(self, terms, inject=None, **kwargs):
|
||||
|
||||
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
|
||||
|
||||
if isinstance(terms, basestring):
|
||||
terms = [ terms ]
|
||||
|
||||
ret = []
|
||||
for term in terms:
|
||||
key = term.split()[0]
|
||||
value = self.etcd.get(key)
|
||||
ret.append(value)
|
||||
return ret
|
@ -1,59 +0,0 @@
|
||||
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from ansible import utils, errors
|
||||
import os
|
||||
import codecs
|
||||
|
||||
class LookupModule(object):
|
||||
|
||||
def __init__(self, basedir=None, **kwargs):
|
||||
self.basedir = basedir
|
||||
|
||||
def run(self, terms, inject=None, **kwargs):
|
||||
|
||||
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
|
||||
ret = []
|
||||
|
||||
# this can happen if the variable contains a string, strictly not desired for lookup
|
||||
# plugins, but users may try it, so make it work.
|
||||
if not isinstance(terms, list):
|
||||
terms = [ terms ]
|
||||
|
||||
for term in terms:
|
||||
basedir_path = utils.path_dwim(self.basedir, term)
|
||||
relative_path = None
|
||||
playbook_path = None
|
||||
|
||||
# Special handling of the file lookup, used primarily when the
|
||||
# lookup is done from a role. If the file isn't found in the
|
||||
# basedir of the current file, use dwim_relative to look in the
|
||||
# role/files/ directory, and finally the playbook directory
|
||||
# itself (which will be relative to the current working dir)
|
||||
if '_original_file' in inject:
|
||||
relative_path = utils.path_dwim_relative(inject['_original_file'], 'files', term, self.basedir, check=False)
|
||||
if 'playbook_dir' in inject:
|
||||
playbook_path = os.path.join(inject['playbook_dir'], term)
|
||||
|
||||
for path in (basedir_path, relative_path, playbook_path):
|
||||
if path and os.path.exists(path):
|
||||
ret.append(codecs.open(path, encoding="utf8").read().rstrip())
|
||||
break
|
||||
else:
|
||||
raise errors.AnsibleError("could not locate file in lookup: %s" % term)
|
||||
|
||||
return ret
|
@ -1,194 +0,0 @@
|
||||
# (c) 2013, seth vidal <skvidal@fedoraproject.org> red hat, inc
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
# take a list of files and (optionally) a list of paths
|
||||
# return the first existing file found in the paths
|
||||
# [file1, file2, file3], [path1, path2, path3]
|
||||
# search order is:
|
||||
# path1/file1
|
||||
# path1/file2
|
||||
# path1/file3
|
||||
# path2/file1
|
||||
# path2/file2
|
||||
# path2/file3
|
||||
# path3/file1
|
||||
# path3/file2
|
||||
# path3/file3
|
||||
|
||||
# first file found with os.path.exists() is returned
|
||||
# no file matches raises ansibleerror
|
||||
# EXAMPLES
|
||||
# - name: copy first existing file found to /some/file
|
||||
# action: copy src=$item dest=/some/file
|
||||
# with_first_found:
|
||||
# - files: foo ${inventory_hostname} bar
|
||||
# paths: /tmp/production /tmp/staging
|
||||
|
||||
# that will look for files in this order:
|
||||
# /tmp/production/foo
|
||||
# ${inventory_hostname}
|
||||
# bar
|
||||
# /tmp/staging/foo
|
||||
# ${inventory_hostname}
|
||||
# bar
|
||||
|
||||
# - name: copy first existing file found to /some/file
|
||||
# action: copy src=$item dest=/some/file
|
||||
# with_first_found:
|
||||
# - files: /some/place/foo ${inventory_hostname} /some/place/else
|
||||
|
||||
# that will look for files in this order:
|
||||
# /some/place/foo
|
||||
# $relative_path/${inventory_hostname}
|
||||
# /some/place/else
|
||||
|
||||
# example - including tasks:
|
||||
# tasks:
|
||||
# - include: $item
|
||||
# with_first_found:
|
||||
# - files: generic
|
||||
# paths: tasks/staging tasks/production
|
||||
# this will include the tasks in the file generic where it is found first (staging or production)
|
||||
|
||||
# example simple file lists
|
||||
#tasks:
|
||||
#- name: first found file
|
||||
# action: copy src=$item dest=/etc/file.cfg
|
||||
# with_first_found:
|
||||
# - files: foo.${inventory_hostname} foo
|
||||
|
||||
|
||||
# example skipping if no matched files
|
||||
# first_found also offers the ability to control whether failing
# to find a file returns an error or not
|
||||
#
|
||||
#- name: first found file - or skip
|
||||
# action: copy src=$item dest=/etc/file.cfg
|
||||
# with_first_found:
|
||||
# - files: foo.${inventory_hostname}
|
||||
# skip: true
|
||||
|
||||
# example a role with default configuration and configuration per host
|
||||
# you can set multiple terms with their own files and paths to look through.
|
||||
# consider a role that sets some configuration per host falling back on a default config.
|
||||
#
|
||||
#- name: some configuration template
|
||||
# template: src={{ item }} dest=/etc/file.cfg mode=0444 owner=root group=root
|
||||
# with_first_found:
|
||||
# - files:
|
||||
# - ${inventory_hostname}/etc/file.cfg
|
||||
# paths:
|
||||
# - ../../../templates.overwrites
|
||||
# - ../../../templates
|
||||
# - files:
|
||||
# - etc/file.cfg
|
||||
# paths:
|
||||
# - templates
|
||||
|
||||
# the above will return an empty list if the files cannot be found at all
|
||||
# if skip is unspecified or if it is set to false then it will return an
# error which can be caught by ignore_errors: true for that action.
|
||||
|
||||
# finally - if you want, you can use it in place of first_available_file:
# you simply cannot use the files, paths or skip options. simply replace
# first_available_file with with_first_found and leave the file listing in place
|
||||
#
|
||||
#
|
||||
# - name: with_first_found like first_available_file
|
||||
# action: copy src=$item dest=/tmp/faftest
|
||||
# with_first_found:
|
||||
# - ../files/foo
|
||||
# - ../files/bar
|
||||
# - ../files/baz
|
||||
# ignore_errors: true
|
||||
|
||||
|
||||
from ansible import utils, errors
|
||||
import os
|
||||
|
||||
class LookupModule(object):
|
||||
|
||||
def __init__(self, basedir=None, **kwargs):
|
||||
self.basedir = basedir
|
||||
|
||||
def run(self, terms, inject=None, **kwargs):
|
||||
|
||||
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
|
||||
|
||||
result = None
|
||||
anydict = False
|
||||
skip = False
|
||||
|
||||
for term in terms:
|
||||
if isinstance(term, dict):
|
||||
anydict = True
|
||||
|
||||
total_search = []
|
||||
if anydict:
|
||||
for term in terms:
|
||||
if isinstance(term, dict):
|
||||
files = term.get('files', [])
|
||||
paths = term.get('paths', [])
|
||||
skip = utils.boolean(term.get('skip', False))
|
||||
|
||||
filelist = files
|
||||
if isinstance(files, basestring):
|
||||
files = files.replace(',', ' ')
|
||||
files = files.replace(';', ' ')
|
||||
filelist = files.split(' ')
|
||||
|
||||
pathlist = paths
|
||||
if paths:
|
||||
if isinstance(paths, basestring):
|
||||
paths = paths.replace(',', ' ')
|
||||
paths = paths.replace(':', ' ')
|
||||
paths = paths.replace(';', ' ')
|
||||
pathlist = paths.split(' ')
|
||||
|
||||
if not pathlist:
|
||||
total_search = filelist
|
||||
else:
|
||||
for path in pathlist:
|
||||
for fn in filelist:
|
||||
f = os.path.join(path, fn)
|
||||
total_search.append(f)
|
||||
else:
|
||||
total_search.append(term)
|
||||
else:
|
||||
total_search = terms
|
||||
|
||||
for fn in total_search:
|
||||
if inject and '_original_file' in inject:
|
||||
# check the templates and vars directories too,
|
||||
# if they exist
|
||||
for roledir in ('templates', 'vars'):
|
||||
path = utils.path_dwim(os.path.join(self.basedir, '..', roledir), fn)
|
||||
if os.path.exists(path):
|
||||
return [path]
|
||||
# if none of the above were found, just check the
|
||||
# current filename against the basedir (this will already
|
||||
# have ../files from runner, if it's a role task)
|
||||
path = utils.path_dwim(self.basedir, fn)
|
||||
if os.path.exists(path):
|
||||
return [path]
|
||||
else:
|
||||
if skip:
|
||||
return []
|
||||
else:
|
||||
return [None]
|
||||
|
@ -1,78 +0,0 @@
|
||||
# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import ansible.utils as utils
|
||||
import ansible.errors as errors
|
||||
|
||||
|
||||
def check_list_of_one_list(term):
|
||||
# make sure term is not a list of one (list of one..) item
|
||||
# return the final non list item if so
|
||||
|
||||
if isinstance(term,list) and len(term) == 1:
|
||||
term = term[0]
|
||||
if isinstance(term,list):
|
||||
term = check_list_of_one_list(term)
|
||||
|
||||
return term
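In other words, singleton lists are unwrapped recursively until a non-singleton (or non-list) value remains, e.g.:

check_list_of_one_list([['a']])         # 'a'
check_list_of_one_list([[['x', 'y']]])  # ['x', 'y']
check_list_of_one_list('plain')         # 'plain'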
|
||||
|
||||
|
||||
|
||||
class LookupModule(object):
|
||||
|
||||
def __init__(self, basedir=None, **kwargs):
|
||||
self.basedir = basedir
|
||||
|
||||
|
||||
def flatten(self, terms, inject):
|
||||
|
||||
ret = []
|
||||
for term in terms:
|
||||
term = check_list_of_one_list(term)
|
||||
|
||||
if term == 'None' or term == 'null':
|
||||
# ignore undefined items
|
||||
break
|
||||
|
||||
if isinstance(term, basestring):
|
||||
# convert a variable to a list
|
||||
term2 = utils.listify_lookup_plugin_terms(term, self.basedir, inject)
|
||||
# but avoid converting a plain string to a list of one string
|
||||
if term2 != [ term ]:
|
||||
term = term2
|
||||
|
||||
if isinstance(term, list):
|
||||
# if it's a list, check recursively for items that are a list
|
||||
term = self.flatten(term, inject)
|
||||
ret.extend(term)
|
||||
else:
|
||||
ret.append(term)
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def run(self, terms, inject=None, **kwargs):
|
||||
|
||||
# see if the string represents a list and convert to list if so
|
||||
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
|
||||
|
||||
if not isinstance(terms, list):
|
||||
raise errors.AnsibleError("with_flattened expects a list")
|
||||
|
||||
ret = self.flatten(terms, inject)
|
||||
return ret
|
||||
|
@ -1,48 +0,0 @@
|
||||
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
# (c) 2013, Steven Dossett <sdossett@panath.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from ansible.utils import safe_eval
|
||||
import ansible.utils as utils
|
||||
import ansible.errors as errors
|
||||
import ansible.inventory as inventory
|
||||
|
||||
def flatten(terms):
|
||||
ret = []
|
||||
for term in terms:
|
||||
if isinstance(term, list):
|
||||
ret.extend(term)
|
||||
else:
|
||||
ret.append(term)
|
||||
return ret
|
||||
|
||||
class LookupModule(object):
|
||||
|
||||
def __init__(self, basedir=None, **kwargs):
|
||||
self.basedir = basedir
|
||||
if 'runner' in kwargs:
|
||||
self.host_list = kwargs['runner'].inventory.host_list
|
||||
else:
|
||||
raise errors.AnsibleError("inventory_hostnames must be used as a loop. Example: \"with_inventory_hostnames: \'all\'\"")
|
||||
|
||||
def run(self, terms, inject=None, **kwargs):
|
||||
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
|
||||
|
||||
if not isinstance(terms, list):
|
||||
raise errors.AnsibleError("with_inventory_hostnames expects a list")
|
||||
return flatten(inventory.Inventory(self.host_list).list_hosts(terms))
|
||||
|
@ -1,38 +0,0 @@
|
||||
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import subprocess
|
||||
from ansible import utils, errors
|
||||
|
||||
class LookupModule(object):
|
||||
|
||||
def __init__(self, basedir=None, **kwargs):
|
||||
self.basedir = basedir
|
||||
|
||||
def run(self, terms, inject=None, **kwargs):
|
||||
|
||||
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
|
||||
|
||||
ret = []
|
||||
for term in terms:
|
||||
p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
|
||||
(stdout, stderr) = p.communicate()
|
||||
if p.returncode == 0:
|
||||
ret.extend(stdout.splitlines())
|
||||
else:
|
||||
raise errors.AnsibleError("lookup_plugin.lines(%s) returned %d" % (term, p.returncode))
|
||||
return ret
|
@ -1,73 +0,0 @@
|
||||
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import ansible.utils as utils
|
||||
from ansible.utils import safe_eval
|
||||
import ansible.errors as errors
|
||||
|
||||
def flatten(terms):
|
||||
ret = []
|
||||
for term in terms:
|
||||
if isinstance(term, list):
|
||||
ret.extend(term)
|
||||
elif isinstance(term, tuple):
|
||||
ret.extend(term)
|
||||
else:
|
||||
ret.append(term)
|
||||
return ret
|
||||
|
||||
def combine(a,b):
|
||||
results = []
|
||||
for x in a:
|
||||
for y in b:
|
||||
results.append(flatten([x,y]))
|
||||
return results
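combine() above builds the cartesian product of two term lists, flattening each pair, e.g.:

combine(['a', 'b'], [1, 2])   # [['a', 1], ['a', 2], ['b', 1], ['b', 2]]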
|
||||
|
||||
class LookupModule(object):
|
||||
|
||||
def __init__(self, basedir=None, **kwargs):
|
||||
self.basedir = basedir
|
||||
|
||||
def __lookup_injects(self, terms, inject):
|
||||
results = []
|
||||
for x in terms:
|
||||
intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
|
||||
results.append(intermediate)
|
||||
return results
|
||||
|
||||
def run(self, terms, inject=None, **kwargs):
|
||||
|
||||
# this code is common with 'items.py' consider moving to utils if we need it again
|
||||
|
||||
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
|
||||
terms = self.__lookup_injects(terms, inject)
|
||||
|
||||
my_list = terms[:]
|
||||
my_list.reverse()
|
||||
result = []
|
||||
if len(my_list) == 0:
|
||||
raise errors.AnsibleError("with_nested requires at least one element in the nested list")
|
||||
result = my_list.pop()
|
||||
while len(my_list) > 0:
|
||||
result2 = combine(result, my_list.pop())
|
||||
result = result2
|
||||
new_result = []
|
||||
for x in result:
|
||||
new_result.append(flatten(x))
|
||||
return new_result
|
||||
|
||||
|