Merge pull request #219 from dw/dmw

Doc updates, connection delegation v1.
dw committed 7 years ago (via GitHub)
commit 09473da3e0

@@ -0,0 +1,5 @@
2018-04-30 v0.0.1
* Initial release to support the Mitogen extension for Ansible.

@@ -33,6 +33,8 @@ import shlex
 import sys
 import time
+import jinja2.runtime
+import ansible.constants as C
 import ansible.errors
 import ansible.plugins.connection
@@ -47,6 +49,135 @@ import ansible_mitogen.services

LOG = logging.getLogger(__name__)

def _connect_local(spec):
    return {
        'method': 'local',
        'kwargs': {
            'python_path': spec['python_path'],
        }
    }


def _connect_ssh(spec):
    return {
        'method': 'ssh',
        'kwargs': {
            'check_host_keys': False,  # TODO
            'hostname': spec['remote_addr'],
            'username': spec['remote_user'],
            'password': spec['password'],
            'port': spec['port'],
            'python_path': spec['python_path'],
            'identity_file': spec['private_key_file'],
            'ssh_path': spec['ssh_executable'],
            'connect_timeout': spec['ansible_ssh_timeout'],
            'ssh_args': spec['ssh_args'],
        }
    }


def _connect_docker(spec):
    return {
        'method': 'docker',
        'kwargs': {
            'username': spec['remote_user'],
            'container': spec['remote_addr'],
            'python_path': spec['python_path'],
            'connect_timeout': spec['ansible_ssh_timeout'] or spec['timeout'],
        }
    }


def _connect_sudo(spec):
    return {
        'method': 'sudo',
        'kwargs': {
            'username': spec['become_user'],
            'password': spec['become_pass'],
            'python_path': spec['python_path'],
            'sudo_path': spec['become_exe'],
            'connect_timeout': spec['timeout'],
            'sudo_args': spec['sudo_args'],
        }
    }


CONNECTION_METHOD = {
    'sudo': _connect_sudo,
    'ssh': _connect_ssh,
    'local': _connect_local,
    'docker': _connect_docker,
}
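
For orientation, a minimal sketch of the dispatch above. Every literal value here is hypothetical; real specs are produced by config_from_play_context() / config_from_hostvars() below:

    # Hypothetical spec; only the keys read by _connect_ssh() are shown.
    spec = {
        'remote_addr': 'web1.dc1',
        'remote_user': 'deploy',
        'password': None,
        'port': 22,
        'python_path': '/usr/bin/python',
        'private_key_file': None,
        'ssh_executable': 'ssh',
        'ansible_ssh_timeout': 10,
        'ssh_args': [],
    }
    description = CONNECTION_METHOD['ssh'](spec)
    # -> {'method': 'ssh', 'kwargs': {'hostname': 'web1.dc1', ...}}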

def config_from_play_context(transport, inventory_name, connection):
    """
    Return a dict representing all important connection configuration, allowing
    the same functions to work regardless of whether configuration came from
    play_context (direct connection) or host vars (mitogen_via=).
    """
    return {
        'transport': transport,
        'inventory_name': inventory_name,
        'remote_addr': connection._play_context.remote_addr,
        'remote_user': connection._play_context.remote_user,
        'become': connection._play_context.become,
        'become_method': connection._play_context.become_method,
        'become_user': connection._play_context.become_user,
        'become_pass': connection._play_context.become_pass,
        'password': connection._play_context.password,
        'port': connection._play_context.port,
        'python_path': connection.python_path,
        'private_key_file': connection._play_context.private_key_file,
        'ssh_executable': connection._play_context.ssh_executable,
        'timeout': connection._play_context.timeout,
        'ansible_ssh_timeout': connection.ansible_ssh_timeout,
        'ssh_args': [
            term
            for s in (
                getattr(connection._play_context, 'ssh_args', ''),
                getattr(connection._play_context, 'ssh_common_args', ''),
                getattr(connection._play_context, 'ssh_extra_args', '')
            )
            for term in shlex.split(s or '')
        ],
        'become_exe': connection._play_context.become_exe,
        'sudo_args': [
            term
            for s in (
                connection._play_context.sudo_flags,
                connection._play_context.become_flags
            )
            for term in shlex.split(s or '')
        ],
        'mitogen_via': connection.mitogen_via,
    }


def config_from_hostvars(transport, inventory_name, connection,
                         hostvars, become_user):
    """
    Override config_from_play_context() to take equivalent information from
    host vars.
    """
    config = config_from_play_context(transport, inventory_name, connection)
    hostvars = dict(hostvars)
    return dict(config, **{
        'remote_addr': hostvars.get('ansible_hostname', inventory_name),
        'become': bool(become_user),
        'become_user': become_user,
        'become_pass': None,
        'remote_user': hostvars.get('ansible_user'),  # TODO
        'password': (hostvars.get('ansible_ssh_pass') or
                     hostvars.get('ansible_password')),
        'port': hostvars.get('ansible_port'),
        'python_path': hostvars.get('ansible_python_interpreter'),
        'private_key_file': (hostvars.get('ansible_ssh_private_key_file') or
                             hostvars.get('ansible_private_key_file')),
        'mitogen_via': hostvars.get('mitogen_via'),
    })
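
A hedged illustration of the override semantics; all values below are made up:

    # Hypothetical host vars for an intermediary. Every other key in the
    # config keeps the value derived from play_context above.
    via_vars = {
        'ansible_user': 'admin',
        'ansible_port': 2222,
        'ansible_python_interpreter': '/usr/bin/python2.7',
    }
    # config_from_hostvars(...) would yield a config whose remote_user is
    # 'admin', port is 2222, and python_path is '/usr/bin/python2.7'.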

class Connection(ansible.plugins.connection.ConnectionBase):
    #: mitogen.master.Broker for this worker.
    broker = None
@@ -58,45 +189,36 @@ class Connection(ansible.plugins.connection.ConnectionBase):
    #: presently always the master process.
    parent = None

-    #: mitogen.master.Context connected to the target machine's initial SSH
-    #: account.
-    host = None
-
    #: mitogen.master.Context connected to the target user account on the
-    #: target machine (i.e. via sudo), or simply a copy of :attr:`host` if
-    #: become is not in use.
+    #: target machine (i.e. via sudo).
    context = None

    #: Only sudo is supported for now.
    become_methods = ['sudo']

-    #: Set by the constructor according to whichever connection type this
-    #: connection should emulate. We emulate the original connection type to
-    #: work around artificial limitations in e.g. the synchronize action, which
-    #: hard-codes 'local' and 'ssh' as the only allowable connection types.
-    transport = None
-
    #: Set to 'ansible_python_interpreter' by on_action_run().
    python_path = None

-    #: Set to 'ansible_sudo_exe' by on_action_run().
-    sudo_path = None
-
    #: Set to 'ansible_ssh_timeout' by on_action_run().
    ansible_ssh_timeout = None

+    #: Set to 'mitogen_via' by on_action_run().
+    mitogen_via = None
+
+    #: Set to 'inventory_hostname' by on_action_run().
+    inventory_hostname = None
+
+    #: Set to 'hostvars' by on_action_run().
+    host_vars = None
+
    #: Set after connection to the target context's home directory.
    _homedir = None

-    def __init__(self, play_context, new_stdin, original_transport, **kwargs):
+    def __init__(self, play_context, new_stdin, **kwargs):
        assert ansible_mitogen.process.MuxProcess.unix_listener_path, (
-            'The "mitogen" connection plug-in may only be instantiated '
-            'by the "mitogen" strategy plug-in.'
+            'Mitogen connection types may only be instantiated '
+            'while the "mitogen" strategy is active.'
        )
-        self.original_transport = original_transport
-        self.transport = original_transport
-        self.kwargs = kwargs
        super(Connection, self).__init__(play_context, new_stdin)

    def __del__(self):
@@ -114,19 +236,12 @@ class Connection(ansible.plugins.connection.ConnectionBase):
        executing. We use the opportunity to grab relevant bits from the
        task-specific data.
        """
-        self.ansible_ssh_timeout = task_vars.get(
-            'ansible_ssh_timeout',
-            None
-        )
-        self.python_path = task_vars.get(
-            'ansible_python_interpreter',
-            '/usr/bin/python'
-        )
-        self.sudo_path = task_vars.get(
-            'ansible_sudo_exe',
-            'sudo'
-        )
+        self.ansible_ssh_timeout = task_vars.get('ansible_ssh_timeout')
+        self.python_path = task_vars.get('ansible_python_interpreter',
+                                         '/usr/bin/python')
+        self.mitogen_via = task_vars.get('mitogen_via')
+        self.inventory_hostname = task_vars['inventory_hostname']
+        self.host_vars = task_vars['hostvars']
        self.close(new_task=True)

    @property
@@ -138,109 +253,52 @@ class Connection(ansible.plugins.connection.ConnectionBase):
    def connected(self):
        return self.context is not None
-    def _on_connection_error(self, msg):
-        raise ansible.errors.AnsibleConnectionFailure(msg)
-
-    def _on_become_error(self, msg):
-        # TODO: vanilla become failures yield this:
-        # {
-        #     "changed": false,
-        #     "module_stderr": "sudo: sorry, you must have a tty to run sudo\n",
-        #     "module_stdout": "",
-        #     "msg": "MODULE FAILURE",
-        #     "rc": 1
-        # }
-        #
-        # Currently we yield this:
-        # {
-        #     "msg": "EOF on stream; last 300 bytes received: 'sudo: ....\n'"
-        # }
-        raise ansible.errors.AnsibleModuleError(msg)
-
-    def _wrap_connect(self, on_error, kwargs):
-        dct = mitogen.service.call(
-            context=self.parent,
-            handle=ansible_mitogen.services.ContextService.handle,
-            method='get',
-            kwargs=mitogen.utils.cast(kwargs),
-        )
-        if dct['msg']:
-            on_error(dct['msg'])
-        return dct['context'], dct['home_dir']
-
-    def _connect_local(self):
-        """
-        Fetch a reference to the local() Context from ContextService in the
-        master process.
-        """
-        return self._wrap_connect(self._on_connection_error, {
-            'method_name': 'local',
-            'python_path': self.python_path,
-        })
-
-    def _connect_ssh(self):
-        """
-        Fetch a reference to an SSH Context matching the play context from
-        ContextService in the master process.
-        """
-        return self._wrap_connect(self._on_connection_error, {
-            'method_name': 'ssh',
-            'check_host_keys': False,  # TODO
-            'hostname': self._play_context.remote_addr,
-            'username': self._play_context.remote_user,
-            'password': self._play_context.password,
-            'port': self._play_context.port,
-            'python_path': self.python_path,
-            'identity_file': self._play_context.private_key_file,
-            'ssh_path': self._play_context.ssh_executable,
-            'connect_timeout': self.ansible_ssh_timeout,
-            'ssh_args': [
-                term
-                for s in (
-                    getattr(self._play_context, 'ssh_args', ''),
-                    getattr(self._play_context, 'ssh_common_args', ''),
-                    getattr(self._play_context, 'ssh_extra_args', '')
-                )
-                for term in shlex.split(s or '')
-            ]
-        })
-
-    def _connect_docker(self):
-        return self._wrap_connect(self._on_connection_error, {
-            'method_name': 'docker',
-            'container': self._play_context.remote_addr,
-            'python_path': self.python_path,
-            'connect_timeout': self._play_context.timeout,
-        })
-
-    def _connect_sudo(self, via=None, python_path=None):
-        """
-        Fetch a reference to a sudo Context matching the play context from
-        ContextService in the master process.
-
-        :param via:
-            Parent Context of the sudo Context. For Ansible, this should always
-            be a Context returned by _connect_ssh().
-        """
-        return self._wrap_connect(self._on_become_error, {
-            'method_name': 'sudo',
-            'username': self._play_context.become_user,
-            'password': self._play_context.become_pass,
-            'python_path': python_path or self.python_path,
-            'sudo_path': self.sudo_path,
-            'connect_timeout': self._play_context.timeout,
-            'via': via,
-            'sudo_args': [
-                term
-                for s in (
-                    self._play_context.sudo_flags,
-                    self._play_context.become_flags
-                )
-                for term in shlex.split(s or '')
-            ],
-        })
+    def _config_from_via(self, via_spec):
+        become_user, _, inventory_name = via_spec.rpartition('@')
+        via_vars = self.host_vars[inventory_name]
+        if isinstance(via_vars, jinja2.runtime.Undefined):
+            raise ansible.errors.AnsibleConnectionFailure(
+                self.unknown_via_msg % (
+                    self.mitogen_via,
+                    inventory_name,
+                )
+            )
+
+        return config_from_hostvars(
+            transport=via_vars.get('ansible_connection', 'ssh'),
+            inventory_name=inventory_name,
+            connection=self,
+            hostvars=via_vars,
+            become_user=become_user or None,
+        )
+
+    unknown_via_msg = 'mitogen_via=%s of %s specifies an unknown hostname'
+    via_cycle_msg = 'mitogen_via=%s of %s creates a cycle (%s)'
+
+    def _stack_from_config(self, config, stack=(), seen_names=()):
+        if config['inventory_name'] in seen_names:
+            raise ansible.errors.AnsibleConnectionFailure(
+                self.via_cycle_msg % (
+                    config['mitogen_via'],
+                    config['inventory_name'],
+                    ' -> '.join(reversed(
+                        seen_names + (config['inventory_name'],)
+                    )),
+                )
+            )
+
+        if config['mitogen_via']:
+            stack, seen_names = self._stack_from_config(
+                self._config_from_via(config['mitogen_via']),
+                stack=stack,
+                seen_names=seen_names + (config['inventory_name'],)
+            )
+
+        stack += (CONNECTION_METHOD[config['transport']](config),)
+        if config['become']:
+            stack += (CONNECTION_METHOD[config['become_method']](config),)

+        return stack, seen_names
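
To make the recursion concrete, a hedged sketch of the stack produced for a host reached through a bastion with become enabled. The inventory names are hypothetical; each element is a CONNECTION_METHOD description, established via the previous one:

    stack = (
        {'method': 'ssh', 'kwargs': {'hostname': 'bastion.dc1'}},  # first hop
        {'method': 'ssh', 'kwargs': {'hostname': 'app1'}},         # via bastion
        {'method': 'sudo', 'kwargs': {'username': 'root'}},        # become step
    )
    # kwargs abbreviated; the real dicts carry the full key sets shown earlier.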
    def _connect(self):
        """
@@ -263,24 +321,30 @@ class Connection(ansible.plugins.connection.ConnectionBase):
            broker=self.broker,
        )

-        if self.original_transport == 'local':
-            if self._play_context.become:
-                self.context, self._homedir = self._connect_sudo(
-                    python_path=sys.executable
-                )
-            else:
-                self.context, self._homedir = self._connect_local()
-            return
-
-        if self.original_transport == 'docker':
-            self.host, self._homedir = self._connect_docker()
-        elif self.original_transport == 'ssh':
-            self.host, self._homedir = self._connect_ssh()
-
-        if self._play_context.become:
-            self.context, self._homedir = self._connect_sudo(via=self.host)
-        else:
-            self.context = self.host
+        stack, _ = self._stack_from_config(
+            config_from_play_context(
+                transport=self.transport,
+                inventory_name=self.inventory_hostname,
+                connection=self
+            )
+        )
+
+        dct = mitogen.service.call(
+            context=self.parent,
+            handle=ansible_mitogen.services.ContextService.handle,
+            method='get',
+            kwargs=mitogen.utils.cast({
+                'stack': stack,
+            })
+        )
+
+        if dct['msg']:
+            if dct['method_name'] in self.become_methods:
+                raise ansible.errors.AnsibleModuleError(dct['msg'])
+            raise ansible.errors.AnsibleConnectionFailure(dct['msg'])
+
+        self.context = dct['context']
+        self._homedir = dct['home_dir']
    def get_context_name(self):
        """
@@ -296,18 +360,16 @@ class Connection(ansible.plugins.connection.ConnectionBase):
        gracefully shut down, and wait for shutdown to complete. Safe to call
        multiple times.
        """
-        for context in set([self.host, self.context]):
-            if context:
-                mitogen.service.call(
-                    context=self.parent,
-                    handle=ansible_mitogen.services.ContextService.handle,
-                    method='put',
-                    kwargs={
-                        'context': context
-                    }
-                )
-        self.host = None
+        if self.context:
+            mitogen.service.call(
+                context=self.parent,
+                handle=ansible_mitogen.services.ContextService.handle,
+                method='put',
+                kwargs={
+                    'context': self.context
+                }
+            )
        self.context = None
        if self.broker and not new_task:
            self.broker.shutdown()
@@ -420,3 +482,15 @@ class Connection(ansible.plugins.connection.ConnectionBase):
            in_path=in_path,
            out_path=out_path
        )

class SshConnection(Connection):
    transport = 'ssh'


class LocalConnection(Connection):
    transport = 'local'


class DockerConnection(Connection):
    transport = 'docker'

@@ -0,0 +1,56 @@
# Copyright 2017, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os.path
import sys
#
# This is not the real Connection implementation module, it simply exists as a
# proxy to the real module, which is loaded using Python's regular import
# mechanism, to prevent Ansible's PluginLoader from making up a fake name that
# results in ansible_mitogen plugin modules being loaded twice: once by
# PluginLoader with a name like "ansible.plugins.connection.mitogen", which is
# stuffed into sys.modules even though attempting to import it will trigger an
# ImportError, and once under its canonical name, "ansible_mitogen.connection".
#
# Therefore we have a proxy module that imports it under the real name, and
# sets up the duff PluginLoader-imported module to just contain objects from
# the real module, so duplicate types don't exist in memory, and things like
# debuggers and isinstance() work predictably.
#
try:
    import ansible_mitogen
except ImportError:
    base_dir = os.path.dirname(__file__)
    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
    del base_dir

from ansible_mitogen.connection import DockerConnection as Connection
del os
del sys

@@ -0,0 +1,56 @@
# Copyright 2017, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os.path
import sys
#
# This is not the real Connection implementation module, it simply exists as a
# proxy to the real module, which is loaded using Python's regular import
# mechanism, to prevent Ansible's PluginLoader from making up a fake name that
# results in ansible_mitogen plugin modules being loaded twice: once by
# PluginLoader with a name like "ansible.plugins.connection.mitogen", which is
# stuffed into sys.modules even though attempting to import it will trigger an
# ImportError, and once under its canonical name, "ansible_mitogen.connection".
#
# Therefore we have a proxy module that imports it under the real name, and
# sets up the duff PluginLoader-imported module to just contain objects from
# the real module, so duplicate types don't exist in memory, and things like
# debuggers and isinstance() work predictably.
#
try:
    import ansible_mitogen
except ImportError:
    base_dir = os.path.dirname(__file__)
    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
    del base_dir

from ansible_mitogen.connection import LocalConnection as Connection
del os
del sys

@@ -51,6 +51,6 @@ except ImportError:
    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
    del base_dir

-from ansible_mitogen.connection import Connection
+from ansible_mitogen.connection import SshConnection as Connection
del os
del sys

@@ -82,9 +82,9 @@ class ContextService(mitogen.service.Service):
        #: Records the :meth:`get` result dict for successful calls, returned
        #: for identical subsequent calls. Keyed by :meth:`key_from_kwargs`.
        self._response_by_key = {}
-        #: List of :class:`mitogen.core.Message` waiting for the result dict
-        #: for a particular connection config. Keyed as above.
-        self._waiters_by_key = {}
+        #: List of :class:`mitogen.core.Latch` awaiting the result for a
+        #: particular key.
+        self._latches_by_key = {}
        #: Mapping of :class:`mitogen.core.Context` -> reference count. Each
        #: call to :meth:`get` increases this by one. Calls to :meth:`put`
        #: decrease it by one.
@@ -134,10 +134,10 @@ class ContextService(mitogen.service.Service):
        """
        self._lock.acquire()
        try:
-            waiters = self._waiters_by_key.pop(key)
-            count = len(waiters)
-            for msg in waiters:
-                msg.reply(response)
+            latches = self._latches_by_key.pop(key)
+            count = len(latches)
+            for latch in latches:
+                latch.put(response)
        finally:
            self._lock.release()
        return count
@@ -164,17 +164,12 @@ class ContextService(mitogen.service.Service):
        finally:
            self._lock.release()

-    def _update_lru(self, new_context, **kwargs):
+    def _update_lru(self, new_context, spec, via):
        """
        Update the LRU ("MRU"?) list associated with the connection described
        by `kwargs`, destroying the most recently created context if the list
        is full. Finally add `new_context` to the list.
        """
-        via = kwargs.get('via')
-        if via is None:
-            # We don't have a limit on the number of direct connections.
-            return
        lru = self._lru_by_via.setdefault(via, [])
        if len(lru) < self.max_interpreters:
            lru.append(new_context)
@@ -207,7 +202,7 @@ class ContextService(mitogen.service.Service):
        """
        # TODO: there is a race between creation of a context and disconnection
        # of its related stream. An error reply should be sent to any message
-        # in _waiters_by_key below.
+        # in _latches_by_key below.
        self._lock.acquire()
        try:
            for context, key in list(self._key_by_context.items()):
@@ -215,14 +210,14 @@ class ContextService(mitogen.service.Service):
                    LOG.info('Dropping %r due to disconnect of %r',
                             context, stream)
                    self._response_by_key.pop(key, None)
-                    self._waiters_by_key.pop(key, None)
+                    self._latches_by_key.pop(key, None)
                    self._refs_by_context.pop(context, None)
                    self._lru_by_via.pop(context, None)
                    self._refs_by_context.pop(context, None)
        finally:
            self._lock.release()
-    def _connect(self, key, method_name, **kwargs):
+    def _connect(self, key, spec, via=None):
        """
        Actual connect implementation. Arranges for the Mitogen connection to
        be created and enqueues an asynchronous call to start the forked task
@@ -230,11 +225,8 @@ class ContextService(mitogen.service.Service):
        :param key:
            Deduplication key representing the connection configuration.
-        :param method_name:
-            :class:`mitogen.parent.Router` method implementing the connection
-            type.
-        :param kwargs:
-            Keyword arguments passed to the router method.
+        :param spec:
+            Connection specification.

        :returns:
            Dict like::
@@ -248,21 +240,14 @@ class ContextService(mitogen.service.Service):
            :data:`None`, or `msg` is :data:`None` and the remaining fields are
            set.
        """
-        method = getattr(self.router, method_name, None)
-        if method is None:
-            raise Error('no such Router method: %s' % (method_name,))
        try:
-            context = method(**kwargs)
-        except mitogen.core.StreamError as e:
-            return {
-                'context': None,
-                'home_dir': None,
-                'msg': str(e),
-            }
+            method = getattr(self.router, spec['method'])
+        except AttributeError:
+            raise Error('unsupported method: %(method)s' % spec)

-        if kwargs.get('via'):
-            self._update_lru(context, method_name=method_name, **kwargs)
+        context = method(via=via, **spec['kwargs'])
+        if via:
+            self._update_lru(context, spec, via)
        else:
            # For directly connected contexts, listen to the associated
            # Stream's disconnect event and use it to invalidate dependent
@@ -289,60 +274,74 @@ class ContextService(mitogen.service.Service):
            'msg': None,
        }
-    @mitogen.service.expose(mitogen.service.AllowParents())
-    @mitogen.service.arg_spec({
-        'method_name': str
-    })
-    def get(self, msg, **kwargs):
-        """
-        Return a Context referring to an established connection with the given
-        configuration, establishing a new connection as necessary.
-
-        :param str method_name:
-            The :class:`mitogen.parent.Router` connection method to use.
-        :param dict kwargs:
-            Keyword arguments passed to `mitogen.master.Router.[method_name]()`.
-
-        :returns tuple:
-            Tuple of `(context, home_dir)`, where:
-
-            * `context` is the mitogen.master.Context referring to the
-              target context.
-            * `home_dir` is a cached copy of the remote directory.
-        """
-        key = self.key_from_kwargs(**kwargs)
+    def _wait_or_start(self, spec, via=None):
+        latch = mitogen.core.Latch()
+        key = self.key_from_kwargs(via=via, **spec)
        self._lock.acquire()
        try:
            response = self._response_by_key.get(key)
            if response is not None:
                self._refs_by_context[response['context']] += 1
-                return response
-
-            waiters = self._waiters_by_key.get(key)
-            if waiters is not None:
-                waiters.append(msg)
-                return self.NO_REPLY
-            self._waiters_by_key[key] = [msg]
+                latch.put(response)
+                return latch
+
+            latches = self._latches_by_key.setdefault(key, [])
+            first = len(latches) == 0
+            latches.append(latch)
        finally:
            self._lock.release()

-        # I'm the first thread to wait, so I will create the connection.
-        try:
-            response = self._connect(key, **kwargs)
-            count = self._produce_response(key, response)
-            # Only record the response for non-error results.
-            self._response_by_key[key] = response
-            # Set the reference count to the number of waiters.
-            self._refs_by_context[response['context']] += count
-        except mitogen.core.CallError:
-            e = sys.exc_info()[1]
-            self._produce_response(key, e)
-        except Exception:
-            e = sys.exc_info()[1]
-            self._produce_response(key, mitogen.core.CallError(e))
+        if first:
+            # I'm the first requestee, so I will create the connection.
+            try:
+                response = self._connect(key, spec, via=via)
+                count = self._produce_response(key, response)
+                if response['msg'] is None:
+                    # Only record the response for non-error results.
+                    self._response_by_key[key] = response
+                    # Set the reference count to the number of waiters.
+                    self._refs_by_context[response['context']] += count
+            except Exception:
+                self._produce_response(key, sys.exc_info())
+
+        return latch
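
The latch dance above deduplicates concurrent connection attempts: the first caller for a key pays for the connection, and the result is fanned out to everyone who queued meanwhile. A self-contained sketch of the pattern, using a plain Queue in place of mitogen.core.Latch (both block on get()); the class and function names here are invented for illustration:

    import threading
    try:
        import Queue as queue  # Python 2, matching the code above
    except ImportError:
        import queue

    class WaitOrStartDemo(object):
        def __init__(self, connect_func):
            self._connect = connect_func   # expensive, run at most once per key
            self._lock = threading.Lock()
            self._latches_by_key = {}

        def get(self, key):
            latch = queue.Queue()
            with self._lock:
                latches = self._latches_by_key.setdefault(key, [])
                first = len(latches) == 0
                latches.append(latch)
            if first:
                # Only the first caller connects; the result is published to
                # every latch registered so far. (The real service also caches
                # results in _response_by_key for later callers.)
                result = self._connect(key)
                with self._lock:
                    waiting = self._latches_by_key.pop(key)
                for l in waiting:
                    l.put(result)
            return latch.get()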
+    @mitogen.service.expose(mitogen.service.AllowParents())
+    @mitogen.service.arg_spec({
+        'stack': list
+    })
+    def get(self, msg, stack):
+        """
+        Return a Context referring to an established connection with the given
+        configuration, establishing new connections as necessary.
+
+        :param list stack:
+            Connection descriptions. Each element is a dict containing 'method'
+            and 'kwargs' keys describing the Router method and arguments.
+            Subsequent elements are proxied via the previous.
+
+        :returns dict:
+            * context: mitogen.master.Context or None.
+            * homedir: Context's home directory or None.
+            * msg: StreamError exception text or None.
+            * method_name: string failing method name.
+        """
+        via = None
+        for spec in stack:
+            try:
+                result = self._wait_or_start(spec, via=via).get()
+                if isinstance(result, tuple):  # exc_info()
+                    e1, e2, e3 = result
+                    raise e1, e2, e3
+                via = result['context']
+            except mitogen.core.StreamError as e:
+                return {
+                    'context': None,
+                    'home_dir': None,
+                    'method_name': spec['method'],
+                    'msg': str(e),
+                }

-        return self.NO_REPLY
+        return result
class FileService(mitogen.service.Service):
@@ -420,9 +419,9 @@ class FileService(mitogen.service.Service):
    #: Time spent by the scheduler thread asleep when it has no more data to
    #: pump, but while at least one transfer remains active. With
    #: max_queue_size=1MiB and a sleep of 10ms, maximum throughput on any
-    #: single stream is 100MiB/sec, which is 5x what SSH can handle on my
+    #: single stream is 112MiB/sec, which is >5x what SSH can handle on my
    #: laptop.
-    sleep_delay_ms = 0.01
+    sleep_delay_secs = 0.01

    def __init__(self, router):
        super(FileService, self).__init__(router)
@@ -430,7 +429,7 @@ class FileService(mitogen.service.Service):
        self._size_by_path = {}
        #: Queue used to communicate from service to scheduler thread.
        self._queue = mitogen.core.Latch()
-        #: Mapping of Stream->[(sender, fp)].
+        #: Mapping of Stream->[(Sender, file object)].
        self._pending_by_stream = {}
        self._thread = threading.Thread(target=self._scheduler_main)
        self._thread.start()
@@ -488,9 +487,9 @@ class FileService(mitogen.service.Service):
    def _sleep_on_queue(self):
        """
-        Sleep indefinitely (no active transfers) or for :attr:`sleep_delay_ms`
-        (active transfers) waiting for a new transfer request to arrive from
-        the :meth:`fetch` method.
+        Sleep indefinitely (no active transfers) or for
+        :attr:`sleep_delay_secs` (active transfers) waiting for a new transfer
+        request to arrive from the :meth:`fetch` method.

        If a new request arrives, add it to the appropriate list in
        :attr:`_pending_by_stream`.
@@ -500,8 +499,8 @@ class FileService(mitogen.service.Service):
        :meth:`on_shutdown` hasn't been called yet, otherwise
        :data:`False`.
        """
-        if self._schedule_pending:
-            timeout = self.sleep_delay_ms
+        if self._pending_by_stream:
+            timeout = self.sleep_delay_secs
        else:
            timeout = None
@@ -523,7 +522,7 @@ class FileService(mitogen.service.Service):
        """
        Scheduler thread's main function. Sleep until
        :meth:`_sleep_on_queue` indicates the queue has been shut down,
-        pending pending file chunks each time we wake.
+        pumping pending file chunks each time we wake.
        """
        while self._sleep_on_queue():
            for stream, pending in list(self._pending_by_stream.items()):

@@ -69,8 +69,7 @@ def wrap_connection_loader__get(name, play_context, new_stdin, **kwargs):
    an argument, so that it can emulate the original type.
    """
    if name in ('ssh', 'local', 'docker'):
-        kwargs['original_transport'] = name
-        name = 'mitogen'
+        name = 'mitogen_' + name
    return connection_loader__get(name, play_context, new_stdin, **kwargs)
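
The effect of the hook, sketched; the helper name below is illustrative only:

    def rewrite(name):
        # Mirrors the logic above: stock connection types are redirected to
        # the Mitogen subclasses registered as mitogen_ssh/_local/_docker.
        if name in ('ssh', 'local', 'docker'):
            return 'mitogen_' + name
        return name

    assert rewrite('ssh') == 'mitogen_ssh'
    assert rewrite('winrm') == 'winrm'   # other types pass through untouched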

@@ -1,64 +1,57 @@
-Ansible Extension
-=================
-
.. image:: images/ansible/cell_division.png
    :align: right

+Mitogen for Ansible
+===================
+
-An extension to `Ansible`_ is included that implements host connections over
-Mitogen, replacing embedded shell invocations with pure-Python equivalents
-invoked via highly efficient remote procedure calls tunnelled over SSH. No
-changes are required to the target hosts.
-
-The extension is approaching a generally dependable state, and works well for
-many real-world playbooks. `Bug reports`_ in this area are very welcome.
-Ansible is a huge beast, and only significant testing will prove the
-extension's soundness.
-
-Divergence from Ansible's normal behaviour is considered a bug, so please
-report anything you notice, regardless of how inconsequential it may seem.
+An extension to `Ansible`_ is included that implements connections over
+Mitogen, replacing embedded shell invocations with pure-Python equivalents
+invoked via highly efficient remote procedure calls to persistent interpreters
+tunnelled over SSH. No changes are required to target hosts.
+
+The extension is approaching stability and real-world testing is now
+encouraged. `Bug reports`_ are welcome: Ansible is huge, and only wide testing
+will ensure soundness.

.. _Ansible: https://www.ansible.com/
.. _Bug reports: https://goo.gl/yLKZiJ


Overview
--------

-You should **expect a 1.25x - 7x speedup** and a **CPU usage reduction of at
-least 2x**, depending on network conditions, the specific modules executed, and
-time spent by the target host already doing useful work. Mitogen cannot speed
-up a module once it is executing, it can only ensure the module executes as
-quickly as possible.
+**Expect a 1.25x - 7x speedup** and a **CPU usage reduction of at least 2x**,
+depending on network conditions, modules executed, and time already spent by
+targets on useful work. Mitogen cannot improve a module once it is executing,
+it can only ensure the module executes as quickly as possible.

-* **A single SSH connection is used for each target host**, in addition to one
-  sudo invocation per distinct user account. Subsequent playbook steps always
-  reuse the same connection. This is much better than SSH multiplexing combined
-  with pipelining, as significant state can be maintained in RAM between steps,
-  and the system logs aren't filled with spam from repeat SSH and sudo
-  invocations.
+* **One connection is used per target**, in addition to one sudo invocation per
+  user account. This is much better than SSH multiplexing combined with
+  pipelining, as significant state can be maintained in RAM between steps, and
+  system logs aren't spammed with repeat authentication events.

-* **A single Python interpreter is used** per host and sudo account combination
-  for the duration of the run, avoiding the repeat cost of invoking multiple
-  interpreters and recompiling imports, saving 300-800 ms for every playbook
-  step.
+* **A single network roundtrip is used** to execute a step whose code already
+  exists in RAM on the target. Eliminating multiplexed SSH channel creation
+  saves 5 ms runtime per 1 ms of network latency for every playbook step.
+
+* **Processes are aggressively reused**, avoiding the cost of invoking Python
+  and recompiling imports, saving 300-800 ms for every playbook step.

-* Remote interpreters reuse Mitogen's module import mechanism, caching uploaded
-  dependencies between steps at the host and user account level. As a
-  consequence, **bandwidth usage is consistently an order of magnitude lower**
-  compared to SSH pipelining, and around 5x fewer frames are required to
-  traverse the wire for a run to complete successfully.
+* Code is ephemerally cached in RAM, **reducing bandwidth usage by an order
+  of magnitude** compared to SSH pipelining, with around 5x fewer frames
+  traversing the network in a typical run.

-* **No writes to the target host's filesystem occur**, unless explicitly
-  triggered by a playbook step. In all typical configurations, Ansible
-  repeatedly rewrites and extracts ZIP files to multiple temporary directories
-  on the target host. Since no temporary files are used, security issues
-  relating to those files in cross-account scenarios are entirely avoided.
+* **No writes to the target's filesystem occur**, unless explicitly triggered
+  by a playbook step. In all typical configurations, Ansible repeatedly
+  rewrites and extracts ZIP files to multiple temporary directories on the
+  target. Since no temporary files are used, security issues relating to those
+  files in cross-account scenarios are entirely avoided.

Demo
-----
+~~~~

This demonstrates Ansible running a subset of the Mitogen integration tests
concurrent to an equivalent run using the extension.
@@ -71,7 +64,7 @@ concurrent to an equivalent run using the extension.

Testimonials
-------------
+~~~~~~~~~~~~

* "With mitogen **my playbook runtime went from 45 minutes to just under 3
  minutes**. Awesome work!"
@@ -96,14 +89,11 @@ Testimonials
Installation
------------

-.. caution::
-
-    Please review the behavioural differences documented below prior to use.
-
-1. Verify Ansible 2.4 and Python 2.7 are listed in the output of ``ansible
-   --version``
-2. Download and extract https://github.com/dw/mitogen/archive/master.zip
-3. Modify ``ansible.cfg``:
+1. Thoroughly review the documented behavioural differences.
+2. Verify Ansible 2.4/2.5 and Python 2.7 are listed in ``ansible --version``
+   output.
+3. Download and extract https://github.com/dw/mitogen/archive/master.zip
+4. Modify ``ansible.cfg``:

.. code-block:: dosini
@@ -111,70 +101,257 @@ Installation
    strategy_plugins = /path/to/mitogen-master/ansible_mitogen/plugins/strategy
    strategy = mitogen_linear

-The ``strategy`` key is optional. If omitted, you can set the
-``ANSIBLE_STRATEGY=mitogen_linear`` environment variable on a per-run basis.
-Like ``mitogen_linear``, the ``mitogen_free`` strategy also exists to mimic
-the built-in ``free`` strategy.
-
-4. Cross your fingers and try it.
-
-
-Limitations
------------
-
-* Only Ansible 2.4 is being used for development, with occasional tests under
-  2.5, 2.3 and 2.2. It should be more than possible to fully support at least
-  2.3, if not also 2.2.
-
-* Only the ``sudo`` become method is available, however adding new methods is
-  straightforward, and eventually at least ``su`` will be included.
-
-* The extension's performance benefits do not scale perfectly linearly with the
-  number of targets. This is a subject of ongoing investigation and
-  improvements will appear in time.
-
-* "Module Replacer" style modules are not yet supported. These rarely appear in
-  practice, and light Github code searches failed to reveal many examples of
-  them.
-
-
-Behavioural Differences
------------------------
-
-* Ansible permits up to ``forks`` SSH connections to be setup simultaneously,
-  whereas in Mitogen this is handled by a thread pool. Eventually this pool
-  will become per-CPU, but meanwhile, a maximum of 16 SSH connections may be
-  established simultaneously by default. This can be increased or decreased
-  setting the ``MITOGEN_POOL_SIZE`` environment variable.
-
-* Mitogen treats connection timeouts for the SSH and become steps of a task
-  invocation separately, meaning that in some circumstances the configured
-  timeout may appear to be doubled. This is since Mitogen internally treats the
-  creation of an SSH account context separately to the creation of a sudo
-  account context proxied via that SSH account.
-
-  A future revision may detect a sudo account context created immediately
-  following its parent SSH account, and try to emulate Ansible's existing
-  timeout semantics.
-
-* Local commands are executed in a reuseable Python interpreter created
-  identically to interpreters used on remote hosts. At present only one such
-  interpreter per ``become_user`` exists, and so only one local action may be
-  executed simultaneously per local user account.
-
-  Ansible usually permits up to ``ansible.cfg:forks`` simultaneous local
-  actions. Any long-running local actions that execute for every target will
-  experience artificial serialization, causing slowdown equivalent to
-  `task_duration * num_targets`. This will be fixed soon.
-
-* Asynchronous jobs exist only for the duration of a run, and cannot be
-  queried by subsequent ansible-playbook invocations. Since the ability to
-  query job IDs across runs relied on an implementation detail, it is not
-  expected this will break any real-world playbooks.
+The ``strategy`` key is optional. If omitted, the
+``ANSIBLE_STRATEGY=mitogen_linear`` environment variable can be set on a
+per-run basis. Like ``mitogen_linear``, the ``mitogen_free`` strategy exists
+to mimic the ``free`` strategy.
+
+
+Noteworthy Differences
+----------------------
+
+* Ansible 2.4 and 2.5 are supported. File bugs to register interest in older
+  releases.
+
+* The ``sudo`` become method is available and ``su`` is planned. File bugs to
+  register interest in additional methods.
+
+* The ``ssh``, ``local`` and ``docker`` connection types are available, with
+  more planned. File bugs to register interest.
+
+* Local commands execute in a reuseable interpreter created identically to
+  interpreters on targets. Presently one interpreter per ``become_user``
+  exists, and so only one local action may execute simultaneously.
+
+  Ansible usually permits up to ``forks`` simultaneous local actions. Any
+  long-running local actions that execute for every target will experience
+  artificial serialization, causing slowdown equivalent to `task_duration *
+  num_targets`. This will be fixed soon.
+
+* Asynchronous jobs presently exist only for the duration of a run, and time
+  limits are not implemented.
+
+* Due to use of :func:`select.select` the IO multiplexer breaks down around 100
+  targets, expect performance degradation as this number is approached and
+  errant behaviour as it is exceeded. A replacement will appear soon.
+
+* The undocumented ability to extend :mod:`ansible.module_utils` by supplying a
+  ``module_utils`` directory alongside a custom new-style module is not yet
+  supported.
+
+* "Module Replacer" style modules are not supported. These rarely appear in
+  practice, and light web searches failed to reveal many examples of them.
+
+* Ansible permits up to ``forks`` connections to be setup in parallel, whereas
+  in Mitogen this is handled by a fixed-size thread pool. Up to 16 connections
+  may be established in parallel by default, this can be modified by setting
+  the ``MITOGEN_POOL_SIZE`` environment variable.
+
+* Performance does not scale perfectly linearly with target count. This will
+  improve over time.
+
+* Timeouts normally apply to the combined runtime of the SSH and become steps
+  of a task. As Mitogen treats SSH and sudo distinctly, during a failure the
+  effective timeout may appear to double.
+
+
+New Features & Notes
+--------------------

Connection Delegation
~~~~~~~~~~~~~~~~~~~~~

.. image:: images/jumpbox.png
    :align: right

Included is a preview of **Connection Delegation**, a Mitogen-specific
implementation of `stackable connection plug-ins`_. This enables multi-hop
connections via a bastion, or Docker connections delegated via their host
machine, where reaching the host may itself entail recursive delegation.

.. _Stackable connection plug-ins: https://github.com/ansible/proposals/issues/25

Unlike with SSH forwarding, Ansible has complete visibility of the final
topology, declarative configuration via static/dynamic inventory is possible,
data can be cached and re-served, and code can be executed on every
intermediary.

For example when targeting Docker containers on a remote machine, each module
need only be uploaded once for the first task and container that requires it,
then cached and served from the SSH account for every future task in any
container.

.. raw:: html

    <div style="clear: both;"></div>

.. caution::

    Connection delegation is a work in progress, bug reports are welcome.

* While imports are cached on intermediaries, module scripts are needlessly
  reuploaded for each target. Fixing this is equivalent to implementing
  **Topology-Aware File Synchronization**, so it may remain unfixed until
  that feature is started.

* Delegated connection setup is single-threaded; only one connection can be
  constructed in parallel per intermediary.

* Unbounded queue RAM growth may occur in an intermediary during large file
  transfers if the link between any two hops is slower than the link
  between the controller and the first hop.

* Inferring the configuration of intermediaries may be buggy, manifesting
  as duplicate connections between hops, due to not perfectly replicating
  the configuration Ansible would normally use for the intermediary.

* The extension does not understand the difference between a delegated
  connection and a ``become_user``. If interpreter recycling kicks in, a
  delegated connection could be prematurely recycled.
To enable connection delegation, set ``mitogen_via=<inventory name>`` on the
command line, or as host and group variables.

.. code-block:: ini

    # Docker container on web1.dc1 is reachable via web1.dc1.
    [app-containers.web1.dc1]
    app1.web1.dc1 ansible_host=app1 ansible_connection=docker mitogen_via=web1.dc1

    # Web servers in DC1 are reachable via bastion.dc1
    [dc1]
    web1.dc1
    web2.dc1
    web3.dc1

    [dc1:vars]
    mitogen_via = bastion.dc1

    # Web servers in DC2 are reachable via bastion.dc2
    [dc2]
    web1.dc2
    web2.dc2
    web3.dc2

    [dc2:vars]
    mitogen_via = bastion.dc2

    # Prod bastions are reachable via a magic account on a
    # corporate network gateway.
    [bastions]
    bastion.dc1 mitogen_via=prod-ssh-access@corp-gateway.internal
    bastion.dc2 mitogen_via=prod-ssh-access@corp-gateway.internal

    [corp-gateway]
    corp-gateway.internal
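
Tracing that inventory, a task targeting ``app1.web1.dc1`` would delegate
through the following chain (a sketch derived from the example above):

.. code-block:: text

    corp-gateway.internal      (ssh, prod-ssh-access account)
      -> bastion.dc1           (ssh)
        -> web1.dc1            (ssh)
          -> app1              (docker)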
File Transfer
~~~~~~~~~~~~~
Normally a tool like ``scp`` is used to copy a file with the ``copy`` or
``template`` actions, or when uploading modules with pipelining disabled. With
Mitogen, copies are implemented natively using the same interpreters, connection
tree, and routed message bus that carries RPCs.
This permits streaming directly between endpoints regardless of execution
environment, without necessitating temporary copies in intermediary accounts or
machines, for example when ``become`` is active, or in the presence of
connection delegation. It also neatly avoids the problem of securely sharing
temporary files between accounts and machines.
One roundtrip is required to initiate a transfer. For any tool that operates
via SSH multiplexing, 5 are required to configure the associated IO channel, in
addition to the time needed to start the local and remote processes. A complete
localhost invocation of ``scp`` requires around 15 ms.
As the implementation is self-contained, it is simple to make future
improvements like prioritizing transfers, supporting resume, or displaying
progress bars.
Interpreter Reuse
~~~~~~~~~~~~~~~~~
Python interpreters are aggressively reused to execute modules. While this
works well, it violates an unwritten assumption, and so it is possible an
earlier module execution could cause a subsequent module to fail, or for
unrelated modules to interact poorly due to bad hygiene, such as
monkey-patching that becomes stacked over repeat invocations.
Before reporting a bug relating to a misbehaving module, please re-run with
``-e mitogen_task_isolation=fork`` to see if the problem abates. This may be
set per-task, paying attention to the possibility an earlier task may be the
true cause of a failure.
.. code-block:: yaml

    - name: My task.
      broken_module:
        some_option: true
      vars:
        mitogen_task_isolation: fork

If forking solves your problem, **please report a bug regardless**, as an
internal list can be updated to prevent others bumping into the same problem.
Interpreter Recycling
~~~~~~~~~~~~~~~~~~~~~
There is a per-target limit on the number of interpreters. Once 20 exist, the
youngest is terminated before starting any new interpreter, preventing
situations like below from triggering memory exhaustion.
.. code-block:: yaml

    - hosts: corp_boxes
      vars:
        user_directory: [
          # 10,000 corporate user accounts
        ]
      tasks:
        - name: Create user bashrc
          become: true
          vars:
            ansible_become_user: "{{item}}"
          copy:
            src: bashrc
            dest: "~{{item}}/.bashrc"
          with_items: "{{user_directory}}"
The youngest is chosen to preserve useful accounts like ``root`` and
``postgresql`` that often appear early in a run, however it is simple to
construct a playbook that defeats this strategy. A future version will key
interpreters on the identity of their creating task, avoiding useful account
recycling in every scenario.
To modify the limit, set the ``MITOGEN_MAX_INTERPRETERS`` environment variable.
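
For example (``site.yml`` is a hypothetical playbook name):

.. code-block:: shell

    # Raise the per-target interpreter limit from the default of 20.
    MITOGEN_MAX_INTERPRETERS=30 ansible-playbook site.yml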
Standard IO
~~~~~~~~~~~
Ansible uses pseudo TTYs for most invocations to allow it to type interactive
passwords, however pseudo TTYs are disabled where standard input is required or
``sudo`` is not in use. Additionally when SSH multiplexing is enabled, a string
like ``Shared connection to localhost closed\r\n`` appears in ``stderr`` of
every invocation.
Mitogen does not naturally require either of these, as command output is always
embedded within framed messages, and it can simply call :py:func:`pty.openpty`
in any location an interactive password must be typed.
A major downside to Ansible's behaviour is that ``stdout`` and ``stderr`` are
merged together into a single ``stdout`` variable, with carriage returns
inserted in the output by the TTY layer. However ugly, the extension emulates
this precisely, to avoid breaking playbooks that expect text to appear in
specific variables with a particular linefeed style.
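
The PTY mechanism can be sketched in a few lines; this is an illustrative
fragment, not Mitogen's actual implementation:

.. code-block:: python

    import os
    import pty

    # Data written to the master end arrives on the slave end as if typed
    # at a terminal, which is how an interactive password prompt can be
    # answered without a real TTY elsewhere in the pipeline.
    master_fd, slave_fd = pty.openpty()
    os.write(master_fd, b'hunter2\n')
    print(os.read(slave_fd, 32))   # b'hunter2\n' (modulo echo settings)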

How Modules Execute
--------------------
+~~~~~~~~~~~~~~~~~~~

Ansible usually modifies, recompresses and reuploads modules every time they
run on a target, work that must be repeated by the controller for every
@@ -218,47 +395,51 @@ cached in RAM for the remainder of the run.
key2=repr(value2)[ ..]] "``.
-Sample Profiles
----------------
-
-Local VM connection
-~~~~~~~~~~~~~~~~~~~
-
-This demonstrates Mitogen vs. connection pipelining to a local VM, executing
-the 100 simple repeated steps of ``run_hostname_100_times.yml`` from the
-examples directory. Mitogen requires **43x less bandwidth and 4.25x less
-time**.
-
-.. image:: images/ansible/run_hostname_100_times.png
-
-
-Kathmandu to Paris
-~~~~~~~~~~~~~~~~~~
-
-This is a full Django application playbook over a ~180ms link between Kathmandu
-and Paris. Aside from large pauses where the host performs useful work, the
-high latency of this link means Mitogen only manages a 1.7x speedup.
-
-Many early roundtrips are due to inefficiencies in Mitogen's importer that will
-be fixed over time, however the majority, comprising at least 10 seconds, are
-due to idling while the host's previous result and next command are in-flight
-on the network.
-
-The initial extension lays groundwork for exciting structural changes to the
-execution model: a future version will tackle latency head-on by delegating
-some control flow to the target host, melding the performance and scalability
-benefits of pull-based operation with the management simplicity of push-based
-operation.
-
-.. image:: images/ansible/costapp.png
-
-
-SSH Variables
--------------
-
-Matching Ansible's existing model, these variables are treated on a per-task
-basis, causing establishment of additional reuseable interpreters as necessary
-to match the configuration of each task.
+Runtime Patches
+~~~~~~~~~~~~~~~
+
+Three small runtime patches are employed in ``strategy.py`` to hook into
+desirable locations, in order to override uses of shell, the module executor,
+and the mechanism for selecting a connection plug-in. While it is hoped the
+patches can be avoided in future, for interesting versions of Ansible deployed
+today this simply is not possible, and so they continue to be required.
+
+The patches are concise and behave conservatively, including by disabling
+themselves when non-Mitogen connections are in use. Additional third party
+plug-ins are unlikely to attempt similar patches, so the risk to an established
+configuration should be minimal.
+
+
+Flag Emulation
+~~~~~~~~~~~~~~
+
+Mitogen re-parses ``sudo_flags``, ``become_flags``, and ``ssh_flags`` using
+option parsers extracted from `sudo(1)` and `ssh(1)` in order to emulate their
+equivalent semantics. This allows:
+
+* robust support for common ``ansible.cfg`` tricks without reconfiguration,
+  such as forwarding SSH agents across ``sudo`` invocations,
+* reporting on conflicting flag combinations,
+* reporting on unsupported flag combinations,
+* internally special-casing certain behaviour (like recursive agent forwarding)
+  without boring the user with the details,
+* avoiding opening the extension up to untestable scenarios where users can
+  insert arbitrary garbage between Mitogen and the components it integrates
+  with,
+* precise emulation by an alternative implementation, for example if Mitogen
+  grew support for Paramiko.
+
+
+Supported Variables
+-------------------
+
+Matching Ansible's model, variables are treated on a per-task basis, causing
+establishment of additional reuseable interpreters as necessary to match the
+configuration of each task.
+
+SSH
+~~~

This list will grow as more missing pieces are discovered.
@@ -272,8 +453,8 @@ This list will grow as more missing pieces are discovered.
* ``ssh_args``, ``ssh_common_args``, ``ssh_extra_args``

-Sudo Variables
---------------
+Sudo
+~~~~

* ``ansible_python_interpreter``
* ``ansible_sudo_exe``, ``ansible_become_exe``
@@ -283,31 +464,23 @@ Sudo Variables
* ansible.cfg: ``timeout``

-Docker Variables
-----------------
-
-Note: Docker support is only intended for developer testing, it might disappear
-entirely prior to a stable release.
-
-* ansible_host
+Docker
+~~~~~~
+
+Docker support has received relatively little testing, expect increased
+probability of surprises for the time being.
+
+* ``ansible_host``
-
-
-Chat on IRC
------------
-
-Some users and developers hang out on the
-`#mitogen <https://webchat.freenode.net/?channels=mitogen>`_ channel on the
-FreeNode IRC network.

Debugging
---------

-Normally with Ansible, diagnostics and use of the :py:mod:`logging` package
-output on the target machine are discarded. With Mitogen, all of this is
-captured and returned to the host machine, where it can be viewed as desired
-with ``-vvv``. Basic high level logs are produced with ``-vvv``, with logging
-of all IO on the controller with ``-vvvv`` or higher.
+Diagnostics and use of the :py:mod:`logging` package output on the target
+machine are usually discarded. With Mitogen, all of this is captured and
+returned to the controller, where it can be viewed as desired with ``-vvv``.
+Basic high level logs are produced with ``-vvv``, with logging of all IO on the
+controller with ``-vvvv`` or higher.

Although use of standard IO and the logging package on the target is forwarded
to the controller, it is not possible to receive IO activity logs, as the
@@ -320,129 +493,45 @@ When file-based logging is enabled, one file per context will be created on the
local machine and every target machine, as ``/tmp/mitogen.<pid>.log``.
Implementation Notes Getting Help
-------------------- ~~~~~~~~~~~~
Some users and developers hang out on the
Interpreter Reuse `#mitogen <https://webchat.freenode.net/?channels=mitogen>`_ channel on the
~~~~~~~~~~~~~~~~~ FreeNode IRC network.
The extension aggressively reuses the single target Python interpreter to
execute every module. While this generally works well, it violates an unwritten
assumption regarding Ansible modules, and so it is possible a buggy module
could cause a run to fail, or for unrelated modules to interact with each other
due to bad hygiene.
Before reporting a bug relating to a module behaving incorrectly, please re-run
your playbook with ``-e mitogen_task_isolation=fork`` to see if the problem
abates. This may also be set on a per-task basis:
::

    - name: My task.
      broken_module:
        some_option: true
      vars:
        mitogen_task_isolation: fork
If forking fixes your problem, **please report a bug regardless**, as an
internal list can be updated to prevent users bumping into the same problem in
future.
Interpreter Recycling
~~~~~~~~~~~~~~~~~~~~~
The extension limits the number of persistent interpreters in use. When the
limit is reached, the youngest interpreter is terminated before starting a new
interpreter, preventing situations like the example below from triggering
memory exhaustion.
.. code-block:: yaml

    - hosts: corp_boxes
      vars:
        user_directory: [
          # 10,000 corporate user accounts
        ]
      tasks:
        - name: Create user bashrc
          become: true
          vars:
            ansible_become_user: "{{item}}"
          copy:
            src: bashrc
            dest: "~{{item}}/.bashrc"
          with_items: "{{user_directory}}"
This recycling does not occur for direct connections from the controller, and
it is keyed on a per-target basis, i.e. up to 20 interpreters may exist for
each directly connected target.
The youngest interpreter is chosen to preserve useful accounts, like "root" or
"postgresql", that tend to appear early in a run; however, it is simple to
construct a playbook that defeats this strategy. A future version will key
interpreters on the identity of their creating task, file and/or playbook,
avoiding recycling of useful accounts in every scenario.
To raise or lower the limit from 20, set the ``MITOGEN_MAX_INTERPRETERS``
environment variable to a new value.
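For example, to allow 4 interpreters per target (``site.yml`` is a placeholder
playbook name):

::

    MITOGEN_MAX_INTERPRETERS=4 ansible-playbook site.yml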
Runtime Patches
~~~~~~~~~~~~~~~
Three small runtime patches are employed in ``strategy.py`` to hook into
desirable locations, in order to override uses of shell, the module executor,
and the mechanism for selecting a connection plug-in. While it is hoped the
patches can be avoided in future, for the versions of Ansible deployed today
this simply is not possible, and so they continue to be required.
The patches are concise and behave conservatively, including by disabling
themselves when non-Mitogen connections are in use. Additional third party
plug-ins are unlikely to attempt similar patches, so the risk to an established
configuration should be minimal.
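The general shape of such a patch is sketched below. This is illustrative
only, not the extension's actual code; ``run_via_mitogen()`` is a hypothetical
stand-in for the real module executor, and the ``transport`` check is an
assumed way of detecting a Mitogen connection:

.. code-block:: python

    import ansible.plugins.action

    def run_via_mitogen(action, *args, **kwargs):
        # Hypothetical stand-in for the real Mitogen-based executor.
        raise NotImplementedError

    # Keep a reference so the patch can defer to stock behaviour.
    _original = ansible.plugins.action.ActionBase._low_level_execute_command

    def _patched(self, *args, **kwargs):
        # Behave conservatively: disable ourselves whenever a non-Mitogen
        # connection is in use.
        if getattr(self._connection, 'transport', None) != 'mitogen':
            return _original(self, *args, **kwargs)
        return run_via_mitogen(self, *args, **kwargs)

    ansible.plugins.action.ActionBase._low_level_execute_command = _patched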

Standard IO
~~~~~~~~~~~

Ansible uses pseudo TTYs for most invocations, to allow it to handle typing
passwords interactively, however it disables pseudo TTYs for certain commands
where standard input is required or ``sudo`` is not in use. Additionally when
SSH multiplexing is enabled, a string like ``Shared connection to localhost
closed\r\n`` appears in ``stderr`` of every invocation.

Mitogen does not naturally require either of these, as command output is
embedded within the SSH stream, and it can simply call :py:func:`pty.openpty`
in every location an interactive password must be typed.

A major downside to Ansible's behaviour is that ``stdout`` and ``stderr`` are
merged together into a single ``stdout`` variable, with carriage returns
inserted in the output by the TTY layer. However ugly, the extension emulates
all of this behaviour precisely, to avoid breaking playbooks that expect
certain text to appear in certain variables with certain linefeed characters.

See `Ansible#14377`_ for related discussion.

.. _Ansible#14377: https://github.com/ansible/ansible/issues/14377


Flag Emulation
~~~~~~~~~~~~~~

Mitogen re-parses ``sudo_flags``, ``become_flags``, and ``ssh_flags`` using
option parsers extracted from `sudo(1)` and `ssh(1)` in order to emulate their
equivalent semantics. This allows:

* robust support for common ``ansible.cfg`` tricks without reconfiguration,
  such as forwarding SSH agents across ``sudo`` invocations,
* reporting on conflicting flag combinations,
* reporting on unsupported flag combinations,
* internally special-casing certain behaviour (like recursive agent forwarding)
  without boring the user with the details,
* avoiding opening the extension up to untestable scenarios where users can
  insert arbitrary garbage between Mitogen and the components it integrates
  with,
* precise emulation by an alternative implementation, for example if Mitogen
  grew support for Paramiko.


Sample Profiles
---------------

Local VM connection
~~~~~~~~~~~~~~~~~~~

This demonstrates Mitogen vs. connection pipelining to a local VM, executing
the 100 simple repeated steps of ``run_hostname_100_times.yml`` from the
examples directory. Mitogen requires **43x less bandwidth and 4.25x less
time**.

.. image:: images/ansible/run_hostname_100_times.png


Kathmandu to Paris
~~~~~~~~~~~~~~~~~~

This is a full Django application playbook over a ~180ms link between Kathmandu
and Paris. Aside from large pauses where the host performs useful work, the
high latency of this link means Mitogen only manages a 1.7x speedup.

Many early roundtrips are due to inefficiencies in Mitogen's importer that will
be fixed over time, however the majority, comprising at least 10 seconds, are
due to idling while the host's previous result and next command are in-flight
on the network.

The initial extension lays groundwork for exciting structural changes to the
execution model: a future version will tackle latency head-on by delegating
some control flow to the target host, melding the performance and scalability
benefits of pull-based operation with the management simplicity of push-based
operation.

.. image:: images/ansible/costapp.png

@ -0,0 +1,6 @@
Change Log
----------

.. literalinclude:: ../ChangeLog
   :language: none

@ -9,6 +9,7 @@ import zlib
import mitogen.fakessh
import mitogen.master
import mitogen.parent
import mitogen.service
import mitogen.ssh
import mitogen.sudo

@ -35,6 +36,7 @@ print(
for mod in (
    mitogen.master,
    mitogen.parent,
    mitogen.service,
    mitogen.ssh,
    mitogen.sudo,
    mitogen.fakessh,

@ -1,6 +1,6 @@
- name: bench/file_transfer.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1 +1,16 @@
[test-targets]
localhost

[connection-delegation-test]
cd-bastion
cd-rack11 mitogen_via=ssh-user@cd-bastion
cd-rack11a mitogen_via=root@cd-rack11
cd-rack11a-docker mitogen_via=docker-admin@cd-rack11a ansible_connection=docker

[connection-delegation-cycle]
# Create cycle with Docker container.
cdc-bastion mitogen_via=cdc-rack11a-docker
cdc-rack11 mitogen_via=ssh-user@cdc-bastion
cdc-rack11a mitogen_via=root@cdc-rack11
cdc-rack11a-docker mitogen_via=docker-admin@cdc-rack11a ansible_connection=docker

@ -1,7 +1,7 @@
# Verify the behaviour of _low_level_execute_command().
- name: integration/action__low_level_execute_command.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,6 +1,6 @@
- name: integration/action/make_tmp_path.yml
  hosts: test-targets
  any_errors_fatal: true
  gather_facts: true
  tasks:

@ -1,6 +1,6 @@
- name: integration/action/remote_file_exists.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,6 +1,6 @@
- name: integration/action/transfer_data.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,7 +1,7 @@
- name: integration/async/result_binary_producing_json.yml
  gather_facts: true
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,7 +1,7 @@
- name: integration/async/result_binary_producing_junk.yml
  gather_facts: true
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,7 +1,7 @@
- name: integration/async/result_shell_echo_hi.yml
  gather_facts: true
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,7 +1,7 @@
# Verify 'async: <timeout>' functions as desired.
- name: integration/async/runner_job_timeout.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -2,7 +2,7 @@
# fields.
- name: integration/async/runner_one_job.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,6 +1,6 @@
- name: integration/async/runner_two_simultaneous_jobs.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,5 +1,5 @@
- name: integration/become/sudo_flags_failure.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,5 +1,5 @@
- name: integration/become/sudo_nonexistent.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,7 +1,7 @@
# Verify passwordless sudo behaviour in various cases.
- name: integration/become/sudo_basic.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,7 +1,7 @@
# Verify passwordful sudo behaviour
- name: integration/become/sudo_password.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,7 +1,7 @@
# Verify requiretty support
- name: integration/become/sudo_requiretty.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,7 +1,7 @@
# Ensure 'local' connections are grabbed.
- name: integration/connection_loader__local_blemished.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
    - determine_strategy:

@ -1,7 +1,7 @@
# Ensure paramiko connections aren't grabbed.
- name: integration/connection_loader__paramiko_unblemished.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
    - custom_python_detect_environment:

@ -1,7 +1,7 @@
# Ensure 'ssh' connections are grabbed.
- name: integration/connection_loader__ssh_blemished.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
    - determine_strategy:

@ -1,7 +1,7 @@
# Verify a maximum number of contexts are possible on one machine.
- name: integration/context_service/lru_one_target.yml
  hosts: test-targets
  any_errors_fatal: true
  vars:
    max_interps: "{{lookup('env', 'MITOGEN_MAX_INTERPRETERS')}}"

@ -2,7 +2,7 @@
# cleanup of dependent (via=) contexts.
- name: integration/context_service/reconnection.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -3,7 +3,7 @@
#
- name: integration/playbook_semantics/become_flags.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
@ -15,7 +15,7 @@
    - assert:
        that: "out.stdout == ''"

- hosts: test-targets
  any_errors_fatal: true
  become_flags: -E
  tasks:

@ -1,5 +1,5 @@
- name: integration/playbook_semantics/delegate_to.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
    #

@ -1,7 +1,7 @@
# Ensure environment: is preserved during call.
- name: integration/playbook_semantics/environment.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
    - shell: echo $SOME_ENV

@ -2,7 +2,7 @@
# the correct context.
- name: integration/playbook_semantics/with_items.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,6 +1,6 @@
- name: integration/runner__builtin_command_module.yml
  hosts: test-targets
  any_errors_fatal: true
  gather_facts: true
  tasks:

@ -1,5 +1,5 @@
- name: integration/runner__custom_bash_old_style_module.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,5 +1,5 @@
- name: integration/runner__custom_bash_want_json_module.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
    - custom_bash_want_json_module:

@ -1,5 +1,5 @@
- name: integration/runner__custom_binary_producing_json.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
    - custom_binary_producing_json:

@ -1,5 +1,5 @@
- name: integration/runner__custom_binary_producing_junk.yml
  hosts: test-targets
  tasks:
    - custom_binary_producing_junk:
        foo: true
@ -8,7 +8,7 @@
      register: out

- hosts: test-targets
  any_errors_fatal: true
  tasks:
    - debug: msg={{out}}

@ -1,5 +1,5 @@
- name: integration/runner__custom_binary_single_null.yml
  hosts: test-targets
  tasks:
    - custom_binary_single_null:
        foo: true
@ -7,7 +7,7 @@
      ignore_errors: true
      register: out

- hosts: test-targets
  any_errors_fatal: true
  tasks:
    - assert:

@ -1,5 +1,5 @@
- name: integration/runner__custom_perl_json_args_module.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
    - custom_perl_json_args_module:

@ -1,5 +1,5 @@
- name: integration/runner__custom_perl_want_json_module.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
    - custom_perl_want_json_module:

@ -1,5 +1,5 @@
- name: integration/runner__custom_python_json_args_module.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
    - custom_python_json_args_module:

@ -1,6 +1,6 @@
- name: integration/runner__custom_python_new_style_module.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
    - custom_python_new_style_missing_interpreter:

@ -1,5 +1,5 @@
- name: integration/runner__custom_python_new_style_module.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
    - custom_python_new_style_module:

@ -1,5 +1,5 @@
- name: integration/runner__custom_python_want_json_module.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:
    - custom_python_want_json_module:

@ -1,5 +1,5 @@
- name: integration/runner/custom_script_interpreter.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -1,6 +1,6 @@
- name: integration/runner/forking_behaviour.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -4,7 +4,7 @@
# remotely.
#
- name: integration/runner__remote_tmp.yml
  hosts: test-targets
  any_errors_fatal: true
  gather_facts: true
  tasks:

@ -6,7 +6,7 @@
# WARNING: this creates non-privileged accounts with pre-set passwords!
#
- hosts: test-targets
  gather_facts: true
  become: true
  tasks:

@ -2,7 +2,7 @@
# does not conflict with operation.
- name: regression/issue_109__target_has_old_ansible_installed.yml
  hosts: test-targets
  any_errors_fatal: true
  gather_facts: true
  tasks:

@ -3,7 +3,7 @@
- name: regression/issue_113__duplicate_module_imports.yml
  any_errors_fatal: true
  hosts: test-targets
  tasks:
    - name: Get auth token

@ -1,7 +1,7 @@
# issue #118 repro: chmod +x not happening during script upload
- name: regression/issue_118__script_not_marked_exec.yml
  hosts: test-targets
  become: True
  tasks:

@ -6,7 +6,7 @@
#
- name: regression/issue_122__environment_difference.yml
  hosts: test-targets
  tasks:
    - script: scripts/print_env.sh

@ -3,7 +3,7 @@
# with_items should crash for other reasons (RAM, file descriptor count, ..)
- name: regression/issue_140__thread_pileup.yml
  hosts: test-targets
  any_errors_fatal: true
  tasks:

@ -4,7 +4,7 @@
# can test for.
- name: regression/issue_152__local_action_wrong_interpreter.yml
  hosts: test-targets
  connection: local
  any_errors_fatal: true
  tasks:

@ -1,6 +1,6 @@
- name: regression/issue_152__virtualenv_python_fails.yml
  any_errors_fatal: true
  hosts: test-targets
  tasks:
    # Can't use pip module because you can't fricking just create a virtualenv,

@ -3,7 +3,7 @@
- name: regression/issue_154__module_state_leaks.yml
  any_errors_fatal: true
  hosts: test-targets
  tasks:
    - custom_python_leaky_class_vars:

@ -1,6 +1,6 @@
- name: regression/issue_177__copy_module_failing.yml
  any_errors_fatal: true
  hosts: test-targets
  tasks:
    - copy: