diff --git a/.gitignore b/.gitignore index 458cf82c..6092d04e 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ MANIFEST build/ dist/ +docs/_build/ htmlcov/ *.egg-info __pycache__/ diff --git a/.travis.yml b/.travis.yml index 4aed8d0a..5dfdae00 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,11 +17,21 @@ install: - pip install -r dev_requirements.txt script: -- ${TRAVIS_BUILD_DIR}/.travis/${MODE}_tests.sh +- | + if [ -f "${TRAVIS_BUILD_DIR}/.travis/${MODE}_tests.sh" ]; then + ${TRAVIS_BUILD_DIR}/.travis/${MODE}_tests.sh; + else + ${TRAVIS_BUILD_DIR}/.travis/${MODE}_tests.py; + fi + services: - docker + +# To avoid matrix explosion, just test against oldest->newest and +# newest->oldest in various configuartions. + matrix: include: # Mitogen tests. @@ -34,85 +44,32 @@ matrix: # 2.6 -> 2.7 - python: "2.6" env: MODE=mitogen DISTRO=centos7 - # 2.6 -> 2.6 - - python: "2.6" - env: MODE=mitogen DISTRO=centos6 - # 3.6 -> 2.7 + # 3.6 -> 2.6 - python: "3.6" - env: MODE=mitogen DISTRO=debian + env: MODE=mitogen DISTRO=centos6 # Debops tests. - # 2.4.3.0; 2.7 -> 2.7 - - python: "2.7" - env: MODE=debops_common VER=2.4.3.0 - # 2.5.5; 2.7 -> 2.7 + # 2.4.6.0; 2.7 -> 2.7 - python: "2.7" - env: MODE=debops_common VER=2.6.1 - # 2.5.5; 3.6 -> 2.7 + env: MODE=debops_common VER=2.4.6.0 + # 2.5.7; 3.6 -> 2.7 - python: "3.6" - env: MODE=debops_common VER=2.6.1 + env: MODE=debops_common VER=2.6.2 # ansible_mitogen tests. - # 2.4.3.0; Debian; 2.7 -> 2.7 - - python: "2.7" - env: MODE=ansible VER=2.4.3.0 DISTRO=debian - # 2.5.5; Debian; 2.7 -> 2.7 - - python: "2.7" - env: MODE=ansible VER=2.5.5 DISTRO=debian - # 2.6.0; Debian; 2.7 -> 2.7 - - python: "2.7" - env: MODE=ansible VER=2.6.0 DISTRO=debian - # 2.6.1; Debian; 2.7 -> 2.7 - - python: "2.7" - env: MODE=ansible VER=2.6.1 DISTRO=debian - # Centos 7 Python2 - # Latest + # 2.6 -> {debian, centos6, centos7} - python: "2.6" - env: MODE=ansible VER=2.6.1 DISTRO=centos7 - # Backward Compatiability - - python: "2.7" - env: MODE=ansible VER=2.5.5 DISTRO=centos7 - - python: "2.7" - env: MODE=ansible VER=2.6.0 DISTRO=centos7 - - python: "2.7" - env: MODE=ansible VER=2.6.1 DISTRO=centos7 - - # Centos 7 Python3 - - python: "3.6" - env: MODE=ansible VER=2.5.5 DISTRO=centos7 - - python: "3.6" - env: MODE=ansible VER=2.6.0 DISTRO=centos7 - - python: "3.6" - env: MODE=ansible VER=2.6.1 DISTRO=centos7 - - - # Centos 6 Python2 - # Latest - - python: "2.6" - env: MODE=ansible VER=2.6.1 DISTRO=centos6 - # Backward Compatiability + env: MODE=ansible VER=2.4.6.0 - python: "2.6" - env: MODE=ansible VER=2.5.5 DISTRO=centos6 - - python: "2.6" - env: MODE=ansible VER=2.6.0 DISTRO=centos6 - - python: "2.7" - env: MODE=ansible VER=2.6.1 DISTRO=centos6 + env: MODE=ansible VER=2.6.2 - # Centos 6 Python3 + # 3.6 -> {debian, centos6, centos7} - python: "3.6" - env: MODE=ansible VER=2.5.5 DISTRO=centos6 + env: MODE=ansible VER=2.4.6.0 - python: "3.6" - env: MODE=ansible VER=2.6.0 DISTRO=centos6 - - python: "3.6" - env: MODE=ansible VER=2.6.1 DISTRO=centos6 + env: MODE=ansible VER=2.6.2 - # Sanity check our tests against vanilla Ansible, they should pass. - - python: "2.7" - env: MODE=ansible VER=2.5.5 DISTRO=debian STRATEGY=linear + # Sanity check against vanilla Ansible. One job suffices. - python: "2.7" - env: MODE=ansible VER=2.6.0 DISTRO=debian STRATEGY=linear - - python: "2.7" - env: MODE=ansible VER=2.6.1 DISTRO=debian STRATEGY=linear - - + env: MODE=ansible VER=2.6.2 DISTROS=debian STRATEGY=linear diff --git a/.travis/ansible_tests.py b/.travis/ansible_tests.py new file mode 100755 index 00000000..3b5e40db --- /dev/null +++ b/.travis/ansible_tests.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python +# Run tests/ansible/all.yml under Ansible and Ansible-Mitogen + +import os +import sys + +import ci_lib +from ci_lib import run + + +BASE_PORT = 2201 +TESTS_DIR = os.path.join(ci_lib.GIT_ROOT, 'tests/ansible') +HOSTS_DIR = os.path.join(ci_lib.TMP, 'hosts') + + +with ci_lib.Fold('docker_setup'): + for i, distro in enumerate(ci_lib.DISTROS): + try: + run("docker rm -f target-%s", distro) + except: pass + + run(""" + docker run + --rm + --detach + --publish 0.0.0.0:%s:22/tcp + --hostname=target-%s + --name=target-%s + mitogen/%s-test + """, BASE_PORT + i, distro, distro, distro) + + +with ci_lib.Fold('job_setup'): + os.chdir(TESTS_DIR) + os.chmod('../data/docker/mitogen__has_sudo_pubkey.key', int('0600', 7)) + + # Don't set -U as that will upgrade Paramiko to a non-2.6 compatible version. + run("pip install -q ansible==%s", ci_lib.ANSIBLE_VERSION) + + run("mkdir %s", HOSTS_DIR) + run("ln -s %s/hosts/common-hosts %s", TESTS_DIR, HOSTS_DIR) + + with open(os.path.join(HOSTS_DIR, 'target'), 'w') as fp: + fp.write('[test-targets]\n') + for i, distro in enumerate(ci_lib.DISTROS): + fp.write("target-%s " + "ansible_host=%s " + "ansible_port=%s " + "ansible_user=mitogen__has_sudo_nopw " + "ansible_password=has_sudo_nopw_password" + "\n" % ( + distro, + ci_lib.DOCKER_HOSTNAME, + BASE_PORT + i, + )) + + # Build the binaries. + # run("make -C %s", TESTS_DIR) + if not ci_lib.exists_in_path('sshpass'): + run("sudo apt-get update") + run("sudo apt-get install -y sshpass") + + +with ci_lib.Fold('ansible'): + run('/usr/bin/time ./run_ansible_playbook.sh all.yml -i "%s" %s', + HOSTS_DIR, ' '.join(sys.argv[1:])) diff --git a/.travis/ansible_tests.sh b/.travis/ansible_tests.sh deleted file mode 100755 index a61ed836..00000000 --- a/.travis/ansible_tests.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash -ex -# Run tests/ansible/all.yml under Ansible and Ansible-Mitogen - -TRAVIS_BUILD_DIR="${TRAVIS_BUILD_DIR:-`pwd`}" -TMPDIR="/tmp/ansible-tests-$$" -ANSIBLE_VERSION="${VER:-2.6.1}" -export ANSIBLE_STRATEGY="${STRATEGY:-mitogen_linear}" -DISTRO="${DISTRO:-debian}" - -export PYTHONPATH="${PYTHONPATH}:${TRAVIS_BUILD_DIR}" - -# SSH passes these through to the container when run interactively, causing -# stdout to get messed up with libc warnings. -unset LANG LC_ALL - -function on_exit() -{ - rm -rf "$TMPDIR" - docker kill target || true -} - -trap on_exit EXIT -mkdir "$TMPDIR" - - -echo travis_fold:start:docker_setup -DOCKER_HOSTNAME="$(python ${TRAVIS_BUILD_DIR}/tests/show_docker_hostname.py)" - -docker run \ - --rm \ - --detach \ - --publish 0.0.0.0:2201:22/tcp \ - --name=target \ - mitogen/${DISTRO}-test -echo travis_fold:end:docker_setup - - -echo travis_fold:start:job_setup -pip install ansible=="${ANSIBLE_VERSION}" -cd ${TRAVIS_BUILD_DIR}/tests/ansible - -chmod go= ${TRAVIS_BUILD_DIR}/tests/data/docker/mitogen__has_sudo_pubkey.key -echo '[test-targets]' > ${TMPDIR}/hosts -echo \ - target \ - ansible_host=$DOCKER_HOSTNAME \ - ansible_port=2201 \ - ansible_user=mitogen__has_sudo_nopw \ - ansible_password=has_sudo_nopw_password \ - >> ${TMPDIR}/hosts - -# Build the binaries. -make -C ${TRAVIS_BUILD_DIR}/tests/ansible - -[ ! "$(type -p sshpass)" ] && sudo apt install -y sshpass - -echo travis_fold:end:job_setup - - -echo travis_fold:start:ansible -/usr/bin/time ./run_ansible_playbook.sh \ - all.yml \ - -i "${TMPDIR}/hosts" -echo travis_fold:end:ansible diff --git a/.travis/ci_lib.py b/.travis/ci_lib.py new file mode 100644 index 00000000..eb130a14 --- /dev/null +++ b/.travis/ci_lib.py @@ -0,0 +1,102 @@ + +from __future__ import absolute_import +from __future__ import print_function + +import atexit +import os +import subprocess +import sys +import shlex +import shutil +import tempfile + +import os +os.system('curl -H Metadata-Flavor:Google http://metadata.google.internal/computeMetadata/v1/instance/machine-type') + +# +# check_output() monkeypatch cutpasted from testlib.py +# + +def subprocess__check_output(*popenargs, **kwargs): + # Missing from 2.6. + process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) + output, _ = process.communicate() + retcode = process.poll() + if retcode: + cmd = kwargs.get("args") + if cmd is None: + cmd = popenargs[0] + raise subprocess.CalledProcessError(retcode, cmd) + return output + +if not hasattr(subprocess, 'check_output'): + subprocess.check_output = subprocess__check_output + +# ----------------- + +def _argv(s, *args): + if args: + s %= args + return shlex.split(s) + + +def run(s, *args, **kwargs): + argv = _argv(s, *args) + print('Running: %s' % (argv,)) + return subprocess.check_call(argv, **kwargs) + + +def get_output(s, *args, **kwargs): + argv = _argv(s, *args) + print('Running: %s' % (argv,)) + return subprocess.check_output(argv, **kwargs) + + +def exists_in_path(progname): + return any(os.path.exists(os.path.join(dirname, progname)) + for dirname in os.environ['PATH'].split(os.pathsep)) + + +class TempDir(object): + def __init__(self): + self.path = tempfile.mkdtemp(prefix='mitogen_ci_lib') + atexit.register(self.destroy) + + def destroy(self, rmtree=shutil.rmtree): + rmtree(self.path) + + +class Fold(object): + def __init__(self, name): + self.name = name + + def __enter__(self): + print('travis_fold:start:%s' % (self.name)) + + def __exit__(self, _1, _2, _3): + print('') + print('travis_fold:end:%s' % (self.name)) + + +os.environ.setdefault('ANSIBLE_STRATEGY', + os.environ.get('STRATEGY', 'mitogen_linear')) +ANSIBLE_VERSION = os.environ.get('VER', '2.6.2') +GIT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) +DISTROS = os.environ.get('DISTROS', 'debian centos6 centos7').split() +TMP = TempDir().path + +os.environ['PYTHONDONTWRITEBYTECODE'] = 'x' +os.environ['PYTHONPATH'] = '%s:%s' % ( + os.environ.get('PYTHONPATH', ''), + GIT_ROOT +) + +DOCKER_HOSTNAME = subprocess.check_output([ + sys.executable, + os.path.join(GIT_ROOT, 'tests/show_docker_hostname.py'), +]).decode().strip() + +# SSH passes these through to the container when run interactively, causing +# stdout to get messed up with libc warnings. +os.environ.pop('LANG', None) +os.environ.pop('LC_ALL', None) diff --git a/ansible_mitogen/connection.py b/ansible_mitogen/connection.py index c45a8aa7..708b6c13 100644 --- a/ansible_mitogen/connection.py +++ b/ansible_mitogen/connection.py @@ -31,7 +31,7 @@ from __future__ import unicode_literals import logging import os -import shlex +import random import stat import time @@ -53,7 +53,28 @@ import ansible_mitogen.target LOG = logging.getLogger(__name__) +def optional_secret(value): + """ + Wrap `value` in :class:`mitogen.core.Secret` if it is not :data:`None`, + otherwise return :data:`None`. + """ + if value is not None: + return mitogen.core.Secret(value) + + +def parse_python_path(s): + """ + Given the string set for ansible_python_interpeter, parse it using shell + syntax and return an appropriate argument vector. + """ + if s: + return ansible.utils.shlex.shlex_split(s) + + def _connect_local(spec): + """ + Return ContextService arguments for a local connection. + """ return { 'method': 'local', 'kwargs': { @@ -62,12 +83,10 @@ def _connect_local(spec): } -def wrap_or_none(klass, value): - if value is not None: - return klass(value) - - def _connect_ssh(spec): + """ + Return ContextService arguments for an SSH connection. + """ if C.HOST_KEY_CHECKING: check_host_keys = 'enforce' else: @@ -79,10 +98,11 @@ def _connect_ssh(spec): 'check_host_keys': check_host_keys, 'hostname': spec['remote_addr'], 'username': spec['remote_user'], - 'password': wrap_or_none(mitogen.core.Secret, spec['password']), + 'password': optional_secret(spec['password']), 'port': spec['port'], 'python_path': spec['python_path'], 'identity_file': spec['private_key_file'], + 'identities_only': False, 'ssh_path': spec['ssh_executable'], 'connect_timeout': spec['ansible_ssh_timeout'], 'ssh_args': spec['ssh_args'], @@ -92,6 +112,9 @@ def _connect_ssh(spec): def _connect_docker(spec): + """ + Return ContextService arguments for a Docker connection. + """ return { 'method': 'docker', 'kwargs': { @@ -103,7 +126,25 @@ def _connect_docker(spec): } +def _connect_kubectl(spec): + """ + Return ContextService arguments for a Kubernetes connection. + """ + return { + 'method': 'kubectl', + 'kwargs': { + 'pod': spec['remote_addr'], + 'python_path': spec['python_path'], + 'connect_timeout': spec['ansible_ssh_timeout'] or spec['timeout'], + 'kubectl_args': spec['extra_args'], + } + } + + def _connect_jail(spec): + """ + Return ContextService arguments for a FreeBSD jail connection. + """ return { 'method': 'jail', 'kwargs': { @@ -116,6 +157,9 @@ def _connect_jail(spec): def _connect_lxc(spec): + """ + Return ContextService arguments for an LXC Classic container connection. + """ return { 'method': 'lxc', 'kwargs': { @@ -126,11 +170,31 @@ def _connect_lxc(spec): } +def _connect_lxd(spec): + """ + Return ContextService arguments for an LXD container connection. + """ + return { + 'method': 'lxd', + 'kwargs': { + 'container': spec['remote_addr'], + 'python_path': spec['python_path'], + 'connect_timeout': spec['ansible_ssh_timeout'] or spec['timeout'], + } + } + + def _connect_machinectl(spec): + """ + Return ContextService arguments for a machinectl connection. + """ return _connect_setns(dict(spec, mitogen_kind='machinectl')) def _connect_setns(spec): + """ + Return ContextService arguments for a mitogen_setns connection. + """ return { 'method': 'setns', 'kwargs': { @@ -139,6 +203,7 @@ def _connect_setns(spec): 'python_path': spec['python_path'], 'kind': spec['mitogen_kind'], 'docker_path': spec['mitogen_docker_path'], + 'kubectl_path': spec['mitogen_kubectl_path'], 'lxc_info_path': spec['mitogen_lxc_info_path'], 'machinectl_path': spec['mitogen_machinectl_path'], } @@ -146,12 +211,15 @@ def _connect_setns(spec): def _connect_su(spec): + """ + Return ContextService arguments for su as a become method. + """ return { 'method': 'su', 'enable_lru': True, 'kwargs': { 'username': spec['become_user'], - 'password': wrap_or_none(mitogen.core.Secret, spec['become_pass']), + 'password': optional_secret(spec['become_pass']), 'python_path': spec['python_path'], 'su_path': spec['become_exe'], 'connect_timeout': spec['timeout'], @@ -160,12 +228,15 @@ def _connect_su(spec): def _connect_sudo(spec): + """ + Return ContextService arguments for sudo as a become method. + """ return { 'method': 'sudo', 'enable_lru': True, 'kwargs': { 'username': spec['become_user'], - 'password': wrap_or_none(mitogen.core.Secret, spec['become_pass']), + 'password': optional_secret(spec['become_pass']), 'python_path': spec['python_path'], 'sudo_path': spec['become_exe'], 'connect_timeout': spec['timeout'], @@ -175,12 +246,15 @@ def _connect_sudo(spec): def _connect_doas(spec): + """ + Return ContextService arguments for doas as a become method. + """ return { 'method': 'doas', 'enable_lru': True, 'kwargs': { 'username': spec['become_user'], - 'password': wrap_or_none(mitogen.core.Secret, spec['become_pass']), + 'password': optional_secret(spec['become_pass']), 'python_path': spec['python_path'], 'doas_path': spec['become_exe'], 'connect_timeout': spec['timeout'], @@ -189,12 +263,14 @@ def _connect_doas(spec): def _connect_mitogen_su(spec): - # su as a first-class proxied connection, not a become method. + """ + Return ContextService arguments for su as a first class connection. + """ return { 'method': 'su', 'kwargs': { 'username': spec['remote_user'], - 'password': wrap_or_none(mitogen.core.Secret, spec['password']), + 'password': optional_secret(spec['password']), 'python_path': spec['python_path'], 'su_path': spec['become_exe'], 'connect_timeout': spec['timeout'], @@ -203,12 +279,14 @@ def _connect_mitogen_su(spec): def _connect_mitogen_sudo(spec): - # sudo as a first-class proxied connection, not a become method. + """ + Return ContextService arguments for sudo as a first class connection. + """ return { 'method': 'sudo', 'kwargs': { 'username': spec['remote_user'], - 'password': wrap_or_none(mitogen.core.Secret, spec['password']), + 'password': optional_secret(spec['password']), 'python_path': spec['python_path'], 'sudo_path': spec['become_exe'], 'connect_timeout': spec['timeout'], @@ -218,12 +296,14 @@ def _connect_mitogen_sudo(spec): def _connect_mitogen_doas(spec): - # doas as a first-class proxied connection, not a become method. + """ + Return ContextService arguments for doas as a first class connection. + """ return { 'method': 'doas', 'kwargs': { 'username': spec['remote_user'], - 'password': wrap_or_none(mitogen.core.Secret, spec['password']), + 'password': optional_secret(spec['password']), 'python_path': spec['python_path'], 'doas_path': spec['become_exe'], 'connect_timeout': spec['timeout'], @@ -231,12 +311,16 @@ def _connect_mitogen_doas(spec): } +#: Mapping of connection method names to functions invoked as `func(spec)` +#: generating ContextService keyword arguments matching a connection +#: specification. CONNECTION_METHOD = { 'docker': _connect_docker, + 'kubectl': _connect_kubectl, 'jail': _connect_jail, 'local': _connect_local, 'lxc': _connect_lxc, - 'lxd': _connect_lxc, + 'lxd': _connect_lxd, 'machinectl': _connect_machinectl, 'setns': _connect_setns, 'ssh': _connect_ssh, @@ -249,17 +333,6 @@ CONNECTION_METHOD = { } -def parse_python_path(s): - """ - Given the string set for ansible_python_interpeter, parse it using shell - syntax and return an appropriate argument vector. - """ - if not s: - return None - - return ansible.utils.shlex.shlex_split(s) - - def config_from_play_context(transport, inventory_name, connection): """ Return a dict representing all important connection configuration, allowing @@ -277,11 +350,16 @@ def config_from_play_context(transport, inventory_name, connection): 'become_pass': connection._play_context.become_pass, 'password': connection._play_context.password, 'port': connection._play_context.port, - 'python_path': parse_python_path(connection.python_path), + 'python_path': parse_python_path( + connection.get_task_var('ansible_python_interpreter', + default='/usr/bin/python') + ), 'private_key_file': connection._play_context.private_key_file, 'ssh_executable': connection._play_context.ssh_executable, 'timeout': connection._play_context.timeout, - 'ansible_ssh_timeout': connection.ansible_ssh_timeout, + 'ansible_ssh_timeout': + connection.get_task_var('ansible_ssh_timeout', + default=C.DEFAULT_TIMEOUT), 'ssh_args': [ mitogen.core.to_text(term) for s in ( @@ -300,12 +378,22 @@ def config_from_play_context(transport, inventory_name, connection): ) for term in ansible.utils.shlex.shlex_split(s or '') ], - 'mitogen_via': connection.mitogen_via, - 'mitogen_kind': connection.mitogen_kind, - 'mitogen_docker_path': connection.mitogen_docker_path, - 'mitogen_lxc_info_path': connection.mitogen_lxc_info_path, - 'mitogen_machinectl_path': connection.mitogen_machinectl_path, - 'mitogen_ssh_debug_level': connection.mitogen_ssh_debug_level, + 'mitogen_via': + connection.get_task_var('mitogen_via'), + 'mitogen_kind': + connection.get_task_var('mitogen_kind'), + 'mitogen_docker_path': + connection.get_task_var('mitogen_docker_path'), + 'mitogen_kubectl_path': + connection.get_task_var('mitogen_kubectl_path'), + 'mitogen_lxc_info_path': + connection.get_task_var('mitogen_lxc_info_path'), + 'mitogen_machinectl_path': + connection.get_task_var('mitogen_machinectl_path'), + 'mitogen_ssh_debug_level': + connection.get_task_var('mitogen_ssh_debug_level'), + 'extra_args': + connection.get_extra_args(), } @@ -318,7 +406,7 @@ def config_from_hostvars(transport, inventory_name, connection, config = config_from_play_context(transport, inventory_name, connection) hostvars = dict(hostvars) return dict(config, **{ - 'remote_addr': hostvars.get('ansible_hostname', inventory_name), + 'remote_addr': hostvars.get('ansible_host', inventory_name), 'become': bool(become_user), 'become_user': become_user, 'become_pass': None, @@ -332,11 +420,40 @@ def config_from_hostvars(transport, inventory_name, connection, 'mitogen_via': hostvars.get('mitogen_via'), 'mitogen_kind': hostvars.get('mitogen_kind'), 'mitogen_docker_path': hostvars.get('mitogen_docker_path'), + 'mitogen_kubectl_path': hostvars.get('mitogen_kubectl_path'), 'mitogen_lxc_info_path': hostvars.get('mitogen_lxc_info_path'), 'mitogen_machinectl_path': hostvars.get('mitogen_machinctl_path'), }) +class CallChain(mitogen.parent.CallChain): + call_aborted_msg = ( + 'Mitogen was disconnected from the remote environment while a call ' + 'was in-progress. If you feel this is in error, please file a bug. ' + 'Original error was: %s' + ) + + def _rethrow(self, recv): + try: + return recv.get().unpickle() + except mitogen.core.ChannelError as e: + raise ansible.errors.AnsibleConnectionFailure( + self.call_aborted_msg % (e,) + ) + + def call(self, func, *args, **kwargs): + """ + Like :meth:`mitogen.parent.CallChain.call`, but log timings. + """ + t0 = time.time() + try: + recv = self.call_async(func, *args, **kwargs) + return self._rethrow(recv) + finally: + LOG.debug('Call took %d ms: %r', 1000 * (time.time() - t0), + mitogen.parent.CallSpec(func, args, kwargs)) + + class Connection(ansible.plugins.connection.ConnectionBase): #: mitogen.master.Broker for this worker. broker = None @@ -352,52 +469,50 @@ class Connection(ansible.plugins.connection.ConnectionBase): #: reached via become. context = None - #: mitogen.parent.Context for the login account on the target. This is - #: always the login account, even when become=True. + #: Context for the login account on the target. This is always the login + #: account, even when become=True. login_context = None - #: mitogen.parent.Context connected to the fork parent process in the - #: target user account. - fork_context = None - #: Only sudo, su, and doas are supported for now. become_methods = ['sudo', 'su', 'doas'] - #: Set to 'ansible_python_interpreter' by on_action_run(). - python_path = None - - #: Set to 'ansible_ssh_timeout' by on_action_run(). - ansible_ssh_timeout = None - - #: Set to 'mitogen_via' by on_action_run(). - mitogen_via = None - - #: Set to 'mitogen_kind' by on_action_run(). - mitogen_kind = None - - #: Set to 'mitogen_docker_path' by on_action_run(). - mitogen_docker_path = None - - #: Set to 'mitogen_lxc_info_path' by on_action_run(). - mitogen_lxc_info_path = None - - #: Set to 'mitogen_lxc_info_path' by on_action_run(). - mitogen_machinectl_path = None - - #: Set to 'mitogen_ssh_debug_level' by on_action_run(). - mitogen_ssh_debug_level = None - - #: Set to 'inventory_hostname' by on_action_run(). + #: Dict containing init_child() return value as recorded at startup by + #: ContextService. Contains: + #: + #: fork_context: Context connected to the fork parent : process in the + #: target account. + #: home_dir: Target context's home directory. + #: good_temp_dir: A writeable directory where new temporary directories + #: can be created. + init_child_result = None + + #: A :class:`mitogen.parent.CallChain` for calls made to the target + #: account, to ensure subsequent calls fail with the original exception if + #: pipelined directory creation or file transfer fails. + chain = None + + # + # Note: any of the attributes below may be :data:`None` if the connection + # plugin was constructed directly by a non-cooperative action, such as in + # the case of the synchronize module. + # + + #: Set to the host name as it appears in inventory by on_action_run(). inventory_hostname = None + #: Set to task_vars by on_action_run(). + _task_vars = None + #: Set to 'hostvars' by on_action_run() host_vars = None - #: Set to '_loader.get_basedir()' by on_action_run(). - loader_basedir = None + #: Set by on_action_run() + delegate_to_hostname = None - #: Set after connection to the target context's home directory. - home_dir = None + #: Set to '_loader.get_basedir()' by on_action_run(). Used by mitogen_local + #: to change the working directory to that of the current playbook, + #: matching vanilla Ansible behaviour. + loader_basedir = None def __init__(self, play_context, new_stdin, **kwargs): assert ansible_mitogen.process.MuxProcess.unix_listener_path, ( @@ -415,43 +530,53 @@ class Connection(ansible.plugins.connection.ConnectionBase): # https://github.com/dw/mitogen/issues/140 self.close() - def on_action_run(self, task_vars, loader_basedir): + def on_action_run(self, task_vars, delegate_to_hostname, loader_basedir): """ Invoked by ActionModuleMixin to indicate a new task is about to start executing. We use the opportunity to grab relevant bits from the task-specific data. + + :param dict task_vars: + Task variable dictionary. + :param str delegate_to_hostname: + :data:`None`, or the template-expanded inventory hostname this task + is being delegated to. A similar variable exists on PlayContext + when ``delegate_to:`` is active, however it is unexpanded. + :param str loader_basedir: + Loader base directory; see :attr:`loader_basedir`. """ - self.ansible_ssh_timeout = task_vars.get('ansible_ssh_timeout', - C.DEFAULT_TIMEOUT) - self.python_path = task_vars.get('ansible_python_interpreter', - '/usr/bin/python') - self.mitogen_via = task_vars.get('mitogen_via') - self.mitogen_kind = task_vars.get('mitogen_kind') - self.mitogen_docker_path = task_vars.get('mitogen_docker_path') - self.mitogen_lxc_info_path = task_vars.get('mitogen_lxc_info_path') - self.mitogen_machinectl_path = task_vars.get('mitogen_machinectl_path') - self.mitogen_ssh_debug_level = task_vars.get('mitogen_ssh_debug_level') self.inventory_hostname = task_vars['inventory_hostname'] + self._task_vars = task_vars self.host_vars = task_vars['hostvars'] + self.delegate_to_hostname = delegate_to_hostname self.loader_basedir = loader_basedir self.close(new_task=True) + def get_task_var(self, key, default=None): + if self._task_vars and key in self._task_vars: + return self._task_vars[key] + return default + @property def homedir(self): self._connect() - return self.home_dir + return self.init_child_result['home_dir'] @property def connected(self): return self.context is not None def _config_from_via(self, via_spec): + """ + Produce a dict connection specifiction given a string `via_spec`, of + the form `[become_user@]inventory_hostname`. + """ become_user, _, inventory_name = via_spec.rpartition('@') via_vars = self.host_vars[inventory_name] if isinstance(via_vars, jinja2.runtime.Undefined): raise ansible.errors.AnsibleConnectionFailure( self.unknown_via_msg % ( - self.mitogen_via, + via_spec, inventory_name, ) ) @@ -492,20 +617,11 @@ class Connection(ansible.plugins.connection.ConnectionBase): return stack, seen_names - def _connect(self): + def _connect_broker(self): """ - Establish a connection to the master process's UNIX listener socket, - constructing a mitogen.master.Router to communicate with the master, - and a mitogen.parent.Context to represent it. - - Depending on the original transport we should emulate, trigger one of - the _connect_*() service calls defined above to cause the master - process to establish the real connection on our behalf, or return a - reference to the existing one. + Establish a reference to the Broker, Router and parent context used for + connections. """ - if self.connected: - return - if not self.broker: self.broker = mitogen.master.Broker() self.router, self.parent = mitogen.unix.connect( @@ -513,14 +629,47 @@ class Connection(ansible.plugins.connection.ConnectionBase): broker=self.broker, ) - stack, _ = self._stack_from_config( - config_from_play_context( - transport=self.transport, - inventory_name=self.inventory_hostname, - connection=self - ) + def _config_from_direct_connection(self): + """ + """ + return config_from_play_context( + transport=self.transport, + inventory_name=self.inventory_hostname, + connection=self + ) + + def _config_from_delegate_to(self): + return config_from_hostvars( + transport=self._play_context.connection, + inventory_name=self.delegate_to_hostname, + connection=self, + hostvars=self.host_vars[self.delegate_to_hostname], + become_user=(self._play_context.become_user + if self._play_context.become + else None), ) + def _build_stack(self): + """ + Construct a list of dictionaries representing the connection + configuration between the controller and the target. This is + additionally used by the integration tests "mitogen_get_stack" action + to fetch the would-be connection configuration. + """ + if self.delegate_to_hostname is not None: + target_config = self._config_from_delegate_to() + else: + target_config = self._config_from_direct_connection() + + stack, _ = self._stack_from_config(target_config) + return stack + + def _connect_stack(self, stack): + """ + Pass `stack` to ContextService, requesting a copy of the context object + representing the target. If no connection exists yet, ContextService + will establish it before returning it or throwing an error. + """ dct = self.parent.call_service( service_name='ansible_mitogen.services.ContextService', method_name='get', @@ -533,13 +682,50 @@ class Connection(ansible.plugins.connection.ConnectionBase): raise ansible.errors.AnsibleConnectionFailure(dct['msg']) self.context = dct['context'] + self.chain = CallChain(self.context, pipelined=True) if self._play_context.become: self.login_context = dct['via'] else: self.login_context = self.context - self.fork_context = dct['init_child_result']['fork_context'] - self.home_dir = dct['init_child_result']['home_dir'] + self.init_child_result = dct['init_child_result'] + + def get_good_temp_dir(self): + self._connect() + return self.init_child_result['good_temp_dir'] + + def _generate_tmp_path(self): + return os.path.join( + self.get_good_temp_dir(), + 'ansible_mitogen_action_%016x' % ( + random.getrandbits(8*8), + ) + ) + + def _make_tmp_path(self): + assert getattr(self._shell, 'tmpdir', None) is None + self._shell.tmpdir = self._generate_tmp_path() + LOG.debug('Temporary directory: %r', self._shell.tmpdir) + self.get_chain().call_no_reply(os.mkdir, self._shell.tmpdir) + return self._shell.tmpdir + + def _connect(self): + """ + Establish a connection to the master process's UNIX listener socket, + constructing a mitogen.master.Router to communicate with the master, + and a mitogen.parent.Context to represent it. + + Depending on the original transport we should emulate, trigger one of + the _connect_*() service calls defined above to cause the master + process to establish the real connection on our behalf, or return a + reference to the existing one. + """ + if self.connected: + return + + self._connect_broker() + stack = self._build_stack() + self._connect_stack(stack) def close(self, new_task=False): """ @@ -547,7 +733,16 @@ class Connection(ansible.plugins.connection.ConnectionBase): gracefully shut down, and wait for shutdown to complete. Safe to call multiple times. """ + if getattr(self._shell, 'tmpdir', None) is not None: + # Avoid CallChain to ensure exception is logged on failure. + self.context.call_no_reply( + ansible_mitogen.target.prune_tree, + self._shell.tmpdir, + ) + self._shell.tmpdir = None + if self.context: + self.chain.reset() self.parent.call_service( service_name='ansible_mitogen.services.ContextService', method_name='put', @@ -555,46 +750,33 @@ class Connection(ansible.plugins.connection.ConnectionBase): ) self.context = None - self.fork_context = None self.login_context = None + self.init_child_result = None + self.chain = None if self.broker and not new_task: self.broker.shutdown() self.broker.join() self.broker = None self.router = None - def call_async(self, func, *args, **kwargs): + def get_chain(self, use_login=False, use_fork=False): """ - Start a function call to the target. - - :param bool use_login_context: - If present and :data:`True`, send the call to the login account - context rather than the optional become user context. - :returns: - mitogen.core.Receiver that receives the function call result. + Return the :class:`mitogen.parent.CallChain` to use for executing + function calls. + + :param bool use_login: + If :data:`True`, always return the chain for the login account + rather than any active become user. + :param bool use_fork: + If :data:`True`, return the chain for the fork parent. + :returns mitogen.parent.CallChain: """ self._connect() - if kwargs.pop('use_login_context', None): - call_context = self.login_context - else: - call_context = self.context - return call_context.call_async(func, *args, **kwargs) - - def call(self, func, *args, **kwargs): - """ - Start and wait for completion of a function call in the target. - - :raises mitogen.core.CallError: - The function call failed. - :returns: - Function return value. - """ - t0 = time.time() - try: - return self.call_async(func, *args, **kwargs).get().unpickle() - finally: - LOG.debug('Call took %d ms: %r', 1000 * (time.time() - t0), - mitogen.parent.CallSpec(func, args, kwargs)) + if use_login: + return self.login_context.default_call_chain + if use_fork: + return self.init_child_result['fork_context'].default_call_chain + return self.chain def create_fork_child(self): """ @@ -605,7 +787,17 @@ class Connection(ansible.plugins.connection.ConnectionBase): :returns: mitogen.core.Context of the new child. """ - return self.call(ansible_mitogen.target.create_fork_child) + return self.get_chain(use_fork=True).call( + ansible_mitogen.target.create_fork_child + ) + + def get_extra_args(self): + """ + Overridden by connections/mitogen_kubectl.py to a list of additional + arguments for the command. + """ + # TODO: maybe use this for SSH too. + return [] def get_default_cwd(self): """ @@ -634,7 +826,7 @@ class Connection(ansible.plugins.connection.ConnectionBase): (return code, stdout bytes, stderr bytes) """ emulate_tty = (not in_data and sudoable) - rc, stdout, stderr = self.call( + rc, stdout, stderr = self.get_chain().call( ansible_mitogen.target.exec_command, cmd=mitogen.utils.cast(cmd), in_data=mitogen.utils.cast(in_data), @@ -658,25 +850,37 @@ class Connection(ansible.plugins.connection.ConnectionBase): :param str out_path: Local filesystem path to write. """ - output = self.call(ansible_mitogen.target.read_path, - mitogen.utils.cast(in_path)) + output = self.get_chain().call( + ansible_mitogen.target.read_path, + mitogen.utils.cast(in_path), + ) ansible_mitogen.target.write_path(out_path, output) def put_data(self, out_path, data, mode=None, utimes=None): """ - Implement put_file() by caling the corresponding - ansible_mitogen.target function in the target. + Implement put_file() by caling the corresponding ansible_mitogen.target + function in the target, transferring small files inline. This is + pipelined and will return immediately; failed transfers are reported as + exceptions in subsequent functon calls. :param str out_path: Remote filesystem path to write. :param byte data: File contents to put. """ - self.call(ansible_mitogen.target.write_path, - mitogen.utils.cast(out_path), - mitogen.core.Blob(data), - mode=mode, - utimes=utimes) + self.get_chain().call_no_reply( + ansible_mitogen.target.write_path, + mitogen.utils.cast(out_path), + mitogen.core.Blob(data), + mode=mode, + utimes=utimes, + ) + + #: Maximum size of a small file before switching to streaming + #: transfer. This should really be the same as + #: mitogen.services.FileService.IO_SIZE, however the message format has + #: slightly more overhead, so just randomly subtract 4KiB. + SMALL_FILE_LIMIT = mitogen.core.CHUNK_SIZE - 4096 def put_file(self, in_path, out_path): """ @@ -695,14 +899,14 @@ class Connection(ansible.plugins.connection.ConnectionBase): # If the file is sufficiently small, just ship it in the argument list # rather than introducing an extra RTT for the child to request it from # FileService. - if st.st_size <= 32768: + if st.st_size <= self.SMALL_FILE_LIMIT: fp = open(in_path, 'rb') try: - s = fp.read(32769) + s = fp.read(self.SMALL_FILE_LIMIT + 1) finally: fp.close() - # Ensure file was not growing during call. + # Ensure did not grow during read. if len(s) == st.st_size: return self.put_data(out_path, s, mode=st.st_mode, utimes=(st.st_atime, st.st_mtime)) @@ -712,7 +916,12 @@ class Connection(ansible.plugins.connection.ConnectionBase): method_name='register', path=mitogen.utils.cast(in_path) ) - self.call( + + # For now this must remain synchronous, as the action plug-in may have + # passed us a temporary file to transfer. A future FileService could + # maintain an LRU list of open file descriptors to keep the temporary + # file alive, but that requires more work. + self.get_chain().call( ansible_mitogen.target.transfer_file, context=self.parent, in_path=in_path, diff --git a/ansible_mitogen/loaders.py b/ansible_mitogen/loaders.py index 441e8113..08c59278 100644 --- a/ansible_mitogen/loaders.py +++ b/ansible_mitogen/loaders.py @@ -37,10 +37,12 @@ try: from ansible.plugins.loader import connection_loader from ansible.plugins.loader import module_loader from ansible.plugins.loader import module_utils_loader + from ansible.plugins.loader import shell_loader from ansible.plugins.loader import strategy_loader except ImportError: # Ansible <2.4 from ansible.plugins import action_loader from ansible.plugins import connection_loader from ansible.plugins import module_loader from ansible.plugins import module_utils_loader + from ansible.plugins import shell_loader from ansible.plugins import strategy_loader diff --git a/ansible_mitogen/mixins.py b/ansible_mitogen/mixins.py index 2a9fdac8..4c06063b 100644 --- a/ansible_mitogen/mixins.py +++ b/ansible_mitogen/mixins.py @@ -110,19 +110,11 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): """ self._connection.on_action_run( task_vars=task_vars, + delegate_to_hostname=self._task.delegate_to, loader_basedir=self._loader.get_basedir(), ) return super(ActionModuleMixin, self).run(tmp, task_vars) - def call(self, func, *args, **kwargs): - """ - Arrange for a Python function to be called in the target context, which - should be some function from the standard library or - ansible_mitogen.target module. This junction point exists mainly as a - nice place to insert print statements during debugging. - """ - return self._connection.call(func, *args, **kwargs) - COMMAND_RESULT = { 'rc': 0, 'stdout': '', @@ -163,7 +155,10 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): target user account. """ LOG.debug('_remote_file_exists(%r)', path) - return self.call(os.path.exists, mitogen.utils.cast(path)) + return self._connection.get_chain().call( + os.path.exists, + mitogen.utils.cast(path) + ) def _configure_module(self, module_name, module_args, task_vars=None): """ @@ -179,48 +174,26 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): """ assert False, "_is_pipelining_enabled() should never be called." - def _get_remote_tmp(self): - """ - Mitogen-only: return the 'remote_tmp' setting. - """ - try: - s = self._connection._shell.get_option('remote_tmp') - except AttributeError: - s = ansible.constants.DEFAULT_REMOTE_TMP # <=2.4.x - - return self._remote_expand_user(s, sudoable=False) - def _make_tmp_path(self, remote_user=None): """ - Replace the base implementation's use of shell to implement mkdtemp() - with an actual call to mkdtemp(). Like vanilla, the directory is always - created in the login account context. + Return the directory created by the Connection instance during + connection. """ LOG.debug('_make_tmp_path(remote_user=%r)', remote_user) - - # _make_tmp_path() is basically a global stashed away as Shell.tmpdir. - # The copy action plugin violates layering and grabs this attribute - # directly. - self._connection._shell.tmpdir = self._connection.call( - ansible_mitogen.target.make_temp_directory, - base_dir=self._get_remote_tmp(), - use_login_context=True, - ) - LOG.debug('Temporary directory: %r', self._connection._shell.tmpdir) - self._cleanup_remote_tmp = True - return self._connection._shell.tmpdir + return self._connection._make_tmp_path() def _remove_tmp_path(self, tmp_path): """ - Replace the base implementation's invocation of rm -rf with a call to - shutil.rmtree(). + Stub out the base implementation's invocation of rm -rf, replacing it + with nothing, as the persistent interpreter automatically cleans up + after itself without introducing roundtrips. """ + # The actual removal is pipelined by Connection.close(). LOG.debug('_remove_tmp_path(%r)', tmp_path) - if tmp_path is None: - tmp_path = self._connection._shell.tmpdir - if self._should_remove_tmp_path(tmp_path): - self.call(shutil.rmtree, tmp_path) - self._connection._shell.tmpdir = None + # Upstream _remove_tmp_path resets shell.tmpdir here, however + # connection.py uses that as the sole location of the temporary + # directory, if one exists. + # self._connection._shell.tmpdir = None def _transfer_data(self, remote_path, data): """ @@ -237,6 +210,11 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): self._connection.put_data(remote_path, data) return remote_path + #: Actions listed here cause :func:`_fixup_perms2` to avoid a needless + #: roundtrip, as they modify file modes separately afterwards. This is due + #: to the method prototype having a default of `execute=True`. + FIXUP_PERMS_RED_HERRING = set(['copy']) + def _fixup_perms2(self, remote_paths, remote_user=None, execute=True): """ Mitogen always executes ActionBase helper methods in the context of the @@ -245,7 +223,7 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): """ LOG.debug('_fixup_perms2(%r, remote_user=%r, execute=%r)', remote_paths, remote_user, execute) - if execute: + if execute and self._load_name not in self.FIXUP_PERMS_RED_HERRING: return self._remote_chmod(remote_paths, mode='u+x') return self.COMMAND_RESULT.copy() @@ -257,7 +235,7 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): LOG.debug('_remote_chmod(%r, mode=%r, sudoable=%r)', paths, mode, sudoable) return self.fake_shell(lambda: mitogen.select.Select.all( - self._connection.call_async( + self._connection.get_chain().call_async( ansible_mitogen.target.set_file_mode, path, mode ) for path in paths @@ -270,9 +248,9 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): """ LOG.debug('_remote_chown(%r, user=%r, sudoable=%r)', paths, user, sudoable) - ent = self.call(pwd.getpwnam, user) + ent = self._connection.get_chain().call(pwd.getpwnam, user) return self.fake_shell(lambda: mitogen.select.Select.all( - self._connection.call_async( + self._connection.get_chain().call_async( os.chown, path, ent.pw_uid, ent.pw_gid ) for path in paths @@ -300,8 +278,10 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): # ~/.ansible -> /home/dmw/.ansible return os.path.join(self._connection.homedir, path[2:]) # ~root/.ansible -> /root/.ansible - return self.call(os.path.expanduser, mitogen.utils.cast(path), - use_login_context=not sudoable) + return self._connection.get_chain(use_login=(not sudoable)).call( + os.path.expanduser, + mitogen.utils.cast(path), + ) def get_task_timeout_secs(self): """ @@ -312,6 +292,25 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): except AttributeError: return getattr(self._task, 'async') + def _temp_file_gibberish(self, module_args, wrap_async): + # Ansible>2.5 module_utils reuses the action's temporary directory if + # one exists. Older versions error if this key is present. + if ansible.__version__ > '2.5': + if wrap_async: + # Sharing is not possible with async tasks, as in that case, + # the directory must outlive the action plug-in. + module_args['_ansible_tmpdir'] = None + else: + module_args['_ansible_tmpdir'] = self._connection._shell.tmpdir + + # If _ansible_tmpdir is unset, Ansible>2.6 module_utils will use + # _ansible_remote_tmp as the location to create the module's temporary + # directory. Older versions error if this key is present. + if ansible.__version__ > '2.6': + module_args['_ansible_remote_tmp'] = ( + self._connection.get_good_temp_dir() + ) + def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True, wrap_async=False): @@ -330,6 +329,7 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): self._update_module_args(module_name, module_args, task_vars) env = {} self._compute_environment_string(env) + self._temp_file_gibberish(module_args, wrap_async) self._connection._connect() return ansible_mitogen.planner.invoke( diff --git a/ansible_mitogen/planner.py b/ansible_mitogen/planner.py index c297ad8f..caf40af3 100644 --- a/ansible_mitogen/planner.py +++ b/ansible_mitogen/planner.py @@ -55,6 +55,7 @@ import ansible_mitogen.target LOG = logging.getLogger(__name__) NO_METHOD_MSG = 'Mitogen: no invocation method found for: ' NO_INTERPRETER_MSG = 'module (%s) is missing interpreter line' +NO_MODULE_MSG = 'The module %s was not found in configured module paths.' class Invocation(object): @@ -148,6 +149,8 @@ class Planner(object): """ new = dict((mitogen.core.UnicodeType(k), kwargs[k]) for k in kwargs) + new.setdefault('good_temp_dir', + self._inv.connection.get_good_temp_dir()) new.setdefault('cwd', self._inv.connection.get_default_cwd()) new.setdefault('extra_env', self._inv.connection.get_default_env()) new.setdefault('emulate_tty', True) @@ -393,6 +396,9 @@ _planners = [ def get_module_data(name): path = ansible_mitogen.loaders.module_loader.find_plugin(name, '') + if path is None: + raise ansible.errors.AnsibleError(NO_MODULE_MSG % (name,)) + with open(path, 'rb') as fp: source = fp.read() return mitogen.core.to_text(path), source @@ -474,7 +480,7 @@ def invoke(invocation): response = _invoke_forked_task(invocation, planner) else: _propagate_deps(invocation, planner, invocation.connection.context) - response = invocation.connection.call( + response = invocation.connection.get_chain().call( ansible_mitogen.target.run_module, kwargs=planner.get_kwargs(), ) diff --git a/ansible_mitogen/plugins/connection/mitogen_doas.py b/ansible_mitogen/plugins/connection/mitogen_doas.py index 7d60b482..873b0d9d 100644 --- a/ansible_mitogen/plugins/connection/mitogen_doas.py +++ b/ansible_mitogen/plugins/connection/mitogen_doas.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys diff --git a/ansible_mitogen/plugins/connection/mitogen_docker.py b/ansible_mitogen/plugins/connection/mitogen_docker.py index a98273e0..8af42711 100644 --- a/ansible_mitogen/plugins/connection/mitogen_docker.py +++ b/ansible_mitogen/plugins/connection/mitogen_docker.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys diff --git a/ansible_mitogen/plugins/connection/mitogen_jail.py b/ansible_mitogen/plugins/connection/mitogen_jail.py index 1c57bb38..fb7bce54 100644 --- a/ansible_mitogen/plugins/connection/mitogen_jail.py +++ b/ansible_mitogen/plugins/connection/mitogen_jail.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys diff --git a/ansible_mitogen/plugins/connection/mitogen_kubectl.py b/ansible_mitogen/plugins/connection/mitogen_kubectl.py new file mode 100644 index 00000000..5ffe3f7b --- /dev/null +++ b/ansible_mitogen/plugins/connection/mitogen_kubectl.py @@ -0,0 +1,56 @@ +# coding: utf-8 +# Copyright 2018, Yannig Perré +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import +import os.path +import sys + +import ansible.plugins.connection.kubectl +from ansible.module_utils.six import iteritems + +try: + import ansible_mitogen +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'kubectl' + + def get_extra_args(self): + parameters = [] + for key, option in iteritems(ansible.plugins.connection.kubectl.CONNECTION_OPTIONS): + if self.get_task_var('ansible_' + key) is not None: + parameters += [ option, self.get_task_var('ansible_' + key) ] + + return parameters diff --git a/ansible_mitogen/plugins/connection/mitogen_local.py b/ansible_mitogen/plugins/connection/mitogen_local.py index 35504d4d..fcd9c030 100644 --- a/ansible_mitogen/plugins/connection/mitogen_local.py +++ b/ansible_mitogen/plugins/connection/mitogen_local.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys diff --git a/ansible_mitogen/plugins/connection/mitogen_lxc.py b/ansible_mitogen/plugins/connection/mitogen_lxc.py index 2195aa3c..ce394102 100644 --- a/ansible_mitogen/plugins/connection/mitogen_lxc.py +++ b/ansible_mitogen/plugins/connection/mitogen_lxc.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys diff --git a/ansible_mitogen/plugins/connection/mitogen_lxd.py b/ansible_mitogen/plugins/connection/mitogen_lxd.py index 5d1391b9..77efe6c1 100644 --- a/ansible_mitogen/plugins/connection/mitogen_lxd.py +++ b/ansible_mitogen/plugins/connection/mitogen_lxd.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys diff --git a/ansible_mitogen/plugins/connection/mitogen_machinectl.py b/ansible_mitogen/plugins/connection/mitogen_machinectl.py index e71496a3..9b332a3f 100644 --- a/ansible_mitogen/plugins/connection/mitogen_machinectl.py +++ b/ansible_mitogen/plugins/connection/mitogen_machinectl.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys diff --git a/ansible_mitogen/plugins/connection/mitogen_setns.py b/ansible_mitogen/plugins/connection/mitogen_setns.py index 5f131655..23f62135 100644 --- a/ansible_mitogen/plugins/connection/mitogen_setns.py +++ b/ansible_mitogen/plugins/connection/mitogen_setns.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys diff --git a/ansible_mitogen/plugins/connection/mitogen_ssh.py b/ansible_mitogen/plugins/connection/mitogen_ssh.py index c0c577c3..dbaba407 100644 --- a/ansible_mitogen/plugins/connection/mitogen_ssh.py +++ b/ansible_mitogen/plugins/connection/mitogen_ssh.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys @@ -41,6 +42,8 @@ DOCUMENTATION = """ options: """ +import ansible.plugins.connection.ssh + try: import ansible_mitogen.connection except ImportError: @@ -53,3 +56,10 @@ import ansible_mitogen.connection class Connection(ansible_mitogen.connection.Connection): transport = 'ssh' + vanilla_class = ansible.plugins.connection.ssh.Connection + + @staticmethod + def _create_control_path(*args, **kwargs): + """Forward _create_control_path() to the implementation in ssh.py.""" + # https://github.com/dw/mitogen/issues/342 + return Connection.vanilla_class._create_control_path(*args, **kwargs) diff --git a/ansible_mitogen/plugins/connection/mitogen_su.py b/ansible_mitogen/plugins/connection/mitogen_su.py index fd09d0f0..104a7190 100644 --- a/ansible_mitogen/plugins/connection/mitogen_su.py +++ b/ansible_mitogen/plugins/connection/mitogen_su.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys diff --git a/ansible_mitogen/plugins/connection/mitogen_sudo.py b/ansible_mitogen/plugins/connection/mitogen_sudo.py index a6cb8bd2..367dd61b 100644 --- a/ansible_mitogen/plugins/connection/mitogen_sudo.py +++ b/ansible_mitogen/plugins/connection/mitogen_sudo.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys diff --git a/ansible_mitogen/plugins/strategy/mitogen.py b/ansible_mitogen/plugins/strategy/mitogen.py index 3ef522b4..f8608745 100644 --- a/ansible_mitogen/plugins/strategy/mitogen.py +++ b/ansible_mitogen/plugins/strategy/mitogen.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys @@ -44,12 +45,12 @@ import sys # debuggers and isinstance() work predictably. # -try: - import ansible_mitogen -except ImportError: - base_dir = os.path.dirname(__file__) - sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) - del base_dir +BASE_DIR = os.path.abspath( + os.path.join(os.path.dirname(__file__), '../../..') +) + +if BASE_DIR not in sys.path: + sys.path.insert(0, BASE_DIR) import ansible_mitogen.strategy import ansible.plugins.strategy.linear diff --git a/ansible_mitogen/plugins/strategy/mitogen_free.py b/ansible_mitogen/plugins/strategy/mitogen_free.py index 34f959ca..d3b1cdc6 100644 --- a/ansible_mitogen/plugins/strategy/mitogen_free.py +++ b/ansible_mitogen/plugins/strategy/mitogen_free.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys @@ -44,12 +45,12 @@ import sys # debuggers and isinstance() work predictably. # -try: - import ansible_mitogen -except ImportError: - base_dir = os.path.dirname(__file__) - sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) - del base_dir +BASE_DIR = os.path.abspath( + os.path.join(os.path.dirname(__file__), '../../..') +) + +if BASE_DIR not in sys.path: + sys.path.insert(0, BASE_DIR) import ansible_mitogen.loaders import ansible_mitogen.strategy diff --git a/ansible_mitogen/plugins/strategy/mitogen_linear.py b/ansible_mitogen/plugins/strategy/mitogen_linear.py index a5ea2a3d..51b03096 100644 --- a/ansible_mitogen/plugins/strategy/mitogen_linear.py +++ b/ansible_mitogen/plugins/strategy/mitogen_linear.py @@ -26,6 +26,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. +from __future__ import absolute_import import os.path import sys @@ -44,12 +45,12 @@ import sys # debuggers and isinstance() work predictably. # -try: - import ansible_mitogen -except ImportError: - base_dir = os.path.dirname(__file__) - sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) - del base_dir +BASE_DIR = os.path.abspath( + os.path.join(os.path.dirname(__file__), '../../..') +) + +if BASE_DIR not in sys.path: + sys.path.insert(0, BASE_DIR) import ansible_mitogen.loaders import ansible_mitogen.strategy diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index f19079ee..6e18a863 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -27,12 +27,19 @@ # POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import +import atexit import errno import logging import os import signal import socket import sys +import time + +try: + import faulthandler +except ImportError: + faulthandler = None import mitogen import mitogen.core @@ -43,6 +50,7 @@ import mitogen.service import mitogen.unix import mitogen.utils +import ansible.constants as C import ansible_mitogen.logging import ansible_mitogen.services @@ -52,6 +60,54 @@ from mitogen.core import b LOG = logging.getLogger(__name__) +def clean_shutdown(sock): + """ + Shut the write end of `sock`, causing `recv` in the worker process to wake + up with a 0-byte read and initiate mux process exit, then wait for a 0-byte + read from the read end, which will occur after the the child closes the + descriptor on exit. + + This is done using :mod:`atexit` since Ansible lacks any more sensible hook + to run code during exit, and unless some synchronization exists with + MuxProcess, debug logs may appear on the user's terminal *after* the prompt + has been printed. + """ + sock.shutdown(socket.SHUT_WR) + sock.recv(1) + + +def getenv_int(key, default=0): + """ + Get an integer-valued environment variable `key`, if it exists and parses + as an integer, otherwise return `default`. + """ + try: + return int(os.environ.get(key, str(default))) + except ValueError: + return default + + +def setup_gil(): + """ + Set extremely long GIL release interval to let threads naturally progress + through CPU-heavy sequences without forcing the wake of another thread that + may contend trying to run the same CPU-heavy code. For the new-style work, + this drops runtime ~33% and involuntary context switches by >80%, + essentially making threads cooperatively scheduled. + """ + try: + # Python 2. + sys.setcheckinterval(100000) + except AttributeError: + pass + + try: + # Python 3. + sys.setswitchinterval(10) + except AttributeError: + pass + + class MuxProcess(object): """ Implement a subprocess forked from the Ansible top-level, as a safe place @@ -109,11 +165,19 @@ class MuxProcess(object): if cls.worker_sock is not None: return + if faulthandler is not None: + faulthandler.enable() + + setup_gil() cls.unix_listener_path = mitogen.unix.make_socket_path() cls.worker_sock, cls.child_sock = socket.socketpair() + atexit.register(lambda: clean_shutdown(cls.worker_sock)) mitogen.core.set_cloexec(cls.worker_sock.fileno()) mitogen.core.set_cloexec(cls.child_sock.fileno()) + if os.environ.get('MITOGEN_PROFILING'): + mitogen.core.enable_profiling() + cls.original_env = dict(os.environ) cls.child_pid = os.fork() ansible_mitogen.logging.setup() @@ -139,10 +203,18 @@ class MuxProcess(object): # Let the parent know our listening socket is ready. mitogen.core.io_op(self.child_sock.send, b('1')) - self.child_sock.send(b('1')) # Block until the socket is closed, which happens on parent exit. mitogen.core.io_op(self.child_sock.recv, 1) + def _enable_router_debug(self): + if 'MITOGEN_ROUTER_DEBUG' in os.environ: + self.router.enable_debug() + + def _enable_stack_dumps(self): + secs = getenv_int('MITOGEN_DUMP_THREAD_STACKS', default=0) + if secs: + mitogen.debug.dump_to_logger(secs=secs) + def _setup_master(self): """ Construct a Router, Broker, and mitogen.unix listener @@ -155,11 +227,10 @@ class MuxProcess(object): self.listener = mitogen.unix.Listener( router=self.router, path=self.unix_listener_path, + backlog=C.DEFAULT_FORKS, ) - if 'MITOGEN_ROUTER_DEBUG' in os.environ: - self.router.enable_debug() - if 'MITOGEN_DUMP_THREAD_STACKS' in os.environ: - mitogen.debug.dump_to_logger() + self._enable_router_debug() + self._enable_stack_dumps() def _setup_services(self): """ @@ -174,7 +245,7 @@ class MuxProcess(object): ansible_mitogen.services.ContextService(self.router), ansible_mitogen.services.ModuleDepService(self.router), ], - size=int(os.environ.get('MITOGEN_POOL_SIZE', '16')), + size=getenv_int('MITOGEN_POOL_SIZE', default=16), ) LOG.debug('Service pool configured: size=%d', self.pool.size) @@ -199,4 +270,10 @@ class MuxProcess(object): ourself. In future this should gracefully join the pool, but TERM is fine for now. """ + if os.environ.get('MITOGEN_PROFILING'): + # TODO: avoid killing pool threads before they have written their + # .pstats. Really shouldn't be using kill() here at all, but hard + # to guarantee services can always be unblocked during shutdown. + time.sleep(1) + os.kill(os.getpid(), signal.SIGTERM) diff --git a/ansible_mitogen/runner.py b/ansible_mitogen/runner.py index ca3928b3..45bb5f0b 100644 --- a/ansible_mitogen/runner.py +++ b/ansible_mitogen/runner.py @@ -38,12 +38,14 @@ how to build arguments for it, preseed related data, etc. from __future__ import absolute_import from __future__ import unicode_literals +import atexit import ctypes import errno import imp import json import logging import os +import shlex import sys import tempfile import types @@ -82,6 +84,110 @@ iteritems = getattr(dict, 'iteritems', dict.items) LOG = logging.getLogger(__name__) +class EnvironmentFileWatcher(object): + """ + Usually Ansible edits to /etc/environment and ~/.pam_environment are + reflected in subsequent tasks if become:true or SSH multiplexing is + disabled, due to sudo and/or SSH reinvoking pam_env. Rather than emulate + existing semantics, do our best to ensure edits are always reflected. + + This can't perfectly replicate the existing behaviour, but it can safely + update and remove keys that appear to originate in `path`, and that do not + conflict with any existing environment key inherited from elsewhere. + + A more robust future approach may simply be to arrange for the persistent + interpreter to restart when a change is detected. + """ + def __init__(self, path): + self.path = os.path.expanduser(path) + #: Inode data at time of last check. + self._st = self._stat() + #: List of inherited keys appearing to originated from this file. + self._keys = [key for key, value in self._load() + if value == os.environ.get(key)] + LOG.debug('%r installed; existing keys: %r', self, self._keys) + + def __repr__(self): + return 'EnvironmentFileWatcher(%r)' % (self.path,) + + def _stat(self): + try: + return os.stat(self.path) + except OSError: + return None + + def _load(self): + try: + with open(self.path, 'r') as fp: + return list(self._parse(fp)) + except IOError: + return [] + + def _parse(self, fp): + """ + linux-pam-1.3.1/modules/pam_env/pam_env.c#L207 + """ + for line in fp: + # ' #export foo=some var ' -> ['#export', 'foo=some var '] + bits = shlex.split(line, comments=True) + if (not bits) or bits[0].startswith('#'): + continue + + if bits[0] == 'export': + bits.pop(0) + + key, sep, value = (' '.join(bits)).partition('=') + if key and sep: + yield key, value + + def _on_file_changed(self): + LOG.debug('%r: file changed, reloading', self) + for key, value in self._load(): + if key in os.environ: + LOG.debug('%r: existing key %r=%r exists, not setting %r', + self, key, os.environ[key], value) + else: + LOG.debug('%r: setting key %r to %r', self, key, value) + self._keys.append(key) + os.environ[key] = value + + def _remove_existing(self): + """ + When a change is detected, remove keys that existed in the old file. + """ + for key in self._keys: + if key in os.environ: + LOG.debug('%r: removing old key %r', self, key) + del os.environ[key] + self._keys = [] + + def check(self): + """ + Compare the :func:`os.stat` for the pam_env style environmnt file + `path` with the previous result `old_st`, which may be :data:`None` if + the previous stat attempt failed. Reload its contents if the file has + changed or appeared since last attempt. + + :returns: + New :func:`os.stat` result. The new call to :func:`reload_env` should + pass it as the value of `old_st`. + """ + st = self._stat() + if self._st == st: + return + + self._st = st + self._remove_existing() + + if st is None: + LOG.debug('%r: file has disappeared', self) + else: + self._on_file_changed() + +_pam_env_watcher = EnvironmentFileWatcher('~/.pam_environment') +_etc_env_watcher = EnvironmentFileWatcher('/etc/environment') + + def utf8(s): """ Coerce an object to bytes if it is Unicode. @@ -125,6 +231,11 @@ class Runner(object): This is passed as a string rather than a dict in order to mimic the implicit bytes/str conversion behaviour of a 2.x controller running against a 3.x target. + :param str good_temp_dir: + The writeable temporary directory for this user account reported by + :func:`ansible_mitogen.target.init_child` passed via the controller. + This is specified explicitly to remain compatible with Ansible<2.5, and + for forked tasks where init_child never runs. :param dict env: Additional environment variables to set during the run. Keys with :data:`None` are unset if present. @@ -137,16 +248,40 @@ class Runner(object): When :data:`True`, indicate the runner should detach the context from its parent after setup has completed successfully. """ - def __init__(self, module, service_context, json_args, extra_env=None, - cwd=None, env=None, econtext=None, detach=False): + def __init__(self, module, service_context, json_args, good_temp_dir, + extra_env=None, cwd=None, env=None, econtext=None, + detach=False): self.module = module self.service_context = service_context self.econtext = econtext self.detach = detach self.args = json.loads(json_args) + self.good_temp_dir = good_temp_dir self.extra_env = extra_env self.env = env self.cwd = cwd + #: If not :data:`None`, :meth:`get_temp_dir` had to create a temporary + #: directory for this run, because we're in an asynchronous task, or + #: because the originating action did not create a directory. + self._temp_dir = None + + def get_temp_dir(self): + path = self.args.get('_ansible_tmpdir') + if path is not None: + return path + + if self._temp_dir is None: + self._temp_dir = tempfile.mkdtemp( + prefix='ansible_mitogen_runner_', + dir=self.good_temp_dir, + ) + + return self._temp_dir + + def revert_temp_dir(self): + if self._temp_dir is not None: + ansible_mitogen.target.prune_tree(self._temp_dir) + self._temp_dir = None def setup(self): """ @@ -154,12 +289,25 @@ class Runner(object): from the parent, as :meth:`run` may detach prior to beginning execution. The base implementation simply prepares the environment. """ + self._setup_cwd() + self._setup_environ() + + def _setup_cwd(self): + """ + For situations like sudo to a non-privileged account, CWD could be + $HOME of the old account, which could have mode go=, which means it is + impossible to restore the old directory, so don't even try. + """ if self.cwd: - # For situations like sudo to another non-privileged account, the - # CWD could be $HOME of the old account, which could have mode go=, - # which means it is impossible to restore the old directory, so - # don't even bother. os.chdir(self.cwd) + + def _setup_environ(self): + """ + Apply changes from /etc/environment files before creating a + TemporaryEnvironment to snapshot environment state prior to module run. + """ + _pam_env_watcher.check() + _etc_env_watcher.check() env = dict(self.extra_env or {}) if self.env: env.update(self.env) @@ -171,33 +319,7 @@ class Runner(object): implementation simply restores the original environment. """ self._env.revert() - self._try_cleanup_temp() - - def _cleanup_temp(self): - """ - Empty temp_dir in time for the next module invocation. - """ - for name in os.listdir(ansible_mitogen.target.temp_dir): - if name in ('.', '..'): - continue - - path = os.path.join(ansible_mitogen.target.temp_dir, name) - LOG.debug('Deleting %r', path) - ansible_mitogen.target.prune_tree(path) - - def _try_cleanup_temp(self): - """ - During broker shutdown triggered by async task timeout or loss of - connection to the parent, it is possible for prune_tree() in - target.py::_on_broker_shutdown() to run before _cleanup_temp(), so skip - cleanup if the directory or a file disappears from beneath us. - """ - try: - self._cleanup_temp() - except (IOError, OSError) as e: - if e.args[0] == errno.ENOENT: - return - raise + self.revert_temp_dir() def _run(self): """ @@ -264,9 +386,9 @@ class ModuleUtilsImporter(object): mod.__loader__ = self if is_pkg: mod.__path__ = [] - mod.__package__ = fullname + mod.__package__ = str(fullname) else: - mod.__package__ = fullname.rpartition('.')[0] + mod.__package__ = str(fullname.rpartition('.')[0]) exec(code, mod.__dict__) self._loaded.add(fullname) return mod @@ -310,7 +432,8 @@ class NewStyleStdio(object): """ Patch ansible.module_utils.basic argument globals. """ - def __init__(self, args): + def __init__(self, args, temp_dir): + self.temp_dir = temp_dir self.original_stdout = sys.stdout self.original_stderr = sys.stderr self.original_stdin = sys.stdin @@ -320,7 +443,15 @@ class NewStyleStdio(object): ansible.module_utils.basic._ANSIBLE_ARGS = utf8(encoded) sys.stdin = StringIO(mitogen.core.to_text(encoded)) + self.original_get_path = getattr(ansible.module_utils.basic, + 'get_module_path', None) + ansible.module_utils.basic.get_module_path = self._get_path + + def _get_path(self): + return self.temp_dir + def revert(self): + ansible.module_utils.basic.get_module_path = self.original_get_path sys.stdout = self.original_stdout sys.stderr = self.original_stderr sys.stdin = self.original_stdin @@ -364,7 +495,7 @@ class ProgramRunner(Runner): fetched via :meth:`_get_program`. """ filename = self._get_program_filename() - path = os.path.join(ansible_mitogen.target.temp_dir, filename) + path = os.path.join(self.get_temp_dir(), filename) self.program_fp = open(path, 'wb') self.program_fp.write(self._get_program()) self.program_fp.flush() @@ -444,7 +575,7 @@ class ArgsFileRunner(Runner): self.args_fp = tempfile.NamedTemporaryFile( prefix='ansible_mitogen', suffix='-args', - dir=ansible_mitogen.target.temp_dir, + dir=self.get_temp_dir(), ) self.args_fp.write(utf8(self._get_args_contents())) self.args_fp.flush() @@ -548,10 +679,18 @@ class NewStyleRunner(ScriptRunner): for fullname in self.module_map['builtin']: mitogen.core.import_module(fullname) + def _setup_excepthook(self): + """ + Starting with Ansible 2.6, some modules (file.py) install a + sys.excepthook and never clean it up. So we must preserve the original + excepthook and restore it after the run completes. + """ + self.original_excepthook = sys.excepthook + def setup(self): super(NewStyleRunner, self).setup() - self._stdio = NewStyleStdio(self.args) + self._stdio = NewStyleStdio(self.args, self.get_temp_dir()) # It is possible that not supplying the script filename will break some # module, but this has never been a bug report. Instead act like an # interpreter that had its script piped on stdin. @@ -561,12 +700,17 @@ class NewStyleRunner(ScriptRunner): module_utils=self.module_map['custom'], ) self._setup_imports() + self._setup_excepthook() if libc__res_init: libc__res_init() + def _revert_excepthook(self): + sys.excepthook = self.original_excepthook + def revert(self): self._argv.revert() self._stdio.revert() + self._revert_excepthook() super(NewStyleRunner, self).revert() def _get_program_filename(self): @@ -600,9 +744,39 @@ class NewStyleRunner(ScriptRunner): else: main_module_name = b'__main__' - def _run(self): - code = self._get_code() + def _handle_magic_exception(self, mod, exc): + """ + Beginning with Ansible >2.6, some modules (file.py) install a + sys.excepthook which is a closure over AnsibleModule, redirecting the + magical exception to AnsibleModule.fail_json(). + For extra special needs bonus points, the class is not defined in + module_utils, but is defined in the module itself, meaning there is no + type for isinstance() that outlasts the invocation. + """ + klass = getattr(mod, 'AnsibleModuleError', None) + if klass and isinstance(exc, klass): + mod.module.fail_json(**exc.results) + + def _run_code(self, code, mod): + try: + if mitogen.core.PY3: + exec(code, vars(mod)) + else: + exec('exec code in vars(mod)') + except Exception as e: + self._handle_magic_exception(mod, e) + raise + + def _run_atexit_funcs(self): + """ + Newer Ansibles use atexit.register() to trigger tmpdir cleanup, when + AnsibleModule.tmpdir is responsible for creating its own temporary + directory. + """ + atexit._run_exitfuncs() + + def _run(self): mod = types.ModuleType(self.main_module_name) mod.__package__ = None # Some Ansible modules use __file__ to find the Ansiballz temporary @@ -610,16 +784,17 @@ class NewStyleRunner(ScriptRunner): # don't want to pointlessly write the module to disk when it never # actually needs to exist. So just pass the filename as it would exist. mod.__file__ = os.path.join( - ansible_mitogen.target.temp_dir, + self.get_temp_dir(), 'ansible_module_' + os.path.basename(self.path), ) + code = self._get_code() exc = None try: - if mitogen.core.PY3: - exec(code, vars(mod)) - else: - exec('exec code in vars(mod)') + try: + self._run_code(code, mod) + finally: + self._run_atexit_funcs() except SystemExit as e: exc = e diff --git a/ansible_mitogen/services.py b/ansible_mitogen/services.py index a7bb7db1..199f2116 100644 --- a/ansible_mitogen/services.py +++ b/ansible_mitogen/services.py @@ -46,14 +46,23 @@ import os.path import sys import threading +import ansible.constants + import mitogen import mitogen.service +import mitogen.utils +import ansible_mitogen.loaders import ansible_mitogen.module_finder import ansible_mitogen.target LOG = logging.getLogger(__name__) +# Force load of plugin to ensure ConfigManager has definitions loaded. Done +# during module import to ensure a single-threaded environment; PluginLoader +# is not thread-safe. +ansible_mitogen.loaders.shell_loader.get('sh') + if sys.version_info[0] == 3: def reraise(tp, value, tb): @@ -69,6 +78,17 @@ else: ) +def _get_candidate_temp_dirs(): + options = ansible.constants.config.get_plugin_options('shell', 'sh') + + # Pre 2.5 this came from ansible.constants. + remote_tmp = (options.get('remote_tmp') or + ansible.constants.DEFAULT_REMOTE_TMP) + dirs = list(options.get('system_tmpdirs', ('/var/tmp', '/tmp'))) + dirs.insert(0, remote_tmp) + return mitogen.utils.cast(dirs) + + class Error(Exception): pass @@ -119,11 +139,15 @@ class ContextService(mitogen.service.Service): count reaches zero. """ LOG.debug('%r.put(%r)', self, context) - if self._refs_by_context.get(context, 0) == 0: - LOG.warning('%r.put(%r): refcount was 0. shutdown_all called?', - self, context) - return - self._refs_by_context[context] -= 1 + self._lock.acquire() + try: + if self._refs_by_context.get(context, 0) == 0: + LOG.warning('%r.put(%r): refcount was 0. shutdown_all called?', + self, context) + return + self._refs_by_context[context] -= 1 + finally: + self._lock.release() def key_from_kwargs(self, **kwargs): """ @@ -163,29 +187,24 @@ class ContextService(mitogen.service.Service): self._lock.release() return count - def _shutdown(self, context, lru=None, new_context=None): + def _shutdown_unlocked(self, context, lru=None, new_context=None): """ Arrange for `context` to be shut down, and optionally add `new_context` to the LRU list while holding the lock. """ - LOG.info('%r._shutdown(): shutting down %r', self, context) + LOG.info('%r._shutdown_unlocked(): shutting down %r', self, context) context.shutdown() key = self._key_by_context[context] + del self._response_by_key[key] + del self._refs_by_context[context] + del self._key_by_context[context] + if lru and context in lru: + lru.remove(context) + if new_context: + lru.append(new_context) - self._lock.acquire() - try: - del self._response_by_key[key] - del self._refs_by_context[context] - del self._key_by_context[context] - if lru and context in lru: - lru.remove(context) - if new_context: - lru.append(new_context) - finally: - self._lock.release() - - def _update_lru(self, new_context, spec, via): + def _update_lru_unlocked(self, new_context, spec, via): """ Update the LRU ("MRU"?) list associated with the connection described by `kwargs`, destroying the most recently created context if the list @@ -204,16 +223,27 @@ class ContextService(mitogen.service.Service): 'but they are all marked as in-use.', via) return - self._shutdown(context, lru=lru, new_context=new_context) + self._shutdown_unlocked(context, lru=lru, new_context=new_context) + + def _update_lru(self, new_context, spec, via): + self._lock.acquire() + try: + self._update_lru_unlocked(new_context, spec, via) + finally: + self._lock.release() @mitogen.service.expose(mitogen.service.AllowParents()) def shutdown_all(self): """ For testing use, arrange for all connections to be shut down. """ - for context in list(self._key_by_context): - self._shutdown(context) - self._lru_by_via = {} + self._lock.acquire() + try: + for context in list(self._key_by_context): + self._shutdown_unlocked(context) + self._lru_by_via = {} + finally: + self._lock.release() def _on_stream_disconnect(self, stream): """ @@ -249,8 +279,19 @@ class ContextService(mitogen.service.Service): ) def _send_module_forwards(self, context): - for fullname in self.ALWAYS_PRELOAD: - self.router.responder.forward_module(context, fullname) + self.router.responder.forward_modules(context, self.ALWAYS_PRELOAD) + + _candidate_temp_dirs = None + + def _get_candidate_temp_dirs(self): + """ + Return a list of locations to try to create the single temporary + directory used by the run. This simply caches the (expensive) plugin + load of :func:`_get_candidate_temp_dirs`. + """ + if self._candidate_temp_dirs is None: + self._candidate_temp_dirs = _get_candidate_temp_dirs() + return self._candidate_temp_dirs def _connect(self, key, spec, via=None): """ @@ -298,8 +339,11 @@ class ContextService(mitogen.service.Service): lambda: self._on_stream_disconnect(stream)) self._send_module_forwards(context) - init_child_result = context.call(ansible_mitogen.target.init_child, - log_level=LOG.getEffectiveLevel()) + init_child_result = context.call( + ansible_mitogen.target.init_child, + log_level=LOG.getEffectiveLevel(), + candidate_temp_dirs=self._get_candidate_temp_dirs(), + ) if os.environ.get('MITOGEN_DUMP_THREAD_STACKS'): from mitogen import debug @@ -345,6 +389,12 @@ class ContextService(mitogen.service.Service): return latch + disconnect_msg = ( + 'Channel was disconnected while connection attempt was in progress; ' + 'this may be caused by an abnormal Ansible exit, or due to an ' + 'unreliable target.' + ) + @mitogen.service.expose(mitogen.service.AllowParents()) @mitogen.service.arg_spec({ 'stack': list @@ -372,6 +422,13 @@ class ContextService(mitogen.service.Service): if isinstance(result, tuple): # exc_info() reraise(*result) via = result['context'] + except mitogen.core.ChannelError: + return { + 'context': None, + 'init_child_result': None, + 'method_name': spec['method'], + 'msg': self.disconnect_msg, + } except mitogen.core.StreamError as e: return { 'context': None, @@ -388,6 +445,8 @@ class ModuleDepService(mitogen.service.Service): Scan a new-style module and produce a cached mapping of module_utils names to their resolved filesystem paths. """ + invoker_class = mitogen.service.SerializedInvoker + def __init__(self, *args, **kwargs): super(ModuleDepService, self).__init__(*args, **kwargs) self._cache = {} diff --git a/ansible_mitogen/strategy.py b/ansible_mitogen/strategy.py index fbe23ef7..e105984c 100644 --- a/ansible_mitogen/strategy.py +++ b/ansible_mitogen/strategy.py @@ -59,7 +59,7 @@ def wrap_connection_loader__get(name, *args, **kwargs): While the strategy is active, rewrite connection_loader.get() calls for some transports into requests for a compatible Mitogen transport. """ - if name in ('docker', 'jail', 'local', 'lxc', + if name in ('docker', 'kubectl', 'jail', 'local', 'lxc', 'lxd', 'machinectl', 'setns', 'ssh'): name = 'mitogen_' + name return connection_loader__get(name, *args, **kwargs) diff --git a/ansible_mitogen/target.py b/ansible_mitogen/target.py index e5365dd4..ff6ed083 100644 --- a/ansible_mitogen/target.py +++ b/ansible_mitogen/target.py @@ -43,30 +43,64 @@ import operator import os import pwd import re +import resource import signal import stat import subprocess +import sys import tempfile import traceback +import types -import ansible.module_utils.json_utils -import ansible_mitogen.runner import mitogen.core import mitogen.fork import mitogen.parent import mitogen.service +# Ansible since PR #41749 inserts "import __main__" into +# ansible.module_utils.basic. Mitogen's importer will refuse such an import, so +# we must setup a fake "__main__" before that module is ever imported. The +# str() is to cast Unicode to bytes on Python 2.6. +if not sys.modules.get(str('__main__')): + sys.modules[str('__main__')] = types.ModuleType(str('__main__')) + +import ansible.module_utils.json_utils +import ansible_mitogen.runner + LOG = logging.getLogger(__name__) -#: Set by init_child() to the single temporary directory that will exist for -#: the duration of the process. -temp_dir = None +MAKE_TEMP_FAILED_MSG = ( + "Unable to find a useable temporary directory. This likely means no\n" + "system-supplied TMP directory can be written to, or all directories\n" + "were mounted on 'noexec' filesystems.\n" + "\n" + "The following paths were tried:\n" + " %(namelist)s\n" + "\n" + "Please check '-vvv' output for a log of individual path errors." +) + #: Initialized to an econtext.parent.Context pointing at a pristine fork of #: the target Python interpreter before it executes any code or imports. _fork_parent = None +#: Set by :func:`init_child` to the name of a writeable and executable +#: temporary directory accessible by the active user account. +good_temp_dir = None + + +# issue #362: subprocess.Popen(close_fds=True) aka. AnsibleModule.run_command() +# loops the entire SC_OPEN_MAX space. CentOS>5 ships with 1,048,576 FDs by +# default, resulting in huge (>500ms) runtime waste running many commands. +# Therefore if we are a child, cap the range to something reasonable. +rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) +if (rlimit[0] > 512 or rlimit[1] > 512) and not mitogen.is_master: + resource.setrlimit(resource.RLIMIT_NOFILE, (512, 512)) + subprocess.MAXFD = 512 # Python <3.x +del rlimit + def get_small_file(context, path): """ @@ -180,29 +214,77 @@ def _on_broker_shutdown(): prune_tree(temp_dir) -@mitogen.core.takes_econtext -def reset_temp_dir(econtext): +def is_good_temp_dir(path): """ - Create one temporary directory to be reused by all runner.py invocations - for the lifetime of the process. The temporary directory is changed for - each forked job, and emptied as necessary by runner.py::_cleanup_temp() - after each module invocation. + Return :data:`True` if `path` can be used as a temporary directory, logging + any failures that may cause it to be unsuitable. If the directory doesn't + exist, we attempt to create it using :func:`os.makedirs`. + """ + if not os.path.exists(path): + try: + os.makedirs(path, mode=int('0700', 8)) + except OSError as e: + LOG.debug('temp dir %r unusable: did not exist and attempting ' + 'to create it failed: %s', path, e) + return False + + try: + tmp = tempfile.NamedTemporaryFile( + prefix='ansible_mitogen_is_good_temp_dir', + dir=path, + ) + except (OSError, IOError) as e: + LOG.debug('temp dir %r unusable: %s', path, e) + return False + + try: + try: + os.chmod(tmp.name, int('0700', 8)) + except OSError as e: + LOG.debug('temp dir %r unusable: %s: chmod failed: %s', + path, e) + return False + + try: + # access(.., X_OK) is sufficient to detect noexec. + if not os.access(tmp.name, os.X_OK): + raise OSError('filesystem appears to be mounted noexec') + except OSError as e: + LOG.debug('temp dir %r unusable: %s: %s', path, e) + return False + finally: + tmp.close() + + return True - The result is that a context need only create and delete one directory - during startup and shutdown, and no further filesystem writes need occur - assuming no modules execute that create temporary files. + +def find_good_temp_dir(candidate_temp_dirs): + """ + Given a list of candidate temp directories extracted from ``ansible.cfg``, + combine it with the Python-builtin list of candidate directories used by + :mod:`tempfile`, then iteratively try each until one is found that is both + writeable and executable. + + :param list candidate_temp_dirs: + List of candidate $variable-expanded and tilde-expanded directory paths + that may be usable as a temporary directory. """ - global temp_dir - # https://github.com/dw/mitogen/issues/239 - temp_dir = tempfile.mkdtemp(prefix='ansible_mitogen_') + paths = [os.path.expandvars(os.path.expanduser(p)) + for p in candidate_temp_dirs] + paths.extend(tempfile._candidate_tempdir_list()) - # This must be reinstalled in forked children too, since the Broker - # instance from the parent process does not carry over to the new child. - mitogen.core.listen(econtext.broker, 'shutdown', _on_broker_shutdown) + for path in paths: + if is_good_temp_dir(path): + LOG.debug('Selected temp directory: %r (from %r)', path, paths) + return path + + raise IOError(MAKE_TEMP_FAILED_MSG % { + 'paths': '\n '.join(paths), + }) @mitogen.core.takes_econtext -def init_child(econtext, log_level): +def init_child(econtext, log_level, candidate_temp_dirs): """ Called by ContextService immediately after connection; arranges for the (presently) spotless Python interpreter to be forked, where the newly @@ -215,6 +297,9 @@ def init_child(econtext, log_level): :param int log_level: Logging package level active in the master. + :param list[str] candidate_temp_dirs: + List of $variable-expanded and tilde-expanded directory names to add to + candidate list of temporary directories. :returns: Dict like:: @@ -228,20 +313,23 @@ def init_child(econtext, log_level): the controller will use to start forked jobs, and `home_dir` is the home directory for the active user account. """ - global _fork_parent - mitogen.parent.upgrade_router(econtext) - _fork_parent = econtext.router.fork() - reset_temp_dir(econtext) - # Copying the master's log level causes log messages to be filtered before # they reach LogForwarder, thus reducing an influx of tiny messges waking # the connection multiplexer process in the master. LOG.setLevel(log_level) logging.getLogger('ansible_mitogen').setLevel(log_level) + global _fork_parent + mitogen.parent.upgrade_router(econtext) + _fork_parent = econtext.router.fork() + + global good_temp_dir + good_temp_dir = find_good_temp_dir(candidate_temp_dirs) + return { 'fork_context': _fork_parent, 'home_dir': mitogen.core.to_text(os.path.expanduser('~')), + 'good_temp_dir': good_temp_dir, } @@ -254,7 +342,6 @@ def create_fork_child(econtext): """ mitogen.parent.upgrade_router(econtext) context = econtext.router.fork() - context.call(reset_temp_dir) LOG.debug('create_fork_child() -> %r', context) return context @@ -406,27 +493,6 @@ def run_module_async(kwargs, job_id, timeout_secs, econtext): arunner.run() -def make_temp_directory(base_dir): - """ - Handle creation of `base_dir` if it is absent, in addition to a unique - temporary directory within `base_dir`. This is the temporary directory that - becomes 'remote_tmp', not the one used by Ansiballz. It always uses the - system temporary directory. - - :returns: - Newly created temporary directory. - """ - # issue #301: remote_tmp may contain $vars. - base_dir = os.path.expandvars(base_dir) - - if not os.path.exists(base_dir): - os.makedirs(base_dir, mode=int('0700', 8)) - return tempfile.mkdtemp( - dir=base_dir, - prefix='ansible-mitogen-tmp-', - ) - - def get_user_shell(): """ For commands executed directly via an SSH command-line, SSH looks up the diff --git a/dev_requirements.txt b/dev_requirements.txt index f093721b..68f0422a 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -3,7 +3,7 @@ ansible==2.6.1 coverage==4.5.1 Django==1.6.11 # Last version supporting 2.6. mock==2.0.0 -pytz==2012d # Last 2.6-compat version. +pytz==2018.5 paramiko==2.3.2 # Last 2.6-compat version. pytest-catchlog==1.2.2 pytest==3.1.2 diff --git a/docs/ansible.rst b/docs/ansible.rst index 501dafe9..263d2b10 100644 --- a/docs/ansible.rst +++ b/docs/ansible.rst @@ -78,9 +78,27 @@ Installation deploy = (ALL) NOPASSWD:/usr/bin/python -c* -5. Subscribe to the `mitogen-announce mailing list - `_ to stay updated with new - releases and important bug fixes. +5. + + .. raw:: html + +
+ Releases occur frequently and often include important fixes. Subscribe + to the mitogen-announce + mailing list be notified of new releases. + +

+ + + + + +

+
+ Demo @@ -116,6 +134,7 @@ Testimonials strategy took Clojars' Ansible runs from **14 minutes to 2 minutes**. I still can't quite believe it." +* "Enabling the mitogen plugin in ansible feels like switching from floppy to SSD" .. _noteworthy_differences: @@ -123,7 +142,7 @@ Testimonials Noteworthy Differences ---------------------- -* Ansible 2.3-2.5 are supported along with Python 2.6, 2.7 or 3.6. Verify your +* Ansible 2.3-2.7 are supported along with Python 2.6, 2.7 or 3.6. Verify your installation is running one of these versions by checking ``ansible --version`` output. @@ -136,6 +155,7 @@ Noteworthy Differences * The `docker `_, `jail `_, + `kubectl `_, `local `_, `lxc `_, `lxd `_, @@ -270,8 +290,7 @@ command line, or as host and group variables. File Transfer ~~~~~~~~~~~~~ -Normally `sftp `_ or -`scp `_ are used to copy files by the +Normally `sftp(1)`_ or `scp(1)`_ are used to copy files by the `assemble `_, `copy `_, `patch `_, @@ -282,6 +301,9 @@ actions, or when uploading modules with pipelining disabled. With Mitogen copies are implemented natively using the same interpreters, connection tree, and routed message bus that carries RPCs. +.. _scp(1): https://linux.die.net/man/1/scp +.. _sftp(1): https://linux.die.net/man/1/sftp + This permits direct streaming between endpoints regardless of execution environment, without necessitating temporary copies in intermediary accounts or machines, for example when ``become`` is active, or in the presence of @@ -301,8 +323,7 @@ to rename over any existing file. This ensures the file remains consistent at all times, in the event of a crash, or when overlapping `ansible-playbook` runs deploy differing file contents. -The `sftp `_ and `scp -`_ tools may cause undetected data corruption +The `sftp(1)`_ and `scp(1)`_ tools may cause undetected data corruption in the form of truncated files, or files containing intermingled data segments from overlapping runs. As part of normal operation, both tools expose a window where readers may observe inconsistent file contents. @@ -311,10 +332,11 @@ where readers may observe inconsistent file contents. Performance ^^^^^^^^^^^ -One roundtrip initiates a transfer larger than 32KiB, while smaller transfers -are embedded in the initiating RPC. For tools operating via SSH multiplexing, 4 -roundtrips are required to configure the IO channel, in addition to the time to -start the local and remote processes. +One roundtrip initiates a transfer larger than 124 KiB, while smaller transfers +are embedded in a 0-roundtrip pipelined call. For tools operating via SSH +multiplexing, 4 roundtrips are required to configure the IO channel, followed +by 6 roundtrips to transfer the file in the case of ``sftp``, in addition to +the time to start the local and remote processes. An invocation of ``scp`` with an empty ``.profile`` over a 30 ms link takes ~140 ms, wasting 110 ms per invocation, rising to ~2,000 ms over a 400 ms @@ -400,6 +422,141 @@ this precisely, to avoid breaking playbooks that expect text to appear in specific variables with a particular linefeed style. +.. _ansible_tempfiles: + +Temporary Files +~~~~~~~~~~~~~~~ + +Temporary file handling in Ansible is incredibly tricky business, and the exact +behaviour varies across major releases. + +Ansible creates a variety of temporary files and directories depending on its +operating mode. + +In the best case when pipelining is enabled and no temporary uploads are +required, for each task Ansible will create one directory below a +system-supplied temporary directory returned by :func:`tempfile.mkdtemp`, owned +by the target account a new-style module will execute in. + +In other cases depending on the task type, whether become is active, whether +the target become user is privileged, whether the associated action plugin +needs to upload files, and whether the associated module needs to store files, +Ansible may: + +* Create a directory owned by the SSH user either under ``remote_tmp``, or a + system-default directory, +* Upload action dependencies such as non-new style modules or rendered + templates to that directory via `sftp(1)`_ or `scp(1)`_. +* Attempt to modify the directory's access control list to grant access to the + target user using `setfacl(1) `_, + requiring that tool to be installed and a supported filesystem to be in use, + or for the ``allow_world_readable_tmpfiles`` setting to be :data:`True`. +* Create a directory owned by the target user either under ``remote_tmp``, or + a system-default directory, if a new-style module needs a temporary directory + and one was not previously created for a supporting file earlier in the + invocation. + +In summary, for each task Ansible may create one or more of: + +* ``~ssh_user//...`` owned by the login user, +* ``$TMPDIR/ansible-tmp-...`` owned by the login user, +* ``$TMPDIR/ansible-tmp-...`` owned by the login user with ACLs permitting + write access by the become user, +* ``~become_user//...`` owned by the become user, +* ``$TMPDIR/ansible__payload_.../`` owned by the become user, +* ``$TMPDIR/ansible-module-tmp-.../`` owned by the become user. + + +Mitogen for Ansible +^^^^^^^^^^^^^^^^^^^ + +Temporary h +Temporary directory handling is fiddly and varies across major Ansible +releases. + + +Temporary directories must exist to maintain compatibility with Ansible, as +many modules introspect :data:`sys.argv` to find a directory where they may +write files, however only one directory exists for the lifetime of each +interpreter, its location is consistent for each target account, and it is +always privately owned by that account. + +The paths below are tried until one is found that is writeable and lives on a +filesystem with ``noexec`` disabled: + +1. ``$variable`` and tilde-expanded ``remote_tmp`` setting from + ``ansible.cfg`` +2. ``$variable`` and tilde-expanded ``system_tmpdirs`` setting from + ``ansible.cfg`` +3. ``TMPDIR`` environment variable +4. ``TEMP`` environment variable +5. ``TMP`` environment variable +6. ``/tmp`` +7. ``/var/tmp`` +8. ``/usr/tmp`` +9. Current working directory + +The directory is created once at startup, and subdirectories are automatically +created and destroyed for every new task. Management of subdirectories happens +on the controller, but management of the parent directory happens entirely on +the target. + + +.. _ansible_process_env: + +Process Environment Emulation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Since Ansible discards processes after each module invocation, follow-up tasks +often (but not always) receive a new environment that will usually include +changes made by previous tasks. As such modifications are common, for +compatibility the extension emulates the existing behaviour as closely as +possible. + +Some scenarios exist where emulation is impossible, for example, applying +``nsswitch.conf`` changes when ``nscd`` is not in use. If future scenarios +appear that cannot be solved through emulation, the extension will be updated +to automatically restart affected interpreters instead. + + +DNS Resolution +^^^^^^^^^^^^^^ + +Modifications to ``/etc/resolv.conf`` cause the glibc resolver configuration to +be reloaded via `res_init(3) `_. This +isn't necessary on some Linux distributions carrying glibc patches to +automatically check ``/etc/resolv.conf`` periodically, however it is necessary +on at least Debian and BSD derivatives. + + +``/etc/environment`` +^^^^^^^^^^^^^^^^^^^^ + +When ``become: true`` is active or SSH multiplexing is disabled, modifications +by previous tasks to ``/etc/environment`` and ``$HOME/.pam_environment`` are +normally reflected, since the content of those files is reapplied by `PAM +`_ via `pam_env` +on each authentication of ``sudo`` or ``sshd``. + +Both files are monitored for changes, and changes are applied where it appears +safe to do so: + +* New keys are added if they did not otherwise exist in the inherited + environment, or previously had the same value as found in the file before it + changed. + +* Given a key (such as ``http_proxy``) added to the file where no such key + exists in the environment, the key will be added. + +* Given a key (such as ``PATH``) where an existing environment key exists with + a different value, the update or deletion will be ignored, as it is likely + the key was overridden elsewhere after `pam_env` ran, such as by + ``/etc/profile``. + +* Given a key removed from the file that had the same value as the existing + environment key, the key will be removed. + + How Modules Execute ~~~~~~~~~~~~~~~~~~~ @@ -525,6 +682,8 @@ connection delegation is supported. * ``ansible_user``: Name of user within the container to execute as. +.. _method-jail: + FreeBSD Jail ~~~~~~~~~~~~ @@ -536,6 +695,19 @@ connection delegation is supported. * ``ansible_user``: Name of user within the jail to execute as. +.. _method-kubectl: + +Kubernetes Pod +~~~~~~~~~~~~~~ + +Like `kubectl +`_ except +connection delegation is supported. + +* ``ansible_host``: Name of pod (default: inventory hostname). +* ``ansible_user``: Name of user to authenticate to API as. + + Local ~~~~~ @@ -568,10 +740,10 @@ additional differences exist that may break existing playbooks. LXC ~~~ -Like `lxc `_ -and `lxd `_ -except connection delegation is supported, and ``lxc-attach`` is always used -rather than the LXC Python bindings, as is usual with ``lxc``. +Connect to classic LXC containers, like `lxc +`_ except +connection delegation is supported, and ``lxc-attach`` is always used rather +than the LXC Python bindings, as is usual with ``lxc``. The ``lxc-attach`` command must be available on the host machine. @@ -579,6 +751,20 @@ The ``lxc-attach`` command must be available on the host machine. * ``ansible_host``: Name of LXC container (default: inventory hostname). +.. _method-lxd: + +LXD +~~~ + +Connect to modern LXD containers, like `lxd +`_ except +connection delegation is supported. The ``lxc`` command must be available on +the host machine. + +* ``ansible_python_interpreter`` +* ``ansible_host``: Name of LXC container (default: inventory hostname). + + .. _machinectl: Machinectl @@ -601,21 +787,23 @@ Setns ~~~~~ The ``setns`` method connects to Linux containers via `setns(2) -`_. Unlike :ref:`method-docker` and -:ref:`method-lxc` the namespace transition is handled internally, ensuring -optimal throughput to the child. This is necessary for :ref:`machinectl` where -only PTY channels are supported. +`_. Unlike :ref:`method-docker`, +:ref:`method-lxc`, and :ref:`method-lxd` the namespace transition is handled +internally, ensuring optimal throughput to the child. This is necessary for +:ref:`machinectl` where only PTY channels are supported. A utility program must be installed to discover the PID of the container's root process. -* ``mitogen_kind``: one of ``docker``, ``lxc`` or ``machinectl``. +* ``mitogen_kind``: one of ``docker``, ``lxc``, ``lxd`` or ``machinectl``. * ``ansible_host``: Name of container as it is known to the corresponding tool (default: inventory hostname). * ``ansible_user``: Name of user within the container to execute as. * ``mitogen_docker_path``: path to Docker if not available on the system path. -* ``mitogen_lxc_info_path``: path to ``lxc-info`` command if not available as - ``/usr/bin/lxc-info``. +* ``mitogen_lxc_path``: path to LXD's ``lxc`` command if not available as + ``lxc-info``. +* ``mitogen_lxc_info_path``: path to LXC classic's ``lxc-info`` command if not + available as ``lxc-info``. * ``mitogen_machinectl_path``: path to ``machinectl`` command if not available as ``/bin/machinectl``. @@ -692,25 +880,45 @@ except connection delegation is supported. Debugging --------- -Diagnostics and use of the :py:mod:`logging` package output on the target -machine are usually discarded. With Mitogen, all of this is captured and -returned to the controller, where it can be viewed as desired with ``-vvv``. -Basic high level logs are produced with ``-vvv``, with logging of all IO on the -controller with ``-vvvv`` or higher. - -Although use of standard IO and the logging package on the target is forwarded -to the controller, it is not possible to receive IO activity logs, as the -process of receiving those logs would would itself generate IO activity. To -receive a complete trace of every process on every machine, file-based logging -is necessary. File-based logging can be enabled by setting -``MITOGEN_ROUTER_DEBUG=1`` in your environment. - -When file-based logging is enabled, one file per context will be created on the -local machine and every target machine, as ``/tmp/mitogen..log``. - -If you are experiencing a hang, ``MITOGEN_DUMP_THREAD_STACKS=1`` causes every -process on every machine to dump every thread stack into the logging framework -every 5 seconds. +Diagnostics and :py:mod:`logging` package output on targets are usually +discarded. With Mitogen, these are captured and forwarded to the controller +where they can be viewed with ``-vvv``. Basic high level logs are produced with +``-vvv``, with logging of all IO on the controller with ``-vvvv`` or higher. + +While uncaptured standard IO and the logging package on targets is forwarded, +it is not possible to receive IO activity logs, as the forwarding process would +would itself generate additional IO. + +To receive a complete trace of every process on every machine, file-based +logging is necessary. File-based logging can be enabled by setting +``MITOGEN_ROUTER_DEBUG=1`` in your environment. When file-based logging is +enabled, one file per context will be created on the local machine and every +target machine, as ``/tmp/mitogen..log``. + +.. _diagnosing-hangs: + +Diagnosing Hangs +~~~~~~~~~~~~~~~~ + +If you encounter a hang, the ``MITOGEN_DUMP_THREAD_STACKS=`` environment +variable arranges for each process on each machine to dump each thread stack +into the logging framework every `secs` seconds, which is visible when running +with ``-vvv``. + +However, certain controller hangs may render ``MITOGEN_DUMP_THREAD_STACKS`` +ineffective, or occur too infrequently for interactive reproduction. In these +cases `faulthandler `_ may be used: + +1. For Python 2, ``pip install faulthandler``. This is unnecessary on Python 3. +2. Once the hang occurs, observe the process tree using ``pstree`` or ``ps + --forest``. +3. The most likely process to be hung is the connection multiplexer, which can + easily be identified as the parent of all SSH client processes. +4. Send ``kill -SEGV `` to the multiplexer PID, causing it to print all + thread stacks. +5. `File a bug `_ including a copy + of the stacks, along with a description of the last task executing prior to + the hang. Getting Help diff --git a/docs/api.rst b/docs/api.rst index 6efca6dd..72a6b4db 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -35,9 +35,9 @@ mitogen.core Decorator that marks a function or class method to automatically receive a kwarg named `econtext`, referencing the - :py:class:`mitogen.core.ExternalContext` active in the context in which the + :class:`mitogen.core.ExternalContext` active in the context in which the function is being invoked in. The decorator is only meaningful when the - function is invoked via :py:data:`CALL_FUNCTION + function is invoked via :data:`CALL_FUNCTION `. When the function is invoked directly, `econtext` must still be passed to @@ -47,10 +47,10 @@ mitogen.core .. decorator:: takes_router Decorator that marks a function or class method to automatically receive a - kwarg named `router`, referencing the :py:class:`mitogen.core.Router` + kwarg named `router`, referencing the :class:`mitogen.core.Router` active in the context in which the function is being invoked in. The decorator is only meaningful when the function is invoked via - :py:data:`CALL_FUNCTION `. + :data:`CALL_FUNCTION `. When the function is invoked directly, `router` must still be passed to it explicitly. @@ -87,25 +87,25 @@ Message Class .. class:: Message - Messages are the fundamental unit of communication, comprising the fields - from in the :ref:`stream-protocol` header, an optional reference to the - receiving :class:`mitogen.core.Router` for ingress messages, and helper - methods for deserialization and generating replies. + Messages are the fundamental unit of communication, comprising fields from + the :ref:`stream-protocol` header, an optional reference to the receiving + :class:`mitogen.core.Router` for ingress messages, and helper methods for + deserialization and generating replies. .. attribute:: router - The :py:class:`mitogen.core.Router` responsible for routing the - message. This is :py:data:`None` for locally originated messages. + The :class:`mitogen.core.Router` responsible for routing the + message. This is :data:`None` for locally originated messages. .. attribute:: receiver - The :py:class:`mitogen.core.Receiver` over which the message was last - received. Part of the :py:class:`mitogen.select.Select` interface. - Defaults to :py:data:`None`. + The :class:`mitogen.core.Receiver` over which the message was last + received. Part of the :class:`mitogen.select.Select` interface. + Defaults to :data:`None`. .. attribute:: dst_id - Integer target context ID. :py:class:`mitogen.core.Router` delivers + Integer target context ID. :class:`mitogen.core.Router` delivers messages locally when their :attr:`dst_id` matches :data:`mitogen.context_id`, otherwise they are routed up or downstream. @@ -117,12 +117,12 @@ Message Class .. attribute:: auth_id The context ID under whose authority the message is acting. See - :py:ref:`source-verification`. + :ref:`source-verification`. .. attribute:: handle Integer target handle in the destination context. This is one of the - :py:ref:`standard-handles`, or a dynamically generated handle used to + :ref:`standard-handles`, or a dynamically generated handle used to receive a one-time reply, such as the return value of a function call. .. attribute:: reply_to @@ -143,12 +143,12 @@ Message Class .. py:method:: __init__ (\**kwargs) - Construct a message from from the supplied `kwargs`. :py:attr:`src_id` - and :py:attr:`auth_id` are always set to :py:data:`mitogen.context_id`. + Construct a message from from the supplied `kwargs`. :attr:`src_id` + and :attr:`auth_id` are always set to :data:`mitogen.context_id`. .. py:classmethod:: pickled (obj, \**kwargs) - Construct a pickled message, setting :py:attr:`data` to the + Construct a pickled message, setting :attr:`data` to the serialization of `obj`, and setting remaining fields using `kwargs`. :returns: @@ -156,10 +156,10 @@ Message Class .. method:: unpickle (throw=True) - Unpickle :py:attr:`data`, optionally raising any exceptions present. + Unpickle :attr:`data`, optionally raising any exceptions present. :param bool throw: - If :py:data:`True`, raise exceptions, otherwise it is the caller's + If :data:`True`, raise exceptions, otherwise it is the caller's responsibility. :raises mitogen.core.CallError: @@ -169,8 +169,8 @@ Message Class .. method:: reply (obj, router=None, \**kwargs) - Compose a reply to this message and send it using :py:attr:`router`, or - `router` is :py:attr:`router` is :data:`None`. + Compose a reply to this message and send it using :attr:`router`, or + `router` is :attr:`router` is :data:`None`. :param obj: Either a :class:`Message`, or an object to be serialized in order @@ -190,8 +190,8 @@ Router Class .. class:: Router Route messages between parent and child contexts, and invoke handlers - defined on our parent context. :py:meth:`Router.route() ` straddles - the :py:class:`Broker ` and user threads, it is safe + defined on our parent context. :meth:`Router.route() ` straddles + the :class:`Broker ` and user threads, it is safe to call anywhere. **Note:** This is the somewhat limited core version of the Router class @@ -217,7 +217,7 @@ Router Class .. method:: stream_by_id (dst_id) - Return the :py:class:`mitogen.core.Stream` that should be used to + Return the :class:`mitogen.core.Stream` that should be used to communicate with `dst_id`. If a specific route for `dst_id` is not known, a reference to the parent context's stream is returned. @@ -238,16 +238,16 @@ Router Class .. method:: add_handler (fn, handle=None, persist=True, respondent=None, policy=None) Invoke `fn(msg)` for each Message sent to `handle` from this context. - Unregister after one invocation if `persist` is ``False``. If `handle` - is ``None``, a new handle is allocated and returned. + Unregister after one invocation if `persist` is :data:`False`. If + `handle` is :data:`None`, a new handle is allocated and returned. :param int handle: - If not ``None``, an explicit handle to register, usually one of the - ``mitogen.core.*`` constants. If unspecified, a new unused handle - will be allocated. + If not :data:`None`, an explicit handle to register, usually one of + the ``mitogen.core.*`` constants. If unspecified, a new unused + handle will be allocated. :param bool persist: - If ``False``, the handler will be unregistered after a single + If :data:`False`, the handler will be unregistered after a single message has been received. :param mitogen.core.Context respondent: @@ -260,28 +260,29 @@ Router Class :param function policy: Function invoked as `policy(msg, stream)` where `msg` is a - :py:class:`mitogen.core.Message` about to be delivered, and - `stream` is the :py:class:`mitogen.core.Stream` on which it was - received. The function must return :py:data:`True`, otherwise an + :class:`mitogen.core.Message` about to be delivered, and + `stream` is the :class:`mitogen.core.Stream` on which it was + received. The function must return :data:`True`, otherwise an error is logged and delivery is refused. Two built-in policy functions exist: - * :py:func:`mitogen.core.has_parent_authority`: requires the + * :func:`mitogen.core.has_parent_authority`: requires the message arrived from a parent context, or a context acting with a parent context's authority (``auth_id``). - * :py:func:`mitogen.parent.is_immediate_child`: requires the + * :func:`mitogen.parent.is_immediate_child`: requires the message arrived from an immediately connected child, for use in messaging patterns where either something becomes buggy or insecure by permitting indirect upstream communication. In case of refusal, and the message's ``reply_to`` field is - nonzero, a :py:class:`mitogen.core.CallError` is delivered to the + nonzero, a :class:`mitogen.core.CallError` is delivered to the sender indicating refusal occurred. :return: - `handle`, or if `handle` was ``None``, the newly allocated handle. + `handle`, or if `handle` was :data:`None`, the newly allocated + handle. .. method:: del_handler (handle) @@ -296,22 +297,22 @@ Router Class destination is the local context, then arrange for it to be dispatched using the local handlers. - This is a lower overhead version of :py:meth:`route` that may only be + This is a lower overhead version of :meth:`route` that may only be called from the I/O multiplexer thread. :param mitogen.core.Stream stream: - If not ``None``, a reference to the stream the message arrived on. - Used for performing source route verification, to ensure sensitive - messages such as ``CALL_FUNCTION`` arrive only from trusted - contexts. + If not :data:`None`, a reference to the stream the message arrived + on. Used for performing source route verification, to ensure + sensitive messages such as ``CALL_FUNCTION`` arrive only from + trusted contexts. .. method:: route(msg) - Arrange for the :py:class:`Message` `msg` to be delivered to its + Arrange for the :class:`Message` `msg` to be delivered to its destination using any relevant downstream context, or if none is found, by forwarding the message upstream towards the master context. If `msg` is destined for the local context, it is dispatched using the handles - registered with :py:meth:`add_handler`. + registered with :meth:`add_handler`. This may be called from any thread. @@ -320,7 +321,7 @@ Router Class .. class:: Router (broker=None) - Extend :py:class:`mitogen.core.Router` with functionality useful to + Extend :class:`mitogen.core.Router` with functionality useful to masters, and child contexts who later become masters. Currently when this class is required, the target context's router is upgraded at runtime. @@ -333,16 +334,16 @@ Router Class customers or projects. :param mitogen.master.Broker broker: - :py:class:`Broker` instance to use. If not specified, a private - :py:class:`Broker` is created. + :class:`Broker` instance to use. If not specified, a private + :class:`Broker` is created. .. attribute:: profiling When :data:`True`, cause the broker thread and any subsequent broker and main threads existing in any child to write ``/tmp/mitogen.stats...log`` containing a - :py:mod:`cProfile` dump on graceful exit. Must be set prior to - construction of any :py:class:`Broker`, e.g. via: + :mod:`cProfile` dump on graceful exit. Must be set prior to + construction of any :class:`Broker`, e.g. via: .. code:: @@ -377,7 +378,7 @@ Router Class and router, and responds to function calls identically to children created using other methods. - For long-lived processes, :py:meth:`local` is always better as it + For long-lived processes, :meth:`local` is always better as it guarantees a pristine interpreter state that inherited little from the parent. Forking should only be used in performance-sensitive scenarios where short-lived children must be spawned to isolate potentially buggy @@ -419,10 +420,10 @@ Router Class immediate copy-on-write to large portions of the process heap. * Locks held in the parent causing random deadlocks in the child, such - as when another thread emits a log entry via the :py:mod:`logging` - package concurrent to another thread calling :py:meth:`fork`. + as when another thread emits a log entry via the :mod:`logging` + package concurrent to another thread calling :meth:`fork`. - * Objects existing in Thread-Local Storage of every non-:py:meth:`fork` + * Objects existing in Thread-Local Storage of every non-:meth:`fork` thread becoming permanently inaccessible, and never having their object destructors called, including TLS usage by native extension code, triggering many new variants of all the issues above. @@ -433,16 +434,16 @@ Router Class case, children continually reuse the same state due to repeatedly forking from a static parent. - :py:meth:`fork` cleans up Mitogen-internal objects, in addition to - locks held by the :py:mod:`logging` package, reseeds - :py:func:`random.random`, and the OpenSSL PRNG via - :py:func:`ssl.RAND_add`, but only if the :py:mod:`ssl` module is + :meth:`fork` cleans up Mitogen-internal objects, in addition to + locks held by the :mod:`logging` package, reseeds + :func:`random.random`, and the OpenSSL PRNG via + :func:`ssl.RAND_add`, but only if the :mod:`ssl` module is already loaded. You must arrange for your program's state, including any third party packages in use, to be cleaned up by specifying an `on_fork` function. The associated stream implementation is - :py:class:`mitogen.fork.Stream`. + :class:`mitogen.fork.Stream`. :param function on_fork: Function invoked as `on_fork()` from within the child process. This @@ -457,20 +458,20 @@ Router Class rich data structures that cannot normally be passed via a serialization. - :param Context via: - Same as the `via` parameter for :py:meth:`local`. + :param mitogen.core.Context via: + Same as the `via` parameter for :meth:`local`. :param bool debug: - Same as the `debug` parameter for :py:meth:`local`. + Same as the `debug` parameter for :meth:`local`. :param bool profiling: - Same as the `profiling` parameter for :py:meth:`local`. + Same as the `profiling` parameter for :meth:`local`. .. method:: local (remote_name=None, python_path=None, debug=False, connect_timeout=None, profiling=False, via=None) Construct a context on the local machine as a subprocess of the current process. The associated stream implementation is - :py:class:`mitogen.master.Stream`. + :class:`mitogen.master.Stream`. :param str remote_name: The ``argv[0]`` suffix for the new process. If `remote_name` is @@ -492,9 +493,9 @@ Router Class another tool, such as ``["/usr/bin/env", "python"]``. :param bool debug: - If :data:`True`, arrange for debug logging (:py:meth:`enable_debug`) to + If :data:`True`, arrange for debug logging (:meth:`enable_debug`) to be enabled in the new context. Automatically :data:`True` when - :py:meth:`enable_debug` has been called, but may be used + :meth:`enable_debug` has been called, but may be used selectively otherwise. :param bool unidirectional: @@ -509,14 +510,14 @@ Router Class healthy. Defaults to 30 seconds. :param bool profiling: - If :data:`True`, arrange for profiling (:py:data:`profiling`) to be + If :data:`True`, arrange for profiling (:data:`profiling`) to be enabled in the new context. Automatically :data:`True` when - :py:data:`profiling` is :data:`True`, but may be used selectively + :data:`profiling` is :data:`True`, but may be used selectively otherwise. :param mitogen.core.Context via: - If not ``None``, arrange for construction to occur via RPCs made to - the context `via`, and for :py:data:`ADD_ROUTE + If not :data:`None`, arrange for construction to occur via RPCs + made to the context `via`, and for :data:`ADD_ROUTE ` messages to be generated as appropriate. .. code-block:: python @@ -527,28 +528,28 @@ Router Class # Use the SSH connection to create a sudo connection. remote_root = router.sudo(username='root', via=remote_machine) - .. method:: dos (username=None, password=None, su_path=None, password_prompt=None, incorrect_prompts=None, \**kwargs) + .. method:: doas (username=None, password=None, doas_path=None, password_prompt=None, incorrect_prompts=None, \**kwargs) - Construct a context on the local machine over a ``su`` invocation. The - ``su`` process is started in a newly allocated pseudo-terminal, and - supports typing interactive passwords. + Construct a context on the local machine over a ``doas`` invocation. + The ``doas`` process is started in a newly allocated pseudo-terminal, + and supports typing interactive passwords. - Accepts all parameters accepted by :py:meth:`local`, in addition to: + Accepts all parameters accepted by :meth:`local`, in addition to: :param str username: Username to use, defaults to ``root``. :param str password: The account password to use if requested. - :param str su_path: - Filename or complete path to the ``su`` binary. ``PATH`` will be - searched if given as a filename. Defaults to ``su``. + :param str doas_path: + Filename or complete path to the ``doas`` binary. ``PATH`` will be + searched if given as a filename. Defaults to ``doas``. :param bytes password_prompt: A string that indicates ``doas`` is requesting a password. Defaults to ``Password:``. :param list incorrect_prompts: List of bytestrings indicating the password is incorrect. Defaults to `(b"doas: authentication failed")`. - :raises mitogen.su.PasswordError: + :raises mitogen.doas.PasswordError: A password was requested but none was provided, the supplied password was incorrect, or the target account did not exist. @@ -558,7 +559,7 @@ Router Class temporary new Docker container using the ``docker`` program. One of `container` or `image` must be specified. - Accepts all parameters accepted by :py:meth:`local`, in addition to: + Accepts all parameters accepted by :meth:`local`, in addition to: :param str container: Existing container to connect to. Defaults to :data:`None`. @@ -567,7 +568,7 @@ Router Class :data:`None`, which Docker interprets as ``root``. :param str image: Image tag to use to construct a temporary container. Defaults to - ``None``. + :data:`None`. :param str docker_path: Filename or complete path to the Docker binary. ``PATH`` will be searched if given as a filename. Defaults to ``docker``. @@ -577,7 +578,7 @@ Router Class Construct a context on the local machine within a FreeBSD jail using the ``jexec`` program. - Accepts all parameters accepted by :py:meth:`local`, in addition to: + Accepts all parameters accepted by :meth:`local`, in addition to: :param str container: Existing container to connect to. Defaults to :data:`None`. @@ -588,28 +589,57 @@ Router Class Filename or complete path to the ``jexec`` binary. ``PATH`` will be searched if given as a filename. Defaults to ``/usr/sbin/jexec``. + .. method:: kubectl (pod, kubectl_path=None, kubectl_args=None, \**kwargs) + + Construct a context in a container via the Kubernetes ``kubectl`` + program. + + Accepts all parameters accepted by :meth:`local`, in addition to: + + :param str pod: + Kubernetes pod to connect to. + :param str kubectl_path: + Filename or complete path to the ``kubectl`` binary. ``PATH`` will + be searched if given as a filename. Defaults to ``kubectl``. + :param list kubectl_args: + Additional arguments to pass to the ``kubectl`` command. + .. method:: lxc (container, lxc_attach_path=None, \**kwargs) - Construct a context on the local machine within an LXC container using - the ``lxc-attach`` program. + Construct a context on the local machine within an LXC classic + container using the ``lxc-attach`` program. - Accepts all parameters accepted by :py:meth:`local`, in addition to: + Accepts all parameters accepted by :meth:`local`, in addition to: :param str container: - Existing container to connect to. Defaults to ``None``. + Existing container to connect to. Defaults to :data:`None`. :param str lxc_attach_path: Filename or complete path to the ``lxc-attach`` binary. ``PATH`` will be searched if given as a filename. Defaults to ``lxc-attach``. - .. method:: setns (container, kind, docker_path=None, lxc_info_path=None, machinectl_path=None, \**kwargs) + .. method:: lxc (container, lxc_attach_path=None, \**kwargs) + + Construct a context on the local machine within a LXD container using + the ``lxc`` program. + + Accepts all parameters accepted by :meth:`local`, in addition to: + + :param str container: + Existing container to connect to. Defaults to :data:`None`. + :param str lxc_path: + Filename or complete path to the ``lxc`` binary. ``PATH`` will be + searched if given as a filename. Defaults to ``lxc``. + + .. method:: setns (container, kind, username=None, docker_path=None, lxc_info_path=None, machinectl_path=None, \**kwargs) Construct a context in the style of :meth:`local`, but change the active Linux process namespaces via calls to `setns(1)` before executing Python. The namespaces to use, and the active root file system are taken from - the root PID of a running Docker, LXC, or systemd-nspawn container. + the root PID of a running Docker, LXC, LXD, or systemd-nspawn + container. A program is required only to find the root PID, after which management of the child Python interpreter is handled directly. @@ -617,14 +647,19 @@ Router Class :param str container: Container to connect to. :param str kind: - One of ``docker``, ``lxc`` or ``machinectl``. + One of ``docker``, ``lxc``, ``lxd`` or ``machinectl``. + :param str username: + Username within the container to :func:`setuid` to. Defaults to + ``root``. :param str docker_path: Filename or complete path to the Docker binary. ``PATH`` will be searched if given as a filename. Defaults to ``docker``. + :param str lxc_path: + Filename or complete path to the LXD ``lxc`` binary. ``PATH`` will + be searched if given as a filename. Defaults to ``lxc``. :param str lxc_info_path: - Filename or complete path to the ``lxc-info`` binary. ``PATH`` - will be searched if given as a filename. Defaults to - ``lxc-info``. + Filename or complete path to the LXC ``lxc-info`` binary. ``PATH`` + will be searched if given as a filename. Defaults to ``lxc-info``. :param str machinectl_path: Filename or complete path to the ``machinectl`` binary. ``PATH`` will be searched if given as a filename. Defaults to @@ -636,7 +671,7 @@ Router Class ``su`` process is started in a newly allocated pseudo-terminal, and supports typing interactive passwords. - Accepts all parameters accepted by :py:meth:`local`, in addition to: + Accepts all parameters accepted by :meth:`local`, in addition to: :param str username: Username to pass to ``su``, defaults to ``root``. @@ -663,7 +698,7 @@ Router Class The ``sudo`` process is started in a newly allocated pseudo-terminal, and supports typing interactive passwords. - Accepts all parameters accepted by :py:meth:`local`, in addition to: + Accepts all parameters accepted by :meth:`local`, in addition to: :param str username: Username to pass to sudo as the ``-u`` parameter, defaults to @@ -674,34 +709,41 @@ Router Class :param str password: The password to use if/when sudo requests it. Depending on the sudo configuration, this is either the current account password or the - target account password. :py:class:`mitogen.sudo.PasswordError` + target account password. :class:`mitogen.sudo.PasswordError` will be raised if sudo requests a password but none is provided. :param bool set_home: - If :py:data:`True`, request ``sudo`` set the ``HOME`` environment + If :data:`True`, request ``sudo`` set the ``HOME`` environment variable to match the target UNIX account. :param bool preserve_env: - If :py:data:`True`, request ``sudo`` to preserve the environment of + If :data:`True`, request ``sudo`` to preserve the environment of the parent process. :param list sudo_args: - Arguments in the style of :py:data:`sys.argv` that would normally + Arguments in the style of :data:`sys.argv` that would normally be passed to ``sudo``. The arguments are parsed in-process to set equivalent parameters. Re-parsing ensures unsupported options cause - :py:class:`mitogen.core.StreamError` to be raised, and that + :class:`mitogen.core.StreamError` to be raised, and that attributes of the stream match the actual behaviour of ``sudo``. - .. method:: ssh (hostname, username=None, ssh_path=None, port=None, check_host_keys='enforce', password=None, identity_file=None, compression=True, \**kwargs) + .. method:: ssh (hostname, username=None, ssh_path=None, ssh_args=None, port=None, check_host_keys='enforce', password=None, identity_file=None, identities_only=True, compression=True, \**kwargs) + + Construct a remote context over an OpenSSH ``ssh`` invocation. - Construct a remote context over a ``ssh`` invocation. The ``ssh`` - process is started in a newly allocated pseudo-terminal, and supports - typing interactive passwords. + The ``ssh`` process is started in a newly allocated pseudo-terminal to + support typing interactive passwords and responding to prompts, if a + password is specified, or `check_host_keys=accept`. In other scenarios, + ``BatchMode`` is enabled and no PTY is allocated. For many-target + configurations, both options should be avoided as most systems have a + conservative limit on the number of pseudo-terminals that may exist. - Accepts all parameters accepted by :py:meth:`local`, in addition to: + Accepts all parameters accepted by :meth:`local`, in addition to: :param str username: The SSH username; default is unspecified, which causes SSH to pick the username to use. :param str ssh_path: Absolute or relative path to ``ssh``. Defaults to ``ssh``. + :param list ssh_args: + Additional arguments to pass to the SSH command. :param int port: Port number to connect to; default is unspecified, which causes SSH to pick the port number. @@ -717,7 +759,7 @@ Router Class unknown hosts cause a connection failure. :param str password: Password to type if/when ``ssh`` requests it. If not specified and - a password is requested, :py:class:`mitogen.ssh.PasswordError` is + a password is requested, :class:`mitogen.ssh.PasswordError` is raised. :param str identity_file: Path to an SSH private key file to use for authentication. Default @@ -727,13 +769,20 @@ Router Class the SSH client to perform authenticaion; agent authentication is automatically disabled, as is reading the default private key from ``~/.ssh/id_rsa``, or ``~/.ssh/id_dsa``. + :param bool identities_only: + If :data:`True` and a password or explicit identity file is + specified, instruct the SSH client to disable any authentication + identities inherited from the surrounding environment, such as + those loaded in any running ``ssh-agent``, or default key files + present in ``~/.ssh``. This ensures authentication attempts only + occur using the supplied password or SSH key. :param bool compression: - If :py:data:`True`, enable ``ssh`` compression support. Compression + If :data:`True`, enable ``ssh`` compression support. Compression has a minimal effect on the size of modules transmitted, as they are already compressed, however it has a large effect on every remaining message in the otherwise uncompressed stream protocol, such as function call arguments and return values. - :parama int ssh_debug_level: + :param int ssh_debug_level: Optional integer `0..3` indicating the SSH client debug level. :raises mitogen.ssh.PasswordError: A password was requested but none was specified, or the specified @@ -760,8 +809,8 @@ Context Class .. method:: send (msg) - Arrange for `msg` to be delivered to this context. Updates the - message's `dst_id` prior to routing it via the associated router. + Arrange for `msg` to be delivered to this context. + :attr:`dst_id ` is set to the target context ID. :param mitogen.core.Message msg: The message. @@ -769,43 +818,52 @@ Context Class .. method:: send_async (msg, persist=False) Arrange for `msg` to be delivered to this context, with replies - delivered to a newly constructed Receiver. Updates the message's - `dst_id` prior to routing it via the associated router and registers a - handle which is placed in the message's `reply_to`. + directed to a newly constructed receiver. :attr:`dst_id + ` is set to the target context ID, and :attr:`reply_to + ` is set to the newly constructed receiver's handle. :param bool persist: - If ``False``, the handler will be unregistered after a single + If :data:`False`, the handler will be unregistered after a single message has been received. :param mitogen.core.Message msg: The message. :returns: - :py:class:`mitogen.core.Receiver` configured to receive any replies + :class:`mitogen.core.Receiver` configured to receive any replies sent to the message's `reply_to` handle. .. method:: send_await (msg, deadline=None) - As with :py:meth:`send_async`, but expect a single reply - (`persist=False`) delivered within `deadline` seconds. + Like :meth:`send_async`, but expect a single reply (`persist=False`) + delivered within `deadline` seconds. :param mitogen.core.Message msg: The message. - :param float deadline: - If not ``None``, seconds before timing out waiting for a reply. - + If not :data:`None`, seconds before timing out waiting for a reply. + :returns: + The deserialized reply. :raises mitogen.core.TimeoutError: No message was received and `deadline` passed. .. currentmodule:: mitogen.parent +.. autoclass:: CallChain + :members: + .. class:: Context - Extend :py:class:`mitogen.core.Router` with functionality useful to - masters, and child contexts who later become parents. Currently when this - class is required, the target context's router is upgraded at runtime. + Extend :class:`mitogen.core.Context` with functionality useful to masters, + and child contexts who later become parents. Currently when this class is + required, the target context's router is upgraded at runtime. + + .. attribute:: default_call_chain + + A :class:`CallChain` instance constructed by default, with pipelining + disabled. :meth:`call`, :meth:`call_async` and :meth:`call_no_reply` + use this instance. .. method:: shutdown (wait=False) @@ -816,7 +874,7 @@ Context Class terminate a hung context using this method. This will be fixed shortly. :param bool wait: - If :py:data:`True`, block the calling thread until the context has + If :data:`True`, block the calling thread until the context has completely terminated. :returns: If `wait` is :data:`False`, returns a :class:`mitogen.core.Latch` @@ -826,81 +884,15 @@ Context Class .. method:: call_async (fn, \*args, \*\*kwargs) - Arrange for the context's ``CALL_FUNCTION`` handle to receive a - message that causes `fn(\*args, \**kwargs)` to be invoked on the - context's main thread. - - :param fn: - A free function in module scope, or a classmethod or staticmethod - of a class directly reachable from module scope: - - .. code-block:: python - - # mymodule.py - - def my_func(): - """A free function reachable as mymodule.my_func""" - - class MyClass: - @staticmethod - def my_staticmethod(): - """Reachable as mymodule.MyClass.my_staticmethod""" - - @classmethod - def my_classmethod(cls): - """Reachable as mymodule.MyClass.my_classmethod""" - - def my_instancemethod(self): - """Unreachable: requires a class instance!""" - - class MyEmbeddedClass: - @classmethod - def my_classmethod(cls): - """Not directly reachable from module scope!""" - - :param tuple args: - Function arguments, if any. See :ref:`serialization-rules` for - permitted types. - :param dict kwargs: - Function keyword arguments, if any. See :ref:`serialization-rules` - for permitted types. - :returns: - :py:class:`mitogen.core.Receiver` configured to receive the result - of the invocation: - - .. code-block:: python - - recv = context.call_async(os.check_output, 'ls /tmp/') - try: - # Prints output once it is received. - msg = recv.get() - print(msg.unpickle()) - except mitogen.core.CallError, e: - print('Call failed:', str(e)) - - Asynchronous calls may be dispatched in parallel to multiple - contexts and consumed as they complete using - :py:class:`mitogen.select.Select`. + See :meth:`CallChain.call_async`. .. method:: call (fn, \*args, \*\*kwargs) - Equivalent to :py:meth:`call_async(fn, \*args, \**kwargs).get().unpickle() - `. - - :returns: - The function's return value. - - :raises mitogen.core.CallError: - An exception was raised in the remote context during execution. + See :meth:`CallChain.call`. .. method:: call_no_reply (fn, \*args, \*\*kwargs) - Send a function call, but expect no return value. If the call fails, - the full exception will be logged to the target context's logging framework. - - :raises mitogen.core.CallError: - An exception was raised in the remote context during execution. - + See :meth:`CallChain.call_no_reply`. Receiver Class @@ -912,15 +904,15 @@ Receiver Class Receivers are used to wait for pickled responses from another context to be sent to a handle registered in this context. A receiver may be single-use - (as in the case of :py:meth:`mitogen.parent.Context.call_async`) or + (as in the case of :meth:`mitogen.parent.Context.call_async`) or multiple use. :param mitogen.core.Router router: Router to register the handler on. :param int handle: - If not ``None``, an explicit handle to register, otherwise an unused - handle is chosen. + If not :data:`None`, an explicit handle to register, otherwise an + unused handle is chosen. :param bool persist: If :data:`True`, do not unregister the receiver's handler after the @@ -928,20 +920,20 @@ Receiver Class :param mitogen.core.Context respondent: Reference to the context this receiver is receiving from. If not - ``None``, arranges for the receiver to receive a dead message if + :data:`None`, arranges for the receiver to receive a dead message if messages can no longer be routed to the context, due to disconnection or exit. .. attribute:: notify = None - If not ``None``, a reference to a function invoked as + If not :data:`None`, a reference to a function invoked as `notify(receiver)` when a new message is delivered to this receiver. - Used by :py:class:`mitogen.select.Select` to implement waiting on + Used by :class:`mitogen.select.Select` to implement waiting on multiple receivers. .. py:method:: to_sender () - Return a :py:class:`mitogen.core.Sender` configured to deliver messages + Return a :class:`mitogen.core.Sender` configured to deliver messages to this receiver. Since a Sender can be serialized, this makes it convenient to pass `(context_id, handle)` pairs around:: @@ -958,15 +950,15 @@ Receiver Class .. py:method:: empty () - Return :data:`True` if calling :py:meth:`get` would block. + Return :data:`True` if calling :meth:`get` would block. - As with :py:class:`Queue.Queue`, :data:`True` may be returned even - though a subsequent call to :py:meth:`get` will succeed, since a - message may be posted at any moment between :py:meth:`empty` and - :py:meth:`get`. + As with :class:`Queue.Queue`, :data:`True` may be returned even + though a subsequent call to :meth:`get` will succeed, since a + message may be posted at any moment between :meth:`empty` and + :meth:`get`. - :py:meth:`empty` is only useful to avoid a race while installing - :py:attr:`notify`: + :meth:`empty` is only useful to avoid a race while installing + :attr:`notify`: .. code-block:: python @@ -980,15 +972,15 @@ Receiver Class .. py:method:: close () - Cause :py:class:`mitogen.core.ChannelError` to be raised in any thread - waiting in :py:meth:`get` on this receiver. + Cause :class:`mitogen.core.ChannelError` to be raised in any thread + waiting in :meth:`get` on this receiver. .. py:method:: get (timeout=None) Sleep waiting for a message to arrive on this receiver. :param float timeout: - If not ``None``, specifies a timeout in seconds. + If not :data:`None`, specifies a timeout in seconds. :raises mitogen.core.ChannelError: The remote end indicated the channel should be closed, or @@ -999,17 +991,17 @@ Receiver Class :returns: `(msg, data)` tuple, where `msg` is the - :py:class:`mitogen.core.Message` that was received, and `data` is + :class:`mitogen.core.Message` that was received, and `data` is its unpickled data part. .. py:method:: get_data (timeout=None) - Like :py:meth:`get`, except only return the data part. + Like :meth:`get`, except only return the data part. .. py:method:: __iter__ () Block and yield `(msg, data)` pairs delivered to this receiver until - :py:class:`mitogen.core.ChannelError` is raised. + :class:`mitogen.core.ChannelError` is raised. Sender Class @@ -1020,10 +1012,10 @@ Sender Class .. class:: Sender (context, dst_handle) Senders are used to send pickled messages to a handle in another context, - it is the inverse of :py:class:`mitogen.core.Sender`. + it is the inverse of :class:`mitogen.core.Sender`. Senders may be serialized, making them convenient to wire up data flows. - See :py:meth:`mitogen.core.Receiver.to_sender` for more information. + See :meth:`mitogen.core.Receiver.to_sender` for more information. :param mitogen.core.Context context: Context to send messages to. @@ -1032,7 +1024,7 @@ Sender Class .. py:method:: close () - Send a dead message to the remote end, causing :py:meth:`ChannelError` + Send a dead message to the remote end, causing :meth:`ChannelError` to be raised in any waiting thread. .. py:method:: send (data) @@ -1051,11 +1043,11 @@ Select Class Support scatter/gather asynchronous calls and waiting on multiple receivers, channels, and sub-Selects. Accepts a sequence of - :py:class:`mitogen.core.Receiver` or :py:class:`mitogen.select.Select` + :class:`mitogen.core.Receiver` or :class:`mitogen.select.Select` instances and returns the first value posted to any receiver or select. If `oneshot` is :data:`True`, then remove each receiver as it yields a - result; since :py:meth:`__iter__` terminates once the final receiver is + result; since :meth:`__iter__` terminates once the final receiver is removed, this makes it convenient to respond to calls made in parallel: .. code-block:: python @@ -1070,7 +1062,7 @@ Select Class # Iteration ends when last Receiver yields a result. print('Received total %s from %s receivers' % (total, len(recvs))) - :py:class:`Select` may drive a long-running scheduler: + :class:`Select` may drive a long-running scheduler: .. code-block:: python @@ -1081,7 +1073,7 @@ Select Class for context, workfunc in get_new_work(): select.add(context.call_async(workfunc)) - :py:class:`Select` may be nested: + :class:`Select` may be nested: .. code-block:: python @@ -1099,11 +1091,12 @@ Select Class .. py:classmethod:: all (it) - Take an iterable of receivers and retrieve a :py:class:`Message` from + Take an iterable of receivers and retrieve a :class:`Message` from each, returning the result of calling `msg.unpickle()` on each in turn. Results are returned in the order they arrived. - This is sugar for handling batch :py:class:`Context.call_async` + This is sugar for handling batch + :meth:`Context.call_async ` invocations: .. code-block:: python @@ -1119,32 +1112,32 @@ Select Class .. code-block:: python - sum(context.call_async(get_disk_usage).get().unpickle() - for context in contexts) + recvs = [c.call_async(get_disk_usage) for c in contexts] + sum(recv.get().unpickle() for recv in recvs) - Result processing happens concurrently to new results arriving, so - :py:meth:`all` should always be faster. + Result processing happens in the order results arrive, rather than the + order requests were issued, so :meth:`all` should always be faster. .. py:method:: get (timeout=None, block=True) Fetch the next available value from any receiver, or raise - :py:class:`mitogen.core.TimeoutError` if no value is available within + :class:`mitogen.core.TimeoutError` if no value is available within `timeout` seconds. - On success, the message's :py:attr:`receiver + On success, the message's :attr:`receiver ` attribute is set to the receiver. :param float timeout: Timeout in seconds. :param bool block: - If :py:data:`False`, immediately raise - :py:class:`mitogen.core.TimeoutError` if the select is empty. + If :data:`False`, immediately raise + :class:`mitogen.core.TimeoutError` if the select is empty. :return: - :py:class:`mitogen.core.Message` + :class:`mitogen.core.Message` :raises mitogen.core.TimeoutError: Timeout was reached. :raises mitogen.core.LatchError: - :py:meth:`close` has been called, and the underlying latch is no + :meth:`close` has been called, and the underlying latch is no longer valid. .. py:method:: __bool__ () @@ -1155,8 +1148,8 @@ Select Class Remove the select's notifier function from each registered receiver, mark the associated latch as closed, and cause any thread currently - sleeping in :py:meth:`get` to be woken with - :py:class:`mitogen.core.LatchError`. + sleeping in :meth:`get` to be woken with + :class:`mitogen.core.LatchError`. This is necessary to prevent memory leaks in long-running receivers. It is called automatically when the Python :keyword:`with` statement is @@ -1164,35 +1157,35 @@ Select Class .. py:method:: empty () - Return :data:`True` if calling :py:meth:`get` would block. + Return :data:`True` if calling :meth:`get` would block. - As with :py:class:`Queue.Queue`, :data:`True` may be returned even - though a subsequent call to :py:meth:`get` will succeed, since a - message may be posted at any moment between :py:meth:`empty` and - :py:meth:`get`. + As with :class:`Queue.Queue`, :data:`True` may be returned even + though a subsequent call to :meth:`get` will succeed, since a + message may be posted at any moment between :meth:`empty` and + :meth:`get`. - :py:meth:`empty` may return ``False`` even when :py:meth:`get` would - block if another thread has drained a receiver added to this select. - This can be avoided by only consuming each receiver from a single - thread. + :meth:`empty` may return :data:`False` even when :meth:`get` + would block if another thread has drained a receiver added to this + select. This can be avoided by only consuming each receiver from a + single thread. .. py:method:: __iter__ (self) - Yield the result of :py:meth:`get` until no receivers remain in the + Yield the result of :meth:`get` until no receivers remain in the select, either because `oneshot` is :data:`True`, or each receiver was - explicitly removed via :py:meth:`remove`. + explicitly removed via :meth:`remove`. .. py:method:: add (recv) - Add the :py:class:`mitogen.core.Receiver` or - :py:class:`mitogen.core.Channel` `recv` to the select. + Add the :class:`mitogen.core.Receiver` or + :class:`mitogen.core.Channel` `recv` to the select. .. py:method:: remove (recv) - Remove the :py:class:`mitogen.core.Receiver` or - :py:class:`mitogen.core.Channel` `recv` from the select. Note that if - the receiver has notified prior to :py:meth:`remove`, then it will - still be returned by a subsequent :py:meth:`get`. This may change in a + Remove the :class:`mitogen.core.Receiver` or + :class:`mitogen.core.Channel` `recv` from the select. Note that if + the receiver has notified prior to :meth:`remove`, then it will + still be returned by a subsequent :meth:`get`. This may change in a future version. @@ -1203,7 +1196,7 @@ Channel Class .. class:: Channel (router, context, dst_handle, handle=None) - A channel inherits from :py:class:`mitogen.core.Sender` and + A channel inherits from :class:`mitogen.core.Sender` and `mitogen.core.Receiver` to provide bidirectional functionality. Since all handles aren't known until after both ends are constructed, for @@ -1221,12 +1214,12 @@ Broker Class Responsible for handling I/O multiplexing in a private thread. **Note:** This is the somewhat limited core version of the Broker class - used by child contexts. The master subclass is documented below this one. + used by child contexts. The master subclass is documented below. .. attribute:: shutdown_timeout = 3.0 - Seconds grace to allow :py:class:`streams ` to shutdown - gracefully before force-disconnecting them during :py:meth:`shutdown`. + Seconds grace to allow :class:`streams ` to shutdown + gracefully before force-disconnecting them during :meth:`shutdown`. .. method:: defer (func, \*args, \*kwargs) @@ -1236,26 +1229,26 @@ Broker Class .. method:: start_receive (stream) - Mark the :py:attr:`receive_side ` on `stream` as + Mark the :attr:`receive_side ` on `stream` as ready for reading. Safe to call from any thread. When the associated file descriptor becomes ready for reading, - :py:meth:`BasicStream.on_receive` will be called. + :meth:`BasicStream.on_receive` will be called. .. method:: stop_receive (stream) - Mark the :py:attr:`receive_side ` on `stream` as + Mark the :attr:`receive_side ` on `stream` as not ready for reading. Safe to call from any thread. .. method:: _start_transmit (stream) - Mark the :py:attr:`transmit_side ` on `stream` as + Mark the :attr:`transmit_side ` on `stream` as ready for writing. Must only be called from the Broker thread. When the associated file descriptor becomes ready for writing, - :py:meth:`BasicStream.on_transmit` will be called. + :meth:`BasicStream.on_transmit` will be called. .. method:: stop_receive (stream) - Mark the :py:attr:`transmit_side ` on `stream` as + Mark the :attr:`transmit_side ` on `stream` as not ready for writing. Safe to call from any thread. .. method:: shutdown @@ -1265,12 +1258,13 @@ Broker Class .. method:: join Wait for the broker to stop, expected to be called after - :py:meth:`shutdown`. + :meth:`shutdown`. .. method:: keep_alive - Return :data:`True` if any reader's :py:attr:`Side.keep_alive` - attribute is :data:`True`, or any :py:class:`Context` is still + Return :data:`True` if any reader's :attr:`Side.keep_alive` + attribute is :data:`True`, or any + :class:`Context ` is still registered that is not the master. Used to delay shutdown while some important work is in progress (e.g. log draining). @@ -1278,11 +1272,11 @@ Broker Class .. method:: _broker_main - Handle events until :py:meth:`shutdown`. On shutdown, invoke - :py:meth:`Stream.on_shutdown` for every active stream, then allow up to - :py:attr:`shutdown_timeout` seconds for the streams to unregister + Handle events until :meth:`shutdown`. On shutdown, invoke + :meth:`Stream.on_shutdown` for every active stream, then allow up to + :attr:`shutdown_timeout` seconds for the streams to unregister themselves before forcefully calling - :py:meth:`Stream.on_disconnect`. + :meth:`Stream.on_disconnect`. .. currentmodule:: mitogen.master @@ -1298,7 +1292,7 @@ Broker Class :param bool install_watcher: If :data:`True`, an additional thread is started to monitor the - lifetime of the main thread, triggering :py:meth:`shutdown` + lifetime of the main thread, triggering :meth:`shutdown` automatically in case the user forgets to call it, or their code crashed. @@ -1309,8 +1303,8 @@ Broker Class .. attribute:: shutdown_timeout = 5.0 - Seconds grace to allow :py:class:`streams ` to shutdown - gracefully before force-disconnecting them during :py:meth:`shutdown`. + Seconds grace to allow :class:`streams ` to shutdown + gracefully before force-disconnecting them during :meth:`shutdown`. Utility Functions @@ -1326,7 +1320,7 @@ A random assortment of utility functions useful on masters and children. Many tools love to subclass built-in types in order to implement useful functionality, such as annotating the safety of a Unicode string, or adding additional methods to a dict. However, cPickle loves to preserve those - subtypes during serialization, resulting in CallError during :py:meth:`call + subtypes during serialization, resulting in CallError during :meth:`call ` in the target when it tries to deserialize the data. @@ -1346,27 +1340,27 @@ A random assortment of utility functions useful on masters and children. Remove all entries mentioning ``site-packages`` or ``Extras`` from the system path. Used primarily for testing on OS X within a virtualenv, where - OS X bundles some ancient version of the :py:mod:`six` module. + OS X bundles some ancient version of the :mod:`six` module. .. currentmodule:: mitogen.utils .. function:: log_to_file (path=None, io=False, level='INFO') - Install a new :py:class:`logging.Handler` writing applications logs to the + Install a new :class:`logging.Handler` writing applications logs to the filesystem. Useful when debugging slave IO problems. Parameters to this function may be overridden at runtime using environment variables. See :ref:`logging-env-vars`. :param str path: - If not ``None``, a filesystem path to write logs to. Otherwise, logs - are written to :py:data:`sys.stderr`. + If not :data:`None`, a filesystem path to write logs to. Otherwise, + logs are written to :data:`sys.stderr`. :param bool io: If :data:`True`, include extremely verbose IO logs in the output. Useful for debugging hangs, less useful for debugging application code. :param str level: - Name of the :py:mod:`logging` package constant that is the minimum + Name of the :mod:`logging` package constant that is the minimum level to log at. Useful levels are ``DEBUG``, ``INFO``, ``WARNING``, and ``ERROR``. @@ -1374,7 +1368,7 @@ A random assortment of utility functions useful on masters and children. .. function:: run_with_router(func, \*args, \**kwargs) Arrange for `func(router, \*args, \**kwargs)` to run with a temporary - :py:class:`mitogen.master.Router`, ensuring the Router and Broker are + :class:`mitogen.master.Router`, ensuring the Router and Broker are correctly shut down during normal or exceptional return. :returns: @@ -1383,7 +1377,7 @@ A random assortment of utility functions useful on masters and children. .. currentmodule:: mitogen.utils .. decorator:: with_router - Decorator version of :py:func:`run_with_router`. Example: + Decorator version of :func:`run_with_router`. Example: .. code-block:: python @@ -1399,29 +1393,9 @@ Exceptions .. currentmodule:: mitogen.core -.. class:: Error (fmt, \*args) - - Base for all exceptions raised by Mitogen. - -.. class:: CallError (e) - - Raised when :py:meth:`Context.call() ` fails. - A copy of the traceback from the external context is appended to the - exception message. - -.. class:: ChannelError (fmt, \*args) - - Raised when a channel dies or has been closed. - -.. class:: LatchError (fmt, \*args) - - Raised when an attempt is made to use a :py:class:`mitogen.core.Latch` that - has been marked closed. - -.. class:: StreamError (fmt, \*args) - - Raised when a stream cannot be established. - -.. class:: TimeoutError (fmt, \*args) - - Raised when a timeout occurs on a stream. +.. autoclass:: Error +.. autoclass:: CallError +.. autoclass:: ChannelError +.. autoclass:: LatchError +.. autoclass:: StreamError +.. autoclass:: TimeoutError diff --git a/docs/changelog.rst b/docs/changelog.rst index 14eeaae6..099b253b 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -15,14 +15,232 @@ Release Notes -.. comment +v0.2.3 (2018-10-23) +------------------- + +Mitogen for Ansible +~~~~~~~~~~~~~~~~~~~ + +Enhancements +^^^^^^^^^^^^ + +* `#315 `_, + `#392 `_: Ansible 2.6 and 2.7 are + supported. + +* `#321 `_, + `#336 `_: temporary file handling + was simplified, undoing earlier damage caused by compatibility fixes, + improving 2.6 compatibility, and avoiding two network roundtrips for every + related action + (`assemble `_, + `aws_s3 `_, + `copy `_, + `patch `_, + `script `_, + `template `_, + `unarchive `_, + `uri `_). See + :ref:`ansible_tempfiles` for a complete description. + +* `#376 `_, + `#377 `_: the ``kubectl`` connection + type is now supported. Contributed by Yannig Perré. + +* `084c0ac0 `_: avoid a + roundtrip in + `copy `_ and + `template `_ + due to an unfortunate default. + +* `7458dfae `_: avoid a + roundtrip when transferring files smaller than 124KiB. Copy and template + actions are now 2-RTT, reducing runtime for a 20-iteration template loop over + a 250 ms link from 30 seconds to 10 seconds compared to v0.2.2, down from 120 + seconds compared to vanilla. + +* `#337 `_: To avoid a scaling + limitation, a PTY is no longer allocated for an SSH connection unless the + configuration specifies a password. + +* `d62e6e2a `_: many-target + runs executed the dependency scanner redundantly due to missing + synchronization, wasting significant runtime in the connection multiplexer. + In one case work was reduced by 95%, which may manifest as faster runs. + +* `5189408e `_: threads are + cooperatively scheduled, minimizing `GIL + `_ contention, and + reducing context switching by around 90%. This manifests as an overall + improvement, but is easily noticeable on short many-target runs, where + startup overhead dominates runtime. + +* The `faulthandler `_ module is + automatically activated if it is installed, simplifying debugging of hangs. + See :ref:`diagnosing-hangs` for details. + +* The ``MITOGEN_DUMP_THREAD_STACKS`` environment variable's value now indicates + the number of seconds between stack dumps. See :ref:`diagnosing-hangs` for + details. + + +Fixes +^^^^^ + +* `#251 `_, + `#340 `_: Connection Delegation + could establish connections to the wrong target when ``delegate_to:`` is + present. + +* `#291 `_: when Mitogen had + previously been installed using ``pip`` or ``setuptools``, the globally + installed version could conflict with a newer version bundled with an + extension that had been installed using the documented steps. Now the bundled + library always overrides over any system-installed copy. + +* `#324 `_: plays with a + `custom module_utils `_ + would fail due to fallout from the Python 3 port and related tests being + disabled. + +* `#331 `_: the connection + multiplexer subprocess always exits before the main Ansible process, ensuring + logs generated by it do not overwrite the user's prompt when ``-vvv`` is + enabled. + +* `#332 `_: support a new + :func:`sys.excepthook`-based module exit mechanism added in Ansible 2.6. + +* `#338 `_: compatibility: changes to + ``/etc/environment`` and ``~/.pam_environment`` made by a task are reflected + in the runtime environment of subsequent tasks. See + :ref:`ansible_process_env` for a complete description. + +* `#343 `_: the sudo ``--login`` + option is supported. + +* `#344 `_: connections no longer + fail when the controller's login username contains slashes. + +* `#345 `_: the ``IdentitiesOnly + yes`` option is no longer supplied to OpenSSH by default, better matching + Ansible's behaviour. + +* `#355 `_: tasks configured to run + in an isolated forked subprocess were forked from the wrong parent context. + This meant built-in modules overridden via a custom ``module_utils`` search + path may not have had any effect. + +* `#362 `_: to work around a slow + algorithm in the :mod:`subprocess` module, the maximum number of open files + in processes running on the target is capped to 512, reducing the work + required to start a subprocess by >2000x in default CentOS configurations. + +* `#397 `_: recent Mitogen master + versions could fail to clean up temporary directories in a number of + circumstances, and newer Ansibles moved to using :mod:`atexit` to effect + temporary directory cleanup in some circumstances. + +* `b9112a9c `_, + `2c287801 `_: OpenSSH 7.5 + permission denied prompts are now recognized. Contributed by Alex Willmer. + +* A missing check caused an exception traceback to appear when using the + ``ansible`` command-line tool with a missing or misspelled module name. + +* Ansible since >=2.7 began importing :mod:`__main__` from + :mod:`ansible.module_utils.basic`, causing an error during execution, due to + the controller being configured to refuse network imports outside the + ``ansible.*`` namespace. Update the target implementation to construct a stub + :mod:`__main__` module to satisfy the otherwise seemingly vestigial import. + + +Core Library +~~~~~~~~~~~~ + +* A new :class:`mitogen.parent.CallChain` class abstracts safe pipelining of + related function calls to a target context, cancelling the chain if an + exception occurs. + +* `#305 `_: fix a long-standing minor + race relating to the logging framework, where *no route for Message..* + would frequently appear during startup. + +* `#313 `_: + :meth:`mitogen.parent.Context.call` was documented as capable of accepting + static methods. While possible on Python 2.x the result is ugly, and in every + case it should be trivial to replace with a classmethod. The documentation + was fixed. + +* `#337 `_: to avoid a scaling + limitation, a PTY is no longer allocated for each OpenSSH client if it can be + avoided. PTYs are only allocated if a password is supplied, or when + `host_key_checking=accept`. This is since Linux has a default of 4096 PTYs + (``kernel.pty.max``), while OS X has a default of 127 and an absolute maximum + of 999 (``kern.tty.ptmx_max``). + +* `#339 `_: the LXD connection method + was erroneously executing LXC Classic commands. + +* `#345 `_: the SSH connection method + allows optionally disabling ``IdentitiesOnly yes``. + +* `#356 `_: if the master Python + process does not have :data:`sys.executable` set, the default Python + interpreter used for new children on the local machine defaults to + ``"/usr/bin/python"``. + +* `#366 `_, + `#380 `_: attempts by children to + import :mod:`__main__` where the main program module lacks an execution guard + are refused, and an error is logged. This prevents a common and highly + confusing error when prototyping new scripts. - v0.2.3 (2018-07-??) - ------------------- +* `#371 `_: the LXC connection method + uses a more compatible method to establish an non-interactive session. + Contributed by Brian Candler. - * `#315 `_: Mitogen for Ansible is - supported under Ansible 2.6. Contributed by `Dan Quackenbush - `_. +* `af2ded66 `_: add + :func:`mitogen.fork.on_fork` to allow non-Mitogen managed process forks to + clean up Mitogen resources in the child. + +* `d6784242 `_: the setns method + always resets ``HOME``, ``SHELL``, ``LOGNAME`` and ``USER`` environment + variables to an account in the target container, defaulting to ``root``. + +* `830966bf `_: the UNIX + listener no longer crashes if the peer process disappears in the middle of + connection setup. + + +Thanks! +~~~~~~~ + +Mitogen would not be possible without the support of users. A huge thanks for +bug reports, features and fixes in this release contributed by +`Alex Russu `_, +`Alex Willmer `_, +`atoom `_, +`Berend De Schouwer `_, +`Brian Candler `_, +`Dan Quackenbush `_, +`dsgnr `_, +`Jesse London `_, +`John McGrath `_, +`Jonathan Rosser `_, +`Josh Smift `_, +`Luca Nunzi `_, +`Orion Poplawski `_, +`Peter V. Saveliev `_, +`Pierre-Henry Muller `_, +`Pierre-Louis Bonicoli `_, +`Prateek Jain `_, +`RedheatWei `_, +`Rick Box `_, +`nikitakazantsev12 `_, +`Tawana Musewe `_, +`Timo Beckers `_, and +`Yannig Perré `_. v0.2.2 (2018-07-26) @@ -90,6 +308,9 @@ Core Library could spuriously wake up due to ignoring an error bit set on events returned by the kernel, manifesting as a failure to read from an unrelated descriptor. +* `#342 `_: The ``network_cli`` + connection type would fail due to a missing internal SSH plugin method. + * Standard IO forwarding accidentally configured the replacement ``stdout`` and ``stderr`` write descriptors as non-blocking, causing subprocesses that generate more output than kernel buffer space existed to throw errors. The @@ -116,12 +337,13 @@ the bug reports and pull requests in this release contributed by `Colin McCarthy `_, `Dan Quackenbush `_, `Duane Zamrok `_, -`falbanese `_, `Gonzalo Servat `_, `Guy Knights `_, `Josh Smift `_, `Mark Janssen `_, `Mike Walker `_, +`Orion Poplawski `_, +`falbanese `_, `Tawana Musewe `_, and `Zach Swanson `_. @@ -166,7 +388,7 @@ within a stable series. Mitogen for Ansible ~~~~~~~~~~~~~~~~~~~ -* Support for Ansible 2.3 - 2.5.x and any mixture of Python 2.6, 2.7 or 3.6 on +* Support for Ansible 2.3 - 2.7.x and any mixture of Python 2.6, 2.7 or 3.6 on controller and target nodes. * Drop-in support for many Ansible connection types. @@ -198,15 +420,18 @@ Mitogen for Ansible - initech_app - y2k_fix -* When running with ``-vvv``, log messages such as *mitogen: Router(Broker(0x7f5a48921590)): no route - for Message(..., 102, ...), my ID is ...* may be visible. These are due to a - minor race while initializing logging and can be ignored. +.. * When running with ``-vvv``, log messages will be printed to the console + *after* the Ansible run completes, as connection multiplexer shutdown only + begins after Ansible exits. This is due to a lack of suitable shutdown hook + in Ansible, and is fairly harmless, albeit cosmetically annoying. A future + release may include a solution. -* When running with ``-vvv``, log messages will be printed to the console - *after* the Ansible run completes, as connection multiplexer shutdown only - begins after Ansible exits. This is due to a lack of suitable shutdown hook - in Ansible, and is fairly harmless, albeit cosmetically annoying. A future - release may include a solution. +.. * Configurations will break that rely on the `hashbang argument splitting + behaviour `_ of the + ``ansible_python_interpreter`` setting, contrary to the Ansible + documentation. This will be addressed in a future 0.2 release. + +* The Ansible 2.7 ``reboot`` module is not yet supported. * Performance does not scale linearly with target count. This requires significant additional work, as major bottlenecks exist in the surrounding @@ -235,11 +460,6 @@ Mitogen for Ansible actions, such as the ``synchronize`` module. This will be addressed in the 0.3 series. -* Configurations will break that rely on the `hashbang argument splitting - behaviour `_ of the - ``ansible_python_interpreter`` setting, contrary to the Ansible - documentation. This will be addressed in a future 0.2 release. - Core Library ~~~~~~~~~~~~ diff --git a/docs/compared.rst b/docs/compared.rst deleted file mode 100644 index b75ae3f2..00000000 --- a/docs/compared.rst +++ /dev/null @@ -1,226 +0,0 @@ - -Mitogen Compared To -------------------- - -This provides a little free-text summary of conceptual differences between -Mitogen and other tools, along with some basic perceptual metrics (project -maturity/age, quality of tests, function matrix) - - -Ansible -####### - -Ansible_ is a complete provisioning system, Mitogen is a small component of such a system. - -You should use Ansible if ... - -You should not use Ansible if ... - - -.. _Ansible: https://docs.ansible.com/ansible/latest/index.html -.. _ansible.src: https://github.com/ansible/ansible/ - -Baker -##### - - Baker_ lets you easily add a command line interface to your Python - functions using a simple decorator, to create scripts with "sub-commands", - similar to Django's ``manage.py``, ``svn``, ``hg``, etc. - -- Unmaintained since 2015 -- No obvious remote execution functionality - -.. _Baker: https://bitbucket.org/mchaput/baker - -Chopsticks -########## - -Chopsticks_ also supports recursion! but the recursively executed instance has no special knowledge of its identity in a tree structure, and little support for functions running in the master to directly invoke functions in a recursive context.. effectively each recursion produces a new master, from which function calls must be made. - -executing functions from __main__ entails picking just that function and deps -out of the main module, not transferring the module intact. that approach works -but it's much messier than just arranging for __main__ to be imported and -executed through the import mechanism. - -supports sudo but no support for require_tty or typing a sudo password. also supports SSH and Docker. - -good set of tests - -real PEP-302 module loader, but doesn't try to cope with master also relying on -a PEP-302 module loader (e.g. py2exe). - -Based on the tox configuration Python 2.7, and 3.3 to 3.6 are supported. - -I/O multiplexer in the master, but not in children. - -As with Execnet it includes its own serialization - pencode_ supports - -- most Python primitive types (``bytes``/``str``/``unicode``, ``list``, ``tuple`` ...) -- identity references -- self referencing (recursive) data srtuctures - -pencode lacks support for arbitrary classes. Byte strings require special -treatment if they contain non-ascii characters. Some primitive types -(e.g. ``complex``) are not handled. This would be straightforwar to address. -Values are length-prefixed with a 32 bit unsigned integer, meaning values -are limited to 4 billion bytes or items in length. - -design is reminiscent of Mitogen in places (Tunnel is practically identical to -Mitogen's Stream), and closer to Execnet elsewhere (lack of uniformity, -tendency to prefer logic expressed in if/else special case soup rather than the -type system, though some of that is due to supporting Python 3, so not judging -too harshly!) - -Chopsticks has its own `Chopsticks vs`_ comparisons. - -You should use Chopsticks if you need Python 3 support. - -.. _Chopsticks: https://chopsticks.readthedocs.io/en/stable/ -.. _Chopsticks.src: https://github.com/lordmauve/chopsticks/ -.. _Chopsticks vs: https://chopsticks.readthedocs.io/en/stable/intro.html#chopsticks-vs -.. _pencode: https://github.com/lordmauve/chopsticks/blob/master/doc/pencode.rst -.. _pencode.src: https://github.com/lordmauve/chopsticks/blob/master/chopsticks/pencode.py - -Disco -##### - - Disco_ is a lightweight, open-source framework for distributed computing - based on the MapReduce paradigm. - -- An Erlang core, with Python bindings -- Wire format is pickle, according to `Execnet vs NLTK for distributed NLTK`_ - -.. _Disco: http://discoproject.org/ -.. _Execnet vs NLTK for distributed NLTK: https://streamhacker.com/2009/12/14/execnet-disco-distributed-nltk/ - -Execnet -####### - -Execnet_ - -- Parent and children may use threads, gevent, or eventlet, Mitogen only supports threads. -- No recursion -- Similar Channel abstraction but better developed.. includes waiting for remote to close its end -- Heavier emphasis on passing chunks of Python source code around, modules are loaded one-at-a-time with no dependency resolution mechanism -- Built-in unidirectional rsync-alike, compared to Mitogen's SSH emulation which allows use of real rsync in any supported mode -- no support for sudo, but supports connecting to vagrant -- works with read-only filesystem -- includes its own serialization_ independent of the standard library - - The obj and all contained objects must be of a builtin python type - (so nested dicts, sets, etc. are all ok but not user-level instances). - -- Known uses include `pytest-xdist`_, and `Distributed NLTK`_ - -You should use Execnet if you value code maturity more than featureset. - -.. _Execnet: https://codespeak.net/execnet/ -.. _serialization: https://codespeak.net/execnet/basics.html#dumps-loads -.. _pytest-xdist: https://pypi.python.org/pypi/pytest-xdist -.. _Distributed NLTK: https://streamhacker.com/2009/12/14/execnet-disco-distributed-nltk/ - -Fabric -###### - -Fabric_ allows execution of shell snippets on remote machines, Python functions run -locally, any remote interaction is fundamentally done via shell, with all the -limitations that entails. prefers to depend on SSH features (e.g. tunnelling) -than reinvent them - -You should use Fabric if you enjoy being woken at 4am to pages about broken -shell snippets. - -.. _fabric: http://www.fabfile.org/ - -Invoke -###### - -Invoke_ - -Python 2.6+, 3.3+ - -Basically a Fabric-alike - -.. _invoke: http://www.pyinvoke.org/ - -Multiprocessing -############### - -multiprocessing_ was added to the stdlib in Python 2.6. - - multiprocessing is a package that supports spawning processes using an - API similar to the threading module. The multiprocessing package offers - both local and remote concurrency - -There is a backport_ for Python 2.4 & 2.5, but it is not pure Python. -pymultiprocessing_ appears to be a pure Python implementation. -An ecosystem_ of packages has built up around multiprocessing. - -The `programming guidelines`_ section notes - -- Arguments to proxies must be picklable. On Windows this also applies to - ``multiprocessing.Process.__init__()`` arguments. -- Callers should beware replacing ``sys.stdin``, because - ``multiprocessing.Process._bootstrap()`` - will close it and open /dev/null instead - -.. _programming guidelines: https://docs.python.org/2/library/multiprocessing.html#programming-guidelines -.. _backport: https://pypi.python.org/pypi/multiprocessing -.. _pymultiprocessing: https://pypi.python.org/pypi/pymultiprocessing -.. _ecosystem: https://pypi.python.org/pypi?%3Aaction=search&term=multiprocessing&submit=search - -Paver -##### - -Paver_ - -More or less another task execution framework / make-alike, doesn't really deal -with remote execution at all. - -.. _Paver: https://github.com/paver/paver/ - -Plumbum -####### - -Plumbum_ - -Shell-only - -Basically syntax sugar for running shell commands. Nicer than raw shell -(depending on your opinions of operating overloading), but it's still shell. - -.. _Plumbum: https://pypi.python.org/pypi/plumbum - -Pyro4 -##### - -Pyro4_ -... - -.. _Pyro4: https://pythonhosted.org/Pyro4/ - -RPyC -#### - -RPyC_ - -- supports transparent object proxies similar to Pyro (with all the pain and suffering hidden network IO entails) -- significantly more 'frameworkey' feel -- runs multiplexer in a thread too? -- bootstrap over SSH only, no recursion and no sudo -- requires a writable filesystem - -.. _RPyC: https://rpyc.readthedocs.io/en/latest/ - -Salt -#### - -Salt_ - -- no crappy deps - -You should use Salt if you enjoy firefighting endless implementation bugs, -otherwise you should prefer Ansible. - -.. _Salt: https://docs.saltstack.com/en/latest/topics/ -.. _Salt.src: https://github.com/saltstack/salt diff --git a/docs/conf.py b/docs/conf.py index 57adf597..abb6e97e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -19,7 +19,7 @@ html_theme_options = { 'head_font_family': "Georgia, serif", } htmlhelp_basename = 'mitogendoc' -intersphinx_mapping = {'python': ('https://docs.python.org/2', None)} +intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} language = None master_doc = 'toc' project = u'Mitogen' diff --git a/docs/contributors.rst b/docs/contributors.rst index ee5c3132..dcfb50fa 100644 --- a/docs/contributors.rst +++ b/docs/contributors.rst @@ -10,6 +10,7 @@ sponsorship and outstanding future-thinking of its early adopters. + + + + + + + + + + + + + diff --git a/docs/internals.rst b/docs/internals.rst index 3d4d4130..03f12e1e 100644 --- a/docs/internals.rst +++ b/docs/internals.rst @@ -8,17 +8,27 @@ Internal API Reference signals -mitogen.core -============ +Constants +========= + +.. currentmodule:: mitogen.core +.. autodata:: CHUNK_SIZE Latch Class ------------ +=========== .. currentmodule:: mitogen.core +.. autoclass:: Latch + :members: -.. autoclass:: Latch () +PidfulStreamHandler Class +========================= + +.. currentmodule:: mitogen.core +.. autoclass:: PidfulStreamHandler + :members: Side Class @@ -50,24 +60,24 @@ Side Class .. attribute:: fd - Integer file descriptor to perform IO on, or ``None`` if + Integer file descriptor to perform IO on, or :data:`None` if :py:meth:`close` has been called. .. attribute:: keep_alive - If ``True``, causes presence of this side in :py:class:`Broker`'s + If :data:`True`, causes presence of this side in :py:class:`Broker`'s active reader set to defer shutdown until the side is disconnected. .. method:: fileno - Return :py:attr:`fd` if it is not ``None``, otherwise raise + Return :py:attr:`fd` if it is not :data:`None`, otherwise raise :py:class:`StreamError`. This method is implemented so that :py:class:`Side` can be used directly by :py:func:`select.select`. .. method:: close - Call :py:func:`os.close` on :py:attr:`fd` if it is not ``None``, then - set it to ``None``. + Call :py:func:`os.close` on :py:attr:`fd` if it is not :data:`None`, + then set it to :data:`None`. .. method:: read (n=CHUNK_SIZE) @@ -89,12 +99,9 @@ Side Class wrapping the underlying :py:func:`os.write` call with :py:func:`io_op` to trap common disconnection connditions. - :py:meth:`read` always behaves as if it is writing to a regular UNIX - file; socket, pipe, and TTY disconnection errors are masked and result - in a 0-sized write. - :returns: - Number of bytes written, or ``None`` if disconnection was detected. + Number of bytes written, or :data:`None` if disconnection was + detected. Stream Classes @@ -302,123 +309,47 @@ mitogen.master Blocking I/O Functions ----------------------- +====================== These functions exist to support the blocking phase of setting up a new context. They will eventually be replaced with asynchronous equivalents. -.. currentmodule:: mitogen.master - -.. function:: iter_read(fd, deadline=None) - - Return a generator that arranges for up to 4096-byte chunks to be read at a - time from the file descriptor `fd` until the generator is destroyed. - - :param fd: - File descriptor to read from. - - :param deadline: - If not ``None``, an absolute UNIX timestamp after which timeout should - occur. - - :raises mitogen.core.TimeoutError: - Attempt to read beyond deadline. - - :raises mitogen.core.StreamError: - Attempt to read past end of file. - - -.. currentmodule:: mitogen.master - -.. function:: write_all (fd, s, deadline=None) - - Arrange for all of bytestring `s` to be written to the file descriptor - `fd`. - - :param int fd: - File descriptor to write to. - - :param bytes s: - Bytestring to write to file descriptor. - - :param float deadline: - If not ``None``, an absolute UNIX timestamp after which timeout should - occur. - - :raises mitogen.core.TimeoutError: - Bytestring could not be written entirely before deadline was exceeded. - - :raises mitogen.core.StreamError: - File descriptor was disconnected before write could complete. - - -Helper Functions ----------------- - -.. currentmodule:: mitogen.core - -.. function:: io_op (func, \*args) - - Wrap a function that may raise :py:class:`OSError`, trapping common error - codes relating to disconnection events in various subsystems: - - * When performing IO against a TTY, disconnection of the remote end is - signalled by :py:data:`errno.EIO`. - * When performing IO against a socket, disconnection of the remote end is - signalled by :py:data:`errno.ECONNRESET`. - * When performing IO against a pipe, disconnection of the remote end is - signalled by :py:data:`errno.EPIPE`. - - :returns: - Tuple of `(return_value, disconnected)`, where `return_value` is the - return value of `func(\*args)`, and `disconnected` is ``True`` if - disconnection was detected, otherwise ``False``. - - .. currentmodule:: mitogen.parent +.. autofunction:: discard_until +.. autofunction:: iter_read +.. autofunction:: write_all -.. autofunction:: create_child +Subprocess Creation Functions +============================= .. currentmodule:: mitogen.parent - +.. autofunction:: create_child +.. autofunction:: hybrid_tty_create_child .. autofunction:: tty_create_child -.. currentmodule:: mitogen.parent +Helper Functions +================ -.. autofunction:: hybrid_tty_create_child +.. currentmodule:: mitogen.core +.. autofunction:: to_text +.. autofunction:: has_parent_authority +.. autofunction:: set_cloexec +.. autofunction:: set_nonblock +.. autofunction:: set_block +.. autofunction:: io_op +.. currentmodule:: mitogen.parent +.. autofunction:: close_nonstandard_fds +.. autofunction:: create_socketpair .. currentmodule:: mitogen.master - -.. function:: get_child_modules (path) - - Return the suffixes of submodules directly neated beneath of the package - directory at `path`. - - :param str path: - Path to the module's source code on disk, or some PEP-302-recognized - equivalent. Usually this is the module's ``__file__`` attribute, but - is specified explicitly to avoid loading the module. - - :return: - List of submodule name suffixes. - +.. autofunction:: get_child_modules .. currentmodule:: mitogen.minify - -.. autofunction:: minimize_source (source) - - Remove comments and docstrings from Python `source`, preserving line - numbers and syntax of empty blocks. - - :param str source: - The source to minimize. - - :returns str: - The minimized source. +.. autofunction:: minimize_source Signals diff --git a/docs/signals.rst b/docs/signals.rst index 1c41353a..19533bb1 100644 --- a/docs/signals.rst +++ b/docs/signals.rst @@ -19,16 +19,10 @@ functions registered to receive it will be called back. Functions --------- -.. function:: mitogen.core.listen (obj, name, func) - - Arrange for `func(\*args, \*\*kwargs)` to be invoked when the named signal - is fired by `obj`. - -.. function:: mitogen.core.fire (obj, name, \*args, \*\*kwargs) - - Arrange for `func(\*args, \*\*kwargs)` to be invoked for every function - registered for the named signal on `obj`. +.. currentmodule:: mitogen.core +.. autofunction:: listen +.. autofunction:: fire List diff --git a/docs/toc.rst b/docs/toc.rst index 357fea3f..7b3274a9 100644 --- a/docs/toc.rst +++ b/docs/toc.rst @@ -15,3 +15,8 @@ Table Of Contents examples internals shame + +.. toctree:: + :hidden: + + services diff --git a/mitogen/__init__.py b/mitogen/__init__.py index 3fc02433..58ef2030 100644 --- a/mitogen/__init__.py +++ b/mitogen/__init__.py @@ -33,7 +33,7 @@ be expected. On the slave, it is built dynamically during startup. #: Library version as a tuple. -__version__ = (0, 2, 2) +__version__ = (0, 2, 3) #: This is :data:`False` in slave contexts. Previously it was used to prevent diff --git a/mitogen/core.py b/mitogen/core.py index dd706311..dadf0924 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -89,6 +89,18 @@ LOAD_MODULE = 107 FORWARD_MODULE = 108 DETACHING = 109 CALL_SERVICE = 110 + +#: Special value used to signal disconnection or the inability to route a +#: message, when it appears in the `reply_to` field. Usually causes +#: :class:`mitogen.core.ChannelError` to be raised when it is received. +#: +#: It indicates the sender did not know how to process the message, or wishes +#: no further messages to be delivered to it. It is used when: +#: +#: * a remote receiver is disconnected or explicitly closed. +#: * a related message could not be delivered due to no route existing for it. +#: * a router is being torn down, as a sentinel value to notify +#: :py:meth:`mitogen.core.Router.add_handler` callbacks to clean up. IS_DEAD = 999 try: @@ -116,7 +128,34 @@ AnyTextType = (BytesType, UnicodeType) if sys.version_info < (2, 5): next = lambda it: it.next() +#: Default size for calls to :meth:`Side.read` or :meth:`Side.write`, and the +#: size of buffers configured by :func:`mitogen.parent.create_socketpair`. This +#: value has many performance implications, 128KiB seems to be a sweet spot. +#: +#: * When set low, large messages cause many :class:`Broker` IO loop +#: iterations, burning CPU and reducing throughput. +#: * When set high, excessive RAM is reserved by the OS for socket buffers (2x +#: per child), and an identically sized temporary userspace buffer is +#: allocated on each read that requires zeroing, and over a particular size +#: may require two system calls to allocate/deallocate. +#: +#: Care must be taken to ensure the underlying kernel object and receiving +#: program support the desired size. For example, +#: +#: * Most UNIXes have TTYs with fixed 2KiB-4KiB buffers, making them unsuitable +#: for efficient IO. +#: * Different UNIXes have varying presets for pipes, which may not be +#: configurable. On recent Linux the default pipe buffer size is 64KiB, but +#: under memory pressure may be as low as 4KiB for unprivileged processes. +#: * When communication is via an intermediary process, its internal buffers +#: effect the speed OS buffers will drain. For example OpenSSH uses 64KiB +#: reads. +#: +#: An ideal :class:`Message` has a size that is a multiple of +#: :data:`CHUNK_SIZE` inclusive of headers, to avoid wasting IO loop iterations +#: writing small trailer chunks. CHUNK_SIZE = 131072 + _tls = threading.local() @@ -131,6 +170,13 @@ else: class Error(Exception): + """Base for all exceptions raised by Mitogen. + + :param str fmt: + Exception text, or format string if `args` is non-empty. + :param tuple args: + Format string arguments. + """ def __init__(self, fmt=None, *args): if args: fmt %= args @@ -140,10 +186,14 @@ class Error(Exception): class LatchError(Error): + """Raised when an attempt is made to use a :py:class:`mitogen.core.Latch` + that has been marked closed.""" pass class Blob(BytesType): + """A serializable bytes subclass whose content is summarized in repr() + output, making it suitable for logging binary data.""" def __repr__(self): return '[blob: %d bytes]' % len(self) @@ -152,6 +202,8 @@ class Blob(BytesType): class Secret(UnicodeType): + """A serializable unicode subclass whose content is masked in repr() + output, making it suitable for logging passwords.""" def __repr__(self): return '[secret]' @@ -165,6 +217,10 @@ class Secret(UnicodeType): class Kwargs(dict): + """A serializable dict subclass that indicates the contained keys should be + be coerced to Unicode on Python 3 as required. Python 2 produces keyword + argument dicts whose keys are bytestrings, requiring a helper to ensure + compatibility with Python 3.""" if PY3: def __init__(self, dct): for k, v in dct.items(): @@ -181,6 +237,10 @@ class Kwargs(dict): class CallError(Error): + """Serializable :class:`Error` subclass raised when + :py:meth:`Context.call() ` fails. A copy of + the traceback from the external context is appended to the exception + message.""" def __init__(self, fmt=None, *args): if not isinstance(fmt, BaseException): Error.__init__(self, fmt, *args) @@ -207,37 +267,54 @@ def _unpickle_call_error(s): class ChannelError(Error): + """Raised when a channel dies or has been closed.""" remote_msg = 'Channel closed by remote end.' local_msg = 'Channel closed by local end.' class StreamError(Error): + """Raised when a stream cannot be established.""" pass class TimeoutError(Error): + """Raised when a timeout occurs on a stream.""" pass def to_text(o): - if isinstance(o, UnicodeType): - return UnicodeType(o) + """Coerce `o` to Unicode by decoding it from UTF-8 if it is an instance of + :class:`bytes`, otherwise pass it to the :class:`str` constructor. The + returned object is always a plain :class:`str`, any subclass is removed.""" if isinstance(o, BytesType): return o.decode('utf-8') return UnicodeType(o) def has_parent_authority(msg, _stream=None): + """Policy function for use with :class:`Receiver` and + :meth:`Router.add_handler` that requires incoming messages to originate + from a parent context, or on a :class:`Stream` whose :attr:`auth_id + ` has been set to that of a parent context or the current + context.""" return (msg.auth_id == mitogen.context_id or msg.auth_id in mitogen.parent_ids) def listen(obj, name, func): + """ + Arrange for `func(*args, **kwargs)` to be invoked when the named signal is + fired by `obj`. + """ signals = vars(obj).setdefault('_signals', {}) signals.setdefault(name, []).append(func) def fire(obj, name, *args, **kwargs): + """ + Arrange for `func(*args, **kwargs)` to be invoked for every function + registered for the named signal on `obj`. + """ signals = vars(obj).get('_signals', {}) return [func(*args, **kwargs) for func in signals.get(name, ())] @@ -253,7 +330,8 @@ def takes_router(func): def is_blacklisted_import(importer, fullname): - """Return ``True`` if `fullname` is part of a blacklisted package, or if + """ + Return :data:`True` if `fullname` is part of a blacklisted package, or if any packages have been whitelisted and `fullname` is not part of one. NB: @@ -266,22 +344,51 @@ def is_blacklisted_import(importer, fullname): def set_cloexec(fd): + """Set the file descriptor `fd` to automatically close on + :func:`os.execve`. This has no effect on file descriptors inherited across + :func:`os.fork`, they must be explicitly closed through some other means, + such as :func:`mitogen.fork.on_fork`.""" flags = fcntl.fcntl(fd, fcntl.F_GETFD) assert fd > 2 fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) def set_nonblock(fd): + """Set the file descriptor `fd` to non-blocking mode. For most underlying + file types, this causes :func:`os.read` or :func:`os.write` to raise + :class:`OSError` with :data:`errno.EAGAIN` rather than block the thread + when the underlying kernel buffer is exhausted.""" flags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) def set_block(fd): + """Inverse of :func:`set_nonblock`, i.e. cause `fd` to block the thread + when the underlying kernel buffer is exhausted.""" flags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK) def io_op(func, *args): + """Wrap `func(*args)` that may raise :class:`select.error`, + :class:`IOError`, or :class:`OSError`, trapping UNIX error codes relating + to disconnection and retry events in various subsystems: + + * When a signal is delivered to the process on Python 2, system call retry + is signalled through :data:`errno.EINTR`. The invocation is automatically + restarted. + * When performing IO against a TTY, disconnection of the remote end is + signalled by :data:`errno.EIO`. + * When performing IO against a socket, disconnection of the remote end is + signalled by :data:`errno.ECONNRESET`. + * When performing IO against a pipe, disconnection of the remote end is + signalled by :data:`errno.EPIPE`. + + :returns: + Tuple of `(return_value, disconnected)`, where `return_value` is the + return value of `func(*args)`, and `disconnected` is :data:`True` if + disconnection was detected, otherwise :data:`False`. + """ while True: try: return func(*args), False @@ -296,7 +403,19 @@ def io_op(func, *args): class PidfulStreamHandler(logging.StreamHandler): + """A :class:`logging.StreamHandler` subclass used when + :meth:`Router.enable_debug() ` has been + called, or the `debug` parameter was specified during context construction. + Verifies the process ID has not changed on each call to :meth:`emit`, + reopening the associated log file when a change is detected. + + This ensures logging to the per-process output files happens correctly even + when uncooperative third party components call :func:`os.fork`. + """ + #: PID that last opened the log file. open_pid = None + + #: Output path template. template = '/tmp/mitogen.%s.%s.log' def _reopen(self): @@ -350,6 +469,7 @@ def enable_profiling(): try: return func(*args) finally: + profiler.dump_stats('/tmp/mitogen.%d.%s.pstat' % (os.getpid(), name)) profiler.create_stats() fp = open('/tmp/mitogen.stats.%d.%s.log' % (os.getpid(), name), 'w') try: @@ -609,10 +729,12 @@ class Importer(object): 'debug', 'doas', 'docker', + 'kubectl', 'fakessh', 'fork', 'jail', 'lxc', + 'lxd', 'master', 'minify', 'parent', @@ -841,6 +963,16 @@ class LogHandler(logging.Handler): logging.Handler.__init__(self) self.context = context self.local = threading.local() + self._buffer = [] + + def uncork(self): + self._send = self.context.send + for msg in self._buffer: + self._send(msg) + self._buffer = None + + def _send(self, msg): + self._buffer.append(msg) def emit(self, rec): if rec.name == 'mitogen.io' or \ @@ -854,7 +986,7 @@ class LogHandler(logging.Handler): if isinstance(encoded, UnicodeType): # Logging package emits both :( encoded = encoded.encode('utf-8') - self.context.send(Message(data=encoded, handle=FORWARD_LOG)) + self._send(Message(data=encoded, handle=FORWARD_LOG)) finally: self.local.in_emit = False @@ -934,7 +1066,7 @@ class Stream(BasicStream): :py:class:`BasicStream` subclass implementing mitogen's :ref:`stream protocol `. """ - #: If not ``None``, :py:class:`Router` stamps this into + #: If not :data:`None`, :py:class:`Router` stamps this into #: :py:attr:`Message.auth_id` of every message received on this stream. auth_id = None @@ -957,6 +1089,16 @@ class Stream(BasicStream): def construct(self): pass + def _internal_receive(self, broker, buf): + if self._input_buf and self._input_buf_len < 128: + self._input_buf[0] += buf + else: + self._input_buf.append(buf) + + self._input_buf_len += len(buf) + while self._receive_one(broker): + pass + def on_receive(self, broker): """Handle the next complete message on the stream. Raise :py:class:`StreamError` on failure.""" @@ -966,14 +1108,7 @@ class Stream(BasicStream): if not buf: return self.on_disconnect(broker) - if self._input_buf and self._input_buf_len < 128: - self._input_buf[0] += buf - else: - self._input_buf.append(buf) - - self._input_buf_len += len(buf) - while self._receive_one(broker): - pass + self._internal_receive(broker, buf) HEADER_FMT = '>LLLLLL' HEADER_LEN = struct.calcsize(HEADER_FMT) @@ -1827,15 +1962,76 @@ class Broker(object): return 'Broker(%#x)' % (id(self),) +class Dispatcher(object): + def __init__(self, econtext): + self.econtext = econtext + #: Chain ID -> CallError if prior call failed. + self._error_by_chain_id = {} + self.recv = Receiver(router=econtext.router, + handle=CALL_FUNCTION, + policy=has_parent_authority) + listen(econtext.broker, 'shutdown', self.recv.close) + + @classmethod + @takes_econtext + def forget_chain(cls, chain_id, econtext): + econtext.dispatcher._error_by_chain_id.pop(chain_id, None) + + def _parse_request(self, msg): + data = msg.unpickle(throw=False) + _v and LOG.debug('_dispatch_one(%r)', data) + + chain_id, modname, klass, func, args, kwargs = data + obj = import_module(modname) + if klass: + obj = getattr(obj, klass) + fn = getattr(obj, func) + if getattr(fn, 'mitogen_takes_econtext', None): + kwargs.setdefault('econtext', self.econtext) + if getattr(fn, 'mitogen_takes_router', None): + kwargs.setdefault('router', self.econtext.router) + + return chain_id, fn, args, kwargs + + def _dispatch_one(self, msg): + try: + chain_id, fn, args, kwargs = self._parse_request(msg) + except Exception: + return None, CallError(sys.exc_info()[1]) + + if chain_id in self._error_by_chain_id: + return chain_id, self._error_by_chain_id[chain_id] + + try: + return chain_id, fn(*args, **kwargs) + except Exception: + e = CallError(sys.exc_info()[1]) + if chain_id is not None: + self._error_by_chain_id[chain_id] = e + return chain_id, e + + def _dispatch_calls(self): + for msg in self.recv: + chain_id, ret = self._dispatch_one(msg) + _v and LOG.debug('_dispatch_calls: %r -> %r', msg, ret) + if msg.reply_to: + msg.reply(ret) + elif isinstance(ret, CallError) and chain_id is None: + LOG.error('No-reply function call failed: %s', ret) + + def run(self): + if self.econtext.config.get('on_start'): + self.econtext.config['on_start'](self.econtext) + + _profile_hook('main', self._dispatch_calls) + + class ExternalContext(object): detached = False def __init__(self, config): self.config = config - def _on_broker_shutdown(self): - self.recv.close() - def _on_broker_exit(self): if not self.config['profiling']: os.kill(os.getpid(), signal.SIGTERM) @@ -1919,16 +2115,12 @@ class ExternalContext(object): in_fd = self.config.get('in_fd', 100) out_fd = self.config.get('out_fd', 1) - self.recv = Receiver(router=self.router, - handle=CALL_FUNCTION, - policy=has_parent_authority) self.stream = Stream(self.router, parent_id) self.stream.name = 'parent' self.stream.accept(in_fd, out_fd) self.stream.receive_side.keep_alive = False listen(self.stream, 'disconnect', self._on_parent_disconnect) - listen(self.broker, 'shutdown', self._on_broker_shutdown) listen(self.broker, 'exit', self._on_broker_exit) os.close(in_fd) @@ -1940,9 +2132,10 @@ class ExternalContext(object): pass # No first stage exists (e.g. fakessh) def _setup_logging(self): + self.log_handler = LogHandler(self.master) root = logging.getLogger() root.setLevel(self.config['log_level']) - root.handlers = [LogHandler(self.master)] + root.handlers = [self.log_handler] if self.config['debug']: enable_debug_logging() @@ -2025,40 +2218,6 @@ class ExternalContext(object): # Reopen with line buffering. sys.stdout = os.fdopen(1, 'w', 1) - def _dispatch_one(self, msg): - data = msg.unpickle(throw=False) - _v and LOG.debug('_dispatch_calls(%r)', data) - - modname, klass, func, args, kwargs = data - obj = import_module(modname) - if klass: - obj = getattr(obj, klass) - fn = getattr(obj, func) - if getattr(fn, 'mitogen_takes_econtext', None): - kwargs.setdefault('econtext', self) - if getattr(fn, 'mitogen_takes_router', None): - kwargs.setdefault('router', self.router) - return fn(*args, **kwargs) - - def _dispatch_calls(self): - if self.config.get('on_start'): - self.config['on_start'](self) - - for msg in self.recv: - try: - ret = self._dispatch_one(msg) - _v and LOG.debug('_dispatch_calls: %r -> %r', msg, ret) - if msg.reply_to: - msg.reply(ret) - except Exception: - e = sys.exc_info()[1] - if msg.reply_to: - _v and LOG.debug('_dispatch_calls: %s', e) - msg.reply(CallError(e)) - else: - LOG.exception('_dispatch_calls: %r', msg) - self.dispatch_stopped = True - def main(self): self._setup_master() try: @@ -2072,14 +2231,16 @@ class ExternalContext(object): if self.config.get('setup_stdio', True): self._setup_stdio() + self.dispatcher = Dispatcher(self) self.router.register(self.parent, self.stream) + self.log_handler.uncork() sys.executable = os.environ.pop('ARGV0', sys.executable) _v and LOG.debug('Connected to %s; my ID is %r, PID is %r', self.parent, mitogen.context_id, os.getpid()) _v and LOG.debug('Recovered sys.executable: %r', sys.executable) - _profile_hook('main', self._dispatch_calls) + self.dispatcher.run() _v and LOG.debug('ExternalContext.main() normal exit') except KeyboardInterrupt: LOG.debug('KeyboardInterrupt received, exiting gracefully.') diff --git a/mitogen/debug.py b/mitogen/debug.py index 64d2292d..19cf1a89 100644 --- a/mitogen/debug.py +++ b/mitogen/debug.py @@ -183,15 +183,16 @@ def install_handler(): signal.signal(signal.SIGUSR2, _handler) -def _logging_main(): +def _logging_main(secs): while True: - time.sleep(5) + time.sleep(secs) LOG.info('PERIODIC THREAD DUMP\n\n%s', get_snapshot()) -def dump_to_logger(): +def dump_to_logger(secs=5): th = threading.Thread( target=_logging_main, + kwargs={'secs': secs}, name='mitogen.debug.dump_to_logger', ) th.setDaemon(True) diff --git a/mitogen/doas.py b/mitogen/doas.py index 1d9d04eb..cdcee0b0 100644 --- a/mitogen/doas.py +++ b/mitogen/doas.py @@ -45,8 +45,8 @@ class Stream(mitogen.parent.Stream): create_child = staticmethod(mitogen.parent.hybrid_tty_create_child) child_is_immediate_subprocess = False - #: Once connected, points to the corresponding TtyLogStream, allowing it to - #: be disconnected at the same time this stream is being torn down. + #: Once connected, points to the corresponding DiagLogStream, allowing it + #: to be disconnected at the same time this stream is being torn down. tty_stream = None username = 'root' @@ -89,7 +89,7 @@ class Stream(mitogen.parent.Stream): password_required_msg = 'doas password is required' def _connect_bootstrap(self, extra_fd): - self.tty_stream = mitogen.parent.TtyLogStream(extra_fd, self) + self.tty_stream = mitogen.parent.DiagLogStream(extra_fd, self) password_sent = False it = mitogen.parent.iter_read( diff --git a/mitogen/docker.py b/mitogen/docker.py index 38ee9d4e..36b0635b 100644 --- a/mitogen/docker.py +++ b/mitogen/docker.py @@ -43,6 +43,11 @@ class Stream(mitogen.parent.Stream): username = None docker_path = 'docker' + # TODO: better way of capturing errors such as "No such container." + create_child_args = { + 'merge_stdio': True + } + def construct(self, container=None, image=None, docker_path=None, username=None, **kwargs): diff --git a/mitogen/fakessh.py b/mitogen/fakessh.py index 5667bcad..582017bc 100644 --- a/mitogen/fakessh.py +++ b/mitogen/fakessh.py @@ -436,7 +436,7 @@ def run(dest, router, args, deadline=None, econtext=None): ssh_path = os.path.join(tmp_path, 'ssh') fp = open(ssh_path, 'w') try: - fp.write('#!%s\n' % (sys.executable,)) + fp.write('#!%s\n' % (mitogen.parent.get_sys_executable(),)) fp.write(inspect.getsource(mitogen.core)) fp.write('\n') fp.write('ExternalContext(%r).main()\n' % ( @@ -449,7 +449,7 @@ def run(dest, router, args, deadline=None, econtext=None): env = os.environ.copy() env.update({ 'PATH': '%s:%s' % (tmp_path, env.get('PATH', '')), - 'ARGV0': sys.executable, + 'ARGV0': mitogen.parent.get_sys_executable(), 'SSH_PATH': ssh_path, }) diff --git a/mitogen/fork.py b/mitogen/fork.py index 12bb7dfa..cf769788 100644 --- a/mitogen/fork.py +++ b/mitogen/fork.py @@ -75,6 +75,17 @@ def reset_logging_framework(): ] +def on_fork(): + """ + Should be called by any program integrating Mitogen each time the process + is forked, in the context of the new child. + """ + reset_logging_framework() # Must be first! + fixup_prngs() + mitogen.core.Latch._on_fork() + mitogen.core.Side._on_fork() + + def handle_child_crash(): """ Respond to _child_main() crashing by ensuring the relevant exception is @@ -134,10 +145,7 @@ class Stream(mitogen.parent.Stream): handle_child_crash() def _child_main(self, childfp): - reset_logging_framework() # Must be first! - fixup_prngs() - mitogen.core.Latch._on_fork() - mitogen.core.Side._on_fork() + on_fork() if self.on_fork: self.on_fork() mitogen.core.set_block(childfp.fileno()) diff --git a/mitogen/kubectl.py b/mitogen/kubectl.py new file mode 100644 index 00000000..c2be24c1 --- /dev/null +++ b/mitogen/kubectl.py @@ -0,0 +1,65 @@ +# coding: utf-8 +# Copyright 2018, Yannig Perré +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import logging + +import mitogen.core +import mitogen.parent + + +LOG = logging.getLogger(__name__) + + +class Stream(mitogen.parent.Stream): + child_is_immediate_subprocess = True + + pod = None + kubectl_path = 'kubectl' + kubectl_args = None + + # TODO: better way of capturing errors such as "No such container." + create_child_args = { + 'merge_stdio': True + } + + def construct(self, pod, kubectl_path=None, kubectl_args=None, **kwargs): + super(Stream, self).construct(**kwargs) + assert pod + self.pod = pod + if kubectl_path: + self.kubectl_path = kubectl_path + self.kubectl_args = kubectl_args or [] + + def connect(self): + super(Stream, self).connect() + self.name = u'kubectl.%s%s' % (self.pod, self.kubectl_args) + + def get_boot_command(self): + bits = [self.kubectl_path] + self.kubectl_args + ['exec', '-it', self.pod] + return bits + ["--"] + super(Stream, self).get_boot_command() diff --git a/mitogen/lxc.py b/mitogen/lxc.py index 4d6c21db..71b12221 100644 --- a/mitogen/lxc.py +++ b/mitogen/lxc.py @@ -52,7 +52,7 @@ class Stream(mitogen.parent.Stream): super(Stream, self).construct(**kwargs) self.container = container if lxc_attach_path: - self.lxc_attach_path = lxc_attach_apth + self.lxc_attach_path = lxc_attach_path def connect(self): super(Stream, self).connect() diff --git a/mitogen/lxd.py b/mitogen/lxd.py new file mode 100644 index 00000000..9e6702f4 --- /dev/null +++ b/mitogen/lxd.py @@ -0,0 +1,70 @@ +# Copyright 2017, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import logging + +import mitogen.core +import mitogen.parent + + +LOG = logging.getLogger(__name__) + + +class Stream(mitogen.parent.Stream): + child_is_immediate_subprocess = False + create_child_args = { + # If lxc finds any of stdin, stdout, stderr connected to a TTY, to + # prevent input injection it creates a proxy pty, forcing all IO to be + # buffered in <4KiB chunks. So ensure stderr is also routed to the + # socketpair. + 'merge_stdio': True + } + + container = None + lxc_path = 'lxc' + python_path = 'python' + + def construct(self, container, lxc_path=None, **kwargs): + super(Stream, self).construct(**kwargs) + self.container = container + if lxc_path: + self.lxc_path = lxc_path + + def connect(self): + super(Stream, self).connect() + self.name = u'lxd.' + self.container + + def get_boot_command(self): + bits = [ + self.lxc_path, + 'exec', + '--mode=noninteractive', + self.container, + '--', + ] + return bits + super(Stream, self).get_boot_command() diff --git a/mitogen/master.py b/mitogen/master.py index d057f7f1..d4ee607a 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -83,7 +83,46 @@ def _stdlib_paths(): for p in prefixes) +def is_stdlib_name(modname): + """Return :data:`True` if `modname` appears to come from the standard + library. + """ + if imp.is_builtin(modname) != 0: + return True + + module = sys.modules.get(modname) + if module is None: + return False + + # six installs crap with no __file__ + modpath = os.path.abspath(getattr(module, '__file__', '')) + return is_stdlib_path(modpath) + + +_STDLIB_PATHS = _stdlib_paths() + + +def is_stdlib_path(path): + return any( + os.path.commonprefix((libpath, path)) == libpath + and 'site-packages' not in path + and 'dist-packages' not in path + for libpath in _STDLIB_PATHS + ) + + def get_child_modules(path): + """Return the suffixes of submodules directly neated beneath of the package + directory at `path`. + + :param str path: + Path to the module's source code on disk, or some PEP-302-recognized + equivalent. Usually this is the module's ``__file__`` attribute, but + is specified explicitly to avoid loading the module. + + :return: + List of submodule name suffixes. + """ it = pkgutil.iter_modules([os.path.dirname(path)]) return [to_text(name) for _, name, _ in it] @@ -168,24 +207,35 @@ def scan_code_imports(co): class ThreadWatcher(object): """ - Manage threads that waits for nother threads to shutdown, before invoking - `on_join()`. In CPython it seems possible to use this method to ensure a - non-main thread is signalled when the main thread has exitted, using yet - another thread as a proxy. + Manage threads that wait for another thread to shut down, before invoking + `on_join()` for each associated ThreadWatcher. + + In CPython it seems possible to use this method to ensure a non-main thread + is signalled when the main thread has exited, using a third thread as a + proxy. """ - _lock = threading.Lock() - _pid = None - _instances_by_target = {} - _thread_by_target = {} + #: Protects remaining _cls_* members. + _cls_lock = threading.Lock() + + #: PID of the process that last modified the class data. If the PID + #: changes, it means the thread watch dict refers to threads that no longer + #: exist in the current process (since it forked), and so must be reset. + _cls_pid = None + + #: Map watched Thread -> list of ThreadWatcher instances. + _cls_instances_by_target = {} + + #: Map watched Thread -> watcher Thread for each watched thread. + _cls_thread_by_target = {} @classmethod def _reset(cls): """If we have forked since the watch dictionaries were initialized, all that has is garbage, so clear it.""" - if os.getpid() != cls._pid: - cls._pid = os.getpid() - cls._instances_by_target.clear() - cls._thread_by_target.clear() + if os.getpid() != cls._cls_pid: + cls._cls_pid = os.getpid() + cls._cls_instances_by_target.clear() + cls._cls_thread_by_target.clear() def __init__(self, target, on_join): self.target = target @@ -194,33 +244,34 @@ class ThreadWatcher(object): @classmethod def _watch(cls, target): target.join() - for watcher in cls._instances_by_target[target]: + for watcher in cls._cls_instances_by_target[target]: watcher.on_join() def install(self): - self._lock.acquire() + self._cls_lock.acquire() try: self._reset() - self._instances_by_target.setdefault(self.target, []).append(self) - if self.target not in self._thread_by_target: - self._thread_by_target[self.target] = threading.Thread( + lst = self._cls_instances_by_target.setdefault(self.target, []) + lst.append(self) + if self.target not in self._cls_thread_by_target: + self._cls_thread_by_target[self.target] = threading.Thread( name='mitogen.master.join_thread_async', target=self._watch, args=(self.target,) ) - self._thread_by_target[self.target].start() + self._cls_thread_by_target[self.target].start() finally: - self._lock.release() + self._cls_lock.release() def remove(self): - self._lock.acquire() + self._cls_lock.acquire() try: self._reset() - lst = self._instances_by_target.get(self.target, []) + lst = self._cls_instances_by_target.get(self.target, []) if self in lst: lst.remove(self) finally: - self._lock.release() + self._cls_lock.release() @classmethod def watch(cls, target, on_join): @@ -230,6 +281,25 @@ class ThreadWatcher(object): class LogForwarder(object): + """ + Install a :data:`mitogen.core.FORWARD_LOG` handler that delivers forwarded + log events into the local logging framework. This is used by the master's + :class:`Router`. + + The forwarded :class:`logging.LogRecord` objects are delivered to loggers + under ``mitogen.ctx.*`` corresponding to their + :attr:`mitogen.core.Context.name`, with the message prefixed with the + logger name used in the child. The records include some extra attributes: + + * ``mitogen_message``: Unicode original message without the logger name + prepended. + * ``mitogen_context``: :class:`mitogen.parent.Context` reference to the + source context. + * ``mitogen_name``: Original logger name. + + :param mitogen.master.Router router: + Router to install the handler on. + """ def __init__(self, router): self._router = router self._cache = {} @@ -246,7 +316,8 @@ class LogForwarder(object): if logger is None: context = self._router.context_by_id(msg.src_id) if context is None: - LOG.error('FORWARD_LOG received from src_id %d', msg.src_id) + LOG.error('%s: dropping log from unknown context ID %d', + self, msg.src_id) return name = '%s.%s' % (RLOG.name, context.name) @@ -263,33 +334,6 @@ class LogForwarder(object): return 'LogForwarder(%r)' % (self._router,) -_STDLIB_PATHS = _stdlib_paths() - - -def is_stdlib_path(path): - return any( - os.path.commonprefix((libpath, path)) == libpath - and 'site-packages' not in path - and 'dist-packages' not in path - for libpath in _STDLIB_PATHS - ) - - -def is_stdlib_name(modname): - """Return ``True`` if `modname` appears to come from the standard - library.""" - if imp.is_builtin(modname) != 0: - return True - - module = sys.modules.get(modname) - if module is None: - return False - - # six installs crap with no __file__ - modpath = os.path.abspath(getattr(module, '__file__', '')) - return is_stdlib_path(modpath) - - class ModuleFinder(object): def __init__(self): #: Import machinery is expensive, keep :py:meth`:get_module_source` @@ -360,7 +404,7 @@ class ModuleFinder(object): # requests.packages.urllib3.contrib.pyopenssl" e = sys.exc_info()[1] LOG.debug('%r: loading %r using %r failed: %s', - self, fullname, loader) + self, fullname, loader, e) return if path is None or source is None: @@ -412,8 +456,8 @@ class ModuleFinder(object): source code. :returns: - Tuple of `(module path, source text, is package?)`, or ``None`` if - the source cannot be found. + Tuple of `(module path, source text, is package?)`, or :data:`None` + if the source cannot be found. """ tup = self._found_cache.get(fullname) if tup: @@ -542,6 +586,14 @@ class ModuleResponder(object): return 'ModuleResponder(%r)' % (self._router,) MAIN_RE = re.compile(b(r'^if\s+__name__\s*==\s*.__main__.\s*:'), re.M) + main_guard_msg = ( + "A child context attempted to import __main__, however the main " + "module present in the master process lacks an execution guard. " + "Update %r to prevent unintended execution, using a guard like:\n" + "\n" + " if __name__ == '__main__':\n" + " # your code here.\n" + ) def whitelist_prefix(self, fullname): if self.whitelist == ['']: @@ -551,14 +603,19 @@ class ModuleResponder(object): def blacklist_prefix(self, fullname): self.blacklist.append(fullname) - def neutralize_main(self, src): + def neutralize_main(self, path, src): """Given the source for the __main__ module, try to find where it begins conditional execution based on a "if __name__ == '__main__'" guard, and remove any code after that point.""" match = self.MAIN_RE.search(src) if match: return src[:match.start()] - return src + + if b('mitogen.main(') in src: + return src + + LOG.error(self.main_guard_msg, path) + raise ImportError('refused') def _make_negative_response(self, fullname): return (fullname, None, None, None, ()) @@ -585,7 +642,7 @@ class ModuleResponder(object): pkg_present = None if fullname == '__main__': - source = self.neutralize_main(source) + source = self.neutralize_main(path, source) compressed = mitogen.core.Blob(zlib.compress(source, 9)) related = [ to_text(name) @@ -670,8 +727,7 @@ class ModuleResponder(object): ) ) - def _forward_module(self, context, fullname): - IOLOG.debug('%r._forward_module(%r, %r)', self, context, fullname) + def _forward_one_module(self, context, fullname): path = [] while fullname: path.append(fullname) @@ -682,8 +738,13 @@ class ModuleResponder(object): self._send_module_and_related(stream, fullname) self._send_forward_module(stream, context, fullname) - def forward_module(self, context, fullname): - self._router.broker.defer(self._forward_module, context, fullname) + def _forward_modules(self, context, fullnames): + IOLOG.debug('%r._forward_modules(%r, %r)', self, context, fullnames) + for fullname in fullnames: + self._forward_one_module(context, fullname) + + def forward_modules(self, context, fullnames): + self._router.broker.defer(self._forward_modules, context, fullnames) class Broker(mitogen.core.Broker): diff --git a/mitogen/minify.py b/mitogen/minify.py index 26ecf62f..a261bf6a 100644 --- a/mitogen/minify.py +++ b/mitogen/minify.py @@ -48,10 +48,16 @@ except ImportError: @lru_cache() def minimize_source(source): - """Remove most comments and docstrings from Python source code. + """Remove comments and docstrings from Python `source`, preserving line + numbers and syntax of empty blocks. + + :param str source: + The source to minimize. + + :returns str: + The minimized source. """ - if not isinstance(source, mitogen.core.UnicodeType): - source = source.decode('utf-8') + source = mitogen.core.to_text(source) tokens = tokenize.generate_tokens(StringIO(source).readline) tokens = strip_comments(tokens) tokens = strip_docstrings(tokens) diff --git a/mitogen/parent.py b/mitogen/parent.py index 4299d3cd..a57ca20b 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -47,7 +47,6 @@ import termios import textwrap import threading import time -import types import zlib # Absolute imports for <2.5. @@ -79,11 +78,48 @@ try: except: SC_OPEN_MAX = 1024 +OPENPTY_MSG = ( + "Failed to create a PTY: %s. It is likely the maximum number of PTYs has " + "been reached. Consider increasing the 'kern.tty.ptmx_max' sysctl on OS " + "X, the 'kernel.pty.max' sysctl on Linux, or modifying your configuration " + "to avoid PTY use." +) + +SYS_EXECUTABLE_MSG = ( + "The Python sys.executable variable is unset, indicating Python was " + "unable to determine its original program name. Unless explicitly " + "configured otherwise, child contexts will be started using " + "'/usr/bin/python'" +) +_sys_executable_warning_logged = False + +SIGNAL_BY_NUM = dict( + (getattr(signal, name), name) + for name in sorted(vars(signal), reverse=True) + if name.startswith('SIG') and not name.startswith('SIG_') +) + def get_log_level(): return (LOG.level or logging.getLogger().level or logging.INFO) +def get_sys_executable(): + """ + Return :data:`sys.executable` if it is set, otherwise return + ``"/usr/bin/python"`` and log a warning. + """ + if sys.executable: + return sys.executable + + global _sys_executable_warning_logged + if not _sys_executable_warning_logged: + LOG.warn(SYS_EXECUTABLE_MSG) + _sys_executable_warning_logged = True + + return '/usr/bin/python' + + def get_core_source(): """ In non-masters, simply fetch the cached mitogen.core source code via the @@ -93,6 +129,19 @@ def get_core_source(): return inspect.getsource(mitogen.core) +def get_default_remote_name(): + """ + Return the default name appearing in argv[0] of remote machines. + """ + s = u'%s@%s:%d' + s %= (getpass.getuser(), socket.gethostname(), os.getpid()) + # In mixed UNIX/Windows environments, the username may contain slashes. + return s.translate({ + ord(u'\\'): ord(u'_'), + ord(u'/'): ord(u'_') + }) + + def is_immediate_child(msg, stream): """ Handler policy that requires messages to arrive only from immediately @@ -144,6 +193,14 @@ def close_nonstandard_fds(): def create_socketpair(): + """ + Create a :func:`socket.socketpair` to use for use as a child process's UNIX + stdio channels. As socket pairs are bidirectional, they are economical on + file descriptor usage as the same descriptor can be used for ``stdin`` and + ``stdout``. As they are sockets their buffers are tunable, allowing large + buffers to be configured in order to improve throughput for file transfers + and reduce :class:`mitogen.core.Broker` IO loop iterations. + """ parentfp, childfp = socket.socketpair() parentfp.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, @@ -154,7 +211,31 @@ def create_socketpair(): return parentfp, childfp -def create_child(args, merge_stdio=False, preexec_fn=None): +def detach_popen(*args, **kwargs): + """ + Use :class:`subprocess.Popen` to construct a child process, then hack the + Popen so that it forgets the child it created, allowing it to survive a + call to Popen.__del__. + + If the child process is not detached, there is a race between it exitting + and __del__ being called. If it exits before __del__ runs, then __del__'s + call to :func:`os.waitpid` will capture the one and only exit event + delivered to this process, causing later 'legitimate' calls to fail with + ECHILD. + + :returns: + Process ID of the new child. + """ + # This allows Popen() to be used for e.g. graceful post-fork error + # handling, without tying the surrounding code into managing a Popen + # object, which isn't possible for at least :mod:`mitogen.fork`. This + # should be replaced by a swappable helper class in a future version. + proc = subprocess.Popen(*args, **kwargs) + proc._child_created = False + return proc.pid + + +def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None): """ Create a child process whose stdin/stdout is connected to a socket. @@ -165,8 +246,13 @@ def create_child(args, merge_stdio=False, preexec_fn=None): socketpair, rather than inherited from the parent process. This may be necessary to ensure that not TTY is connected to any stdio handle, for instance when using LXC. + :param bool stderr_pipe: + If :data:`True` and `merge_stdio` is :data:`False`, arrange for + `stderr` to be connected to a separate pipe, to allow any ongoing debug + logs generated by e.g. SSH to be outpu as the session progresses, + without interfering with `stdout`. :returns: - `(pid, socket_obj, :data:`None`)` + `(pid, socket_obj, :data:`None` or pipe_fd)` """ parentfp, childfp = create_socketpair() # When running under a monkey patches-enabled gevent, the socket module @@ -175,12 +261,17 @@ def create_child(args, merge_stdio=False, preexec_fn=None): # O_NONBLOCK from Python's future stdin fd. mitogen.core.set_block(childfp.fileno()) + stderr_r = None + extra = {} if merge_stdio: extra = {'stderr': childfp} - else: - extra = {} + elif stderr_pipe: + stderr_r, stderr_w = os.pipe() + mitogen.core.set_cloexec(stderr_r) + mitogen.core.set_cloexec(stderr_w) + extra = {'stderr': stderr_w} - proc = subprocess.Popen( + pid = detach_popen( args=args, stdin=childfp, stdout=childfp, @@ -188,14 +279,16 @@ def create_child(args, merge_stdio=False, preexec_fn=None): preexec_fn=preexec_fn, **extra ) + if stderr_pipe: + os.close(stderr_w) childfp.close() # Decouple the socket from the lifetime of the Python socket object. fd = os.dup(parentfp.fileno()) parentfp.close() LOG.debug('create_child() child %d fd %d, parent %d, cmd: %s', - proc.pid, fd, os.getpid(), Argv(args)) - return proc.pid, fd, None + pid, fd, os.getpid(), Argv(args)) + return pid, fd, stderr_r def _acquire_controlling_tty(): @@ -210,6 +303,22 @@ def _acquire_controlling_tty(): fcntl.ioctl(2, termios.TIOCSCTTY) +def openpty(): + """ + Call :func:`os.openpty`, raising a descriptive error if the call fails. + + :raises mitogen.core.StreamError: + Creating a PTY failed. + :returns: + See :func`os.openpty`. + """ + try: + return os.openpty() + except OSError: + e = sys.exc_info()[1] + raise mitogen.core.StreamError(OPENPTY_MSG, e) + + def tty_create_child(args): """ Return a file descriptor connected to the master end of a pseudo-terminal, @@ -224,12 +333,12 @@ def tty_create_child(args): :returns: `(pid, tty_fd, None)` """ - master_fd, slave_fd = os.openpty() + master_fd, slave_fd = openpty() mitogen.core.set_block(slave_fd) disable_echo(master_fd) disable_echo(slave_fd) - proc = subprocess.Popen( + pid = detach_popen( args=args, stdin=slave_fd, stdout=slave_fd, @@ -240,8 +349,8 @@ def tty_create_child(args): os.close(slave_fd) LOG.debug('tty_create_child() child %d fd %d, parent %d, cmd: %s', - proc.pid, master_fd, os.getpid(), Argv(args)) - return proc.pid, master_fd, None + pid, master_fd, os.getpid(), Argv(args)) + return pid, master_fd, None def hybrid_tty_create_child(args): @@ -256,14 +365,14 @@ def hybrid_tty_create_child(args): :returns: `(pid, socketpair_fd, tty_fd)` """ - master_fd, slave_fd = os.openpty() + master_fd, slave_fd = openpty() parentfp, childfp = create_socketpair() mitogen.core.set_block(slave_fd) mitogen.core.set_block(childfp) disable_echo(master_fd) disable_echo(slave_fd) - proc = subprocess.Popen( + pid = detach_popen( args=args, stdin=childfp, stdout=childfp, @@ -279,11 +388,27 @@ def hybrid_tty_create_child(args): parentfp.close() LOG.debug('hybrid_tty_create_child() pid=%d stdio=%d, tty=%d, cmd: %s', - proc.pid, stdio_fd, master_fd, Argv(args)) - return proc.pid, stdio_fd, master_fd + pid, stdio_fd, master_fd, Argv(args)) + return pid, stdio_fd, master_fd def write_all(fd, s, deadline=None): + """Arrange for all of bytestring `s` to be written to the file descriptor + `fd`. + + :param int fd: + File descriptor to write to. + :param bytes s: + Bytestring to write to file descriptor. + :param float deadline: + If not :data:`None`, absolute UNIX timestamp after which timeout should + occur. + + :raises mitogen.core.TimeoutError: + Bytestring could not be written entirely before deadline was exceeded. + :raises mitogen.core.StreamError: + File descriptor was disconnected before write could complete. + """ timeout = None written = 0 poller = PREFERRED_POLLER() @@ -312,6 +437,20 @@ def write_all(fd, s, deadline=None): def iter_read(fds, deadline=None): + """Return a generator that arranges for up to 4096-byte chunks to be read + at a time from the file descriptor `fd` until the generator is destroyed. + + :param int fd: + File descriptor to read from. + :param float deadline: + If not :data:`None`, an absolute UNIX timestamp after which timeout + should occur. + + :raises mitogen.core.TimeoutError: + Attempt to read beyond deadline. + :raises mitogen.core.StreamError: + Attempt to read past end of file. + """ poller = PREFERRED_POLLER() for fd in fds: poller.start_receive(fd) @@ -346,6 +485,24 @@ def iter_read(fds, deadline=None): def discard_until(fd, s, deadline): + """Read chunks from `fd` until one is encountered that ends with `s`. This + is used to skip output produced by ``/etc/profile``, ``/etc/motd`` and + mandatory SSH banners while waiting for :attr:`Stream.EC0_MARKER` to + appear, indicating the first stage is ready to receive the compressed + :mod:`mitogen.core` source. + + :param int fd: + File descriptor to read from. + :param bytes s: + Marker string to discard until encountered. + :param float deadline: + Absolute UNIX timestamp after which timeout should occur. + + :raises mitogen.core.TimeoutError: + Attempt to read beyond deadline. + :raises mitogen.core.StreamError: + Attempt to read past end of file. + """ for buf in iter_read([fd], deadline): if IOLOG.level == logging.DEBUG: for line in buf.splitlines(): @@ -396,23 +553,6 @@ def upgrade_router(econtext): ) -def make_call_msg(fn, *args, **kwargs): - if isinstance(fn, types.MethodType) and \ - isinstance(fn.im_self, (type, types.ClassType)): - klass = mitogen.core.to_text(fn.im_self.__name__) - else: - klass = None - - tup = ( - mitogen.core.to_text(fn.__module__), - klass, - mitogen.core.to_text(fn.__name__), - args, - mitogen.core.Kwargs(kwargs) - ) - return mitogen.core.Message.pickled(tup, handle=mitogen.core.CALL_FUNCTION) - - def stream_by_method_name(name): """ Given the name of a Mitogen connection method, import its implementation @@ -450,6 +590,21 @@ def _proxy_connect(name, method_name, kwargs, econtext): } +def wstatus_to_str(status): + """ + Parse and format a :func:`os.waitpid` exit status. + """ + if os.WIFEXITED(status): + return 'exited with return code %d' % (os.WEXITSTATUS(status),) + if os.WIFSIGNALED(status): + n = os.WTERMSIG(status) + return 'exited due to signal %d (%s)' % (n, SIGNAL_BY_NUM.get(n)) + if os.WIFSTOPPED(status): + n = os.WSTOPSIG(status) + return 'stopped due to signal %d (%s)' % (n, SIGNAL_BY_NUM.get(n)) + return 'unknown wait status (%d)' % (status,) + + class Argv(object): """ Wrapper to defer argv formatting when debug logging is disabled. @@ -486,8 +641,12 @@ class CallSpec(object): self.kwargs = kwargs def _get_name(self): - return u'%s.%s' % (self.func.__module__, - self.func.__name__) + bits = [self.func.__module__] + if inspect.ismethod(self.func): + bits.append(getattr(self.func.__self__, '__name__', None) or + getattr(type(self.func.__self__), '__name__', None)) + bits.append(self.func.__name__) + return u'.'.join(bits) def _get_args(self): return u', '.join(repr(a) for a in self.args) @@ -678,7 +837,7 @@ PREFERRED_POLLER = POLLER_BY_SYSNAME.get( mitogen.core.Latch.poller_class = PREFERRED_POLLER -class TtyLogStream(mitogen.core.BasicStream): +class DiagLogStream(mitogen.core.BasicStream): """ For "hybrid TTY/socketpair" mode, after a connection has been setup, a spare TTY file descriptor will exist that cannot be closed, and to which @@ -688,18 +847,21 @@ class TtyLogStream(mitogen.core.BasicStream): termination signal to any processes whose controlling TTY is the TTY that has been closed. - TtyLogStream takes over this descriptor and creates corresponding log + DiagLogStream takes over this descriptor and creates corresponding log messages for anything written to it. """ - def __init__(self, tty_fd, stream): - self.receive_side = mitogen.core.Side(self, tty_fd) + def __init__(self, fd, stream): + self.receive_side = mitogen.core.Side(self, fd) self.transmit_side = self.receive_side self.stream = stream self.buf = '' def __repr__(self): - return 'mitogen.parent.TtyLogStream(%r)' % (self.stream.name,) + return 'mitogen.parent.DiagLogStream(fd=%r, %r)' % ( + self.receive_side.fd, + self.stream.name, + ) def on_receive(self, broker): """ @@ -724,7 +886,7 @@ class Stream(mitogen.core.Stream): Base for streams capable of starting new slaves. """ #: The path to the remote Python interpreter. - python_path = sys.executable + python_path = get_sys_executable() #: Maximum time to wait for a connection attempt. connect_timeout = 30.0 @@ -765,8 +927,7 @@ class Stream(mitogen.core.Stream): if connect_timeout: self.connect_timeout = connect_timeout if remote_name is None: - remote_name = '%s@%s:%d' - remote_name %= (getpass.getuser(), socket.gethostname(), os.getpid()) + remote_name = get_default_remote_name() if '/' in remote_name or '\\' in remote_name: raise ValueError('remote_name= cannot contain slashes') self.remote_name = remote_name @@ -821,7 +982,7 @@ class Stream(mitogen.core.Stream): self._reaped = True if pid: - LOG.debug('%r: child process exit status was %d', self, status) + LOG.debug('%r: PID %d %s', self, pid, wstatus_to_str(status)) return # For processes like sudo we cannot actually send sudo a signal, @@ -968,7 +1129,9 @@ class Stream(mitogen.core.Stream): self._reap_child() raise - #: For ssh.py, this must be at least max(len('password'), len('debug1:')) + #: Sentinel value emitted by the first stage to indicate it is ready to + #: receive the compressed bootstrap. For :mod:`mitogen.ssh` this must have + #: length of at least `max(len('password'), len('debug1:'))` EC0_MARKER = mitogen.core.b('MITO000\n') EC1_MARKER = mitogen.core.b('MITO001\n') @@ -1007,9 +1170,215 @@ class ChildIdAllocator(object): return self.allocate() +class CallChain(object): + """ + Deliver :data:`mitogen.core.CALL_FUNCTION` messages to a target context, + optionally threading related calls so an exception in an earlier call + cancels subsequent calls. + + :param mitogen.core.Context context: + Target context. + :param bool pipelined: + Enable pipelining. + + :meth:`call`, :meth:`call_no_reply` and :meth:`call_async` + normally issue calls and produce responses with no memory of prior + exceptions. If a call made with :meth:`call_no_reply` fails, the exception + is logged to the target context's logging framework. + + **Pipelining** + + When pipelining is enabled, if an exception occurs during a call, + subsequent calls made by the same :class:`CallChain` fail with the same + exception, including those already in-flight on the network, and no further + calls execute until :meth:`reset` is invoked. + + No exception is logged for calls made with :meth:`call_no_reply`, instead + the exception is saved and reported as the result of subsequent + :meth:`call` or :meth:`call_async` calls. + + Sequences of asynchronous calls can be made without wasting network + round-trips to discover if prior calls succeed, and chains originating from + multiple unrelated source contexts may overlap concurrently at a target + context without interference. + + In this example, 4 calls complete in one round-trip:: + + chain = mitogen.parent.CallChain(context, pipelined=True) + chain.call_no_reply(os.mkdir, '/tmp/foo') + + # If previous mkdir() failed, this never runs: + chain.call_no_reply(os.mkdir, '/tmp/foo/bar') + + # If either mkdir() failed, this never runs, and the exception is + # asynchronously delivered to the receiver. + recv = chain.call_async(subprocess.check_output, '/tmp/foo') + + # If anything so far failed, this never runs, and raises the exception. + chain.call(do_something) + + # If this code was executed, the exception would also be raised. + if recv.get().unpickle() == 'baz': + pass + + When pipelining is enabled, :meth:`reset` must be invoked to ensure any + exception is discarded, otherwise unbounded memory usage is possible in + long-running programs. The context manager protocol is supported to ensure + :meth:`reset` is always invoked:: + + with mitogen.parent.CallChain(context, pipelined=True) as chain: + chain.call_no_reply(...) + chain.call_no_reply(...) + chain.call_no_reply(...) + chain.call(...) + + # chain.reset() automatically invoked. + """ + def __init__(self, context, pipelined=False): + self.context = context + if pipelined: + self.chain_id = self.make_chain_id() + else: + self.chain_id = None + + @classmethod + def make_chain_id(cls): + return '%s-%s-%x-%x' % ( + socket.gethostname(), + os.getpid(), + threading.currentThread().ident, + int(1e6 * time.time()), + ) + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self.context) + + def __enter__(self): + return self + + def __exit__(self, _1, _2, _3): + self.reset() + + def reset(self): + """ + Instruct the target to forget any related exception. + """ + if not self.chain_id: + return + + saved, self.chain_id = self.chain_id, None + try: + self.call_no_reply(mitogen.core.Dispatcher.forget_chain, saved) + finally: + self.chain_id = saved + + def make_msg(self, fn, *args, **kwargs): + if inspect.ismethod(fn) and inspect.isclass(fn.__self__): + klass = mitogen.core.to_text(fn.__self__.__name__) + else: + klass = None + + tup = ( + self.chain_id, + mitogen.core.to_text(fn.__module__), + klass, + mitogen.core.to_text(fn.__name__), + args, + mitogen.core.Kwargs(kwargs) + ) + return mitogen.core.Message.pickled(tup, + handle=mitogen.core.CALL_FUNCTION) + + def call_no_reply(self, fn, *args, **kwargs): + """ + Like :meth:`call_async`, but do not wait for a return value, and inform + the target context no reply is expected. If the call fails and + pipelining is disabled, the exception will be logged to the target + context's logging framework. + """ + LOG.debug('%r.call_no_reply(): %r', self, CallSpec(fn, args, kwargs)) + self.context.send(self.make_msg(fn, *args, **kwargs)) + + def call_async(self, fn, *args, **kwargs): + """ + Arrange for `fn(*args, **kwargs)` to be invoked on the context's main + thread. + + :param fn: + A free function in module scope or a class method of a class + directly reachable from module scope: + + .. code-block:: python + + # mymodule.py + + def my_func(): + '''A free function reachable as mymodule.my_func''' + + class MyClass: + @classmethod + def my_classmethod(cls): + '''Reachable as mymodule.MyClass.my_classmethod''' + + def my_instancemethod(self): + '''Unreachable: requires a class instance!''' + + class MyEmbeddedClass: + @classmethod + def my_classmethod(cls): + '''Not directly reachable from module scope!''' + + :param tuple args: + Function arguments, if any. See :ref:`serialization-rules` for + permitted types. + :param dict kwargs: + Function keyword arguments, if any. See :ref:`serialization-rules` + for permitted types. + :returns: + :class:`mitogen.core.Receiver` configured to receive the result of + the invocation: + + .. code-block:: python + + recv = context.call_async(os.check_output, 'ls /tmp/') + try: + # Prints output once it is received. + msg = recv.get() + print(msg.unpickle()) + except mitogen.core.CallError, e: + print('Call failed:', str(e)) + + Asynchronous calls may be dispatched in parallel to multiple + contexts and consumed as they complete using + :class:`mitogen.select.Select`. + """ + LOG.debug('%r.call_async(): %r', self, CallSpec(fn, args, kwargs)) + return self.context.send_async(self.make_msg(fn, *args, **kwargs)) + + def call(self, fn, *args, **kwargs): + """ + Like :meth:`call_async`, but block until the return value is available. + Equivalent to:: + + call_async(fn, *args, **kwargs).get().unpickle() + + :returns: + The function's return value. + :raises mitogen.core.CallError: + An exception was raised in the remote context during execution. + """ + receiver = self.call_async(fn, *args, **kwargs) + return receiver.get().unpickle(throw_dead=False) + + class Context(mitogen.core.Context): + call_chain_class = CallChain via = None + def __init__(self, *args, **kwargs): + super(Context, self).__init__(*args, **kwargs) + self.default_call_chain = self.call_chain_class(self) + def __eq__(self, other): return (isinstance(other, mitogen.core.Context) and (other.context_id == self.context_id) and @@ -1019,17 +1388,13 @@ class Context(mitogen.core.Context): return hash((self.router, self.context_id)) def call_async(self, fn, *args, **kwargs): - LOG.debug('%r.call_async(): %r', self, CallSpec(fn, args, kwargs)) - return self.send_async(make_call_msg(fn, *args, **kwargs)) + return self.default_call_chain.call_async(fn, *args, **kwargs) def call(self, fn, *args, **kwargs): - receiver = self.call_async(fn, *args, **kwargs) - return receiver.get().unpickle(throw_dead=False) + return self.default_call_chain.call(fn, *args, **kwargs) def call_no_reply(self, fn, *args, **kwargs): - LOG.debug('%r.call_no_reply(%r, *%r, **%r)', - self, fn, args, kwargs) - self.send(make_call_msg(fn, *args, **kwargs)) + self.default_call_chain.call_no_reply(fn, *args, **kwargs) def shutdown(self, wait=False): LOG.debug('%r.shutdown() sending SHUTDOWN', self) @@ -1276,6 +1641,9 @@ class Router(mitogen.core.Router): def docker(self, **kwargs): return self.connect(u'docker', **kwargs) + def kubectl(self, **kwargs): + return self.connect(u'kubectl', **kwargs) + def fork(self, **kwargs): return self.connect(u'fork', **kwargs) @@ -1288,6 +1656,9 @@ class Router(mitogen.core.Router): def lxc(self, **kwargs): return self.connect(u'lxc', **kwargs) + def lxd(self, **kwargs): + return self.connect(u'lxd', **kwargs) + def setns(self, **kwargs): return self.connect(u'setns', **kwargs) diff --git a/mitogen/service.py b/mitogen/service.py index 923ec04a..ffb7308e 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -372,8 +372,9 @@ class DeduplicatingInvoker(Invoker): class Service(object): - #: Sentinel object to suppress reply generation, since returning ``None`` - #: will trigger a response message containing the pickled ``None``. + #: Sentinel object to suppress reply generation, since returning + #: :data:`None` will trigger a response message containing the pickled + #: :data:`None`. NO_REPLY = object() invoker_class = Invoker @@ -635,8 +636,7 @@ class PushFileService(Service): """ for path in paths: self.propagate_to(context, path) - for fullname in modules: - self.router.responder.forward_module(context, fullname) + self.router.responder.forward_modules(context, modules) @expose(policy=AllowParents()) @arg_spec({ @@ -873,7 +873,14 @@ class FileService(Service): raise Error(self.context_mismatch_msg) LOG.debug('Serving %r', path) - fp = open(path, 'rb', self.IO_SIZE) + try: + fp = open(path, 'rb', self.IO_SIZE) + except IOError: + msg.reply(mitogen.core.CallError( + sys.exc_info()[1] + )) + return + # Response must arrive first so requestee can begin receive loop, # otherwise first ack won't arrive until all pending chunks were # delivered. In that case max BDP would always be 128KiB, aka. max diff --git a/mitogen/setns.py b/mitogen/setns.py index 1779ca77..be87e063 100644 --- a/mitogen/setns.py +++ b/mitogen/setns.py @@ -94,6 +94,16 @@ def get_lxc_pid(path, name): raise Error("could not find PID from lxc-info output.\n%s", output) +def get_lxd_pid(path, name): + output = _run_command([path, 'info', name]) + for line in output.splitlines(): + bits = line.split() + if bits and bits[0] == 'Pid:': + return int(bits[1]) + + raise Error("could not find PID from lxc output.\n%s", output) + + def get_machinectl_pid(path, name): output = _run_command([path, 'status', name]) for line in output.splitlines(): @@ -108,20 +118,24 @@ class Stream(mitogen.parent.Stream): child_is_immediate_subprocess = False container = None - username = None + username = 'root' kind = None + python_path = 'python' docker_path = 'docker' + lxc_path = 'lxc' lxc_info_path = 'lxc-info' machinectl_path = 'machinectl' GET_LEADER_BY_KIND = { 'docker': ('docker_path', get_docker_pid), 'lxc': ('lxc_info_path', get_lxc_pid), + 'lxd': ('lxc_path', get_lxd_pid), 'machinectl': ('machinectl_path', get_machinectl_pid), } def construct(self, container, kind, username=None, docker_path=None, - lxc_info_path=None, machinectl_path=None, **kwargs): + lxc_path=None, lxc_info_path=None, machinectl_path=None, + **kwargs): super(Stream, self).construct(**kwargs) if kind not in self.GET_LEADER_BY_KIND: raise Error('unsupported container kind: %r', kind) @@ -132,6 +146,8 @@ class Stream(mitogen.parent.Stream): self.username = username if docker_path: self.docker_path = docker_path + if lxc_path: + self.lxc_path = lxc_path if lxc_info_path: self.lxc_info_path = lxc_info_path if machinectl_path: @@ -168,27 +184,26 @@ class Stream(mitogen.parent.Stream): except AttributeError: pass - if self.username: - try: - os.setgroups([grent.gr_gid - for grent in grp.getgrall() - if self.username in grent.gr_mem]) - pwent = pwd.getpwnam(self.username) - os.setreuid(pwent.pw_uid, pwent.pw_uid) - # shadow-4.4/libmisc/setupenv.c. Not done: MAIL, PATH - os.environ.update({ - 'HOME': pwent.pw_dir, - 'SHELL': pwent.pw_shell or '/bin/sh', - 'LOGNAME': self.username, - 'USER': self.username, - }) - if ((os.path.exists(pwent.pw_dir) and - os.access(pwent.pw_dir, os.X_OK))): - os.chdir(pwent.pw_dir) - except Exception: - e = sys.exc_info()[1] - raise Error(self.username_msg, self.username, self.container, - type(e).__name__, e) + try: + os.setgroups([grent.gr_gid + for grent in grp.getgrall() + if self.username in grent.gr_mem]) + pwent = pwd.getpwnam(self.username) + os.setreuid(pwent.pw_uid, pwent.pw_uid) + # shadow-4.4/libmisc/setupenv.c. Not done: MAIL, PATH + os.environ.update({ + 'HOME': pwent.pw_dir, + 'SHELL': pwent.pw_shell or '/bin/sh', + 'LOGNAME': self.username, + 'USER': self.username, + }) + if ((os.path.exists(pwent.pw_dir) and + os.access(pwent.pw_dir, os.X_OK))): + os.chdir(pwent.pw_dir) + except Exception: + e = sys.exc_info()[1] + raise Error(self.username_msg, self.username, self.container, + type(e).__name__, e) username_msg = 'while transitioning to user %r in container %r: %s: %s' diff --git a/mitogen/ssh.py b/mitogen/ssh.py index 25928b45..ee97425b 100644 --- a/mitogen/ssh.py +++ b/mitogen/ssh.py @@ -111,7 +111,6 @@ class HostKeyError(mitogen.core.StreamError): class Stream(mitogen.parent.Stream): - create_child = staticmethod(mitogen.parent.hybrid_tty_create_child) child_is_immediate_subprocess = False #: Default to whatever is available as 'python' on the remote machine, @@ -121,8 +120,8 @@ class Stream(mitogen.parent.Stream): #: Number of -v invocations to pass on command line. ssh_debug_level = 0 - #: Once connected, points to the corresponding TtyLogStream, allowing it to - #: be disconnected at the same time this stream is being torn down. + #: If batch_mode=False, points to the corresponding DiagLogStream, allowing + #: it to be disconnected at the same time this stream is being torn down. tty_stream = None #: The path to the SSH binary. @@ -142,7 +141,7 @@ class Stream(mitogen.parent.Stream): check_host_keys='enforce', password=None, identity_file=None, compression=True, ssh_args=None, keepalive_enabled=True, keepalive_count=3, keepalive_interval=15, - ssh_debug_level=None, **kwargs): + identities_only=True, ssh_debug_level=None, **kwargs): super(Stream, self).construct(**kwargs) if check_host_keys not in ('accept', 'enforce', 'ignore'): raise ValueError(self.check_host_keys_msg) @@ -153,6 +152,7 @@ class Stream(mitogen.parent.Stream): self.check_host_keys = check_host_keys self.password = password self.identity_file = identity_file + self.identities_only = identities_only self.compression = compression self.keepalive_enabled = keepalive_enabled self.keepalive_count = keepalive_count @@ -164,8 +164,33 @@ class Stream(mitogen.parent.Stream): if ssh_debug_level: self.ssh_debug_level = ssh_debug_level + self._init_create_child() + + def _requires_pty(self): + """ + Return :data:`True` if the configuration requires a PTY to be + allocated. This is only true if we must interactively accept host keys, + or type a password. + """ + return (self.check_host_keys == 'accept' or + self.password is not None) + + def _init_create_child(self): + """ + Initialize the base class :attr:`create_child` and + :attr:`create_child_args` according to whether we need a PTY or not. + """ + if self._requires_pty(): + self.create_child = mitogen.parent.hybrid_tty_create_child + else: + self.create_child = mitogen.parent.create_child + self.create_child_args = { + 'stderr_pipe': True, + } + def on_disconnect(self, broker): - self.tty_stream.on_disconnect(broker) + if self.tty_stream is not None: + self.tty_stream.on_disconnect(broker) super(Stream, self).on_disconnect(broker) def get_boot_command(self): @@ -181,7 +206,7 @@ class Stream(mitogen.parent.Stream): bits += ['-l', self.username] if self.port is not None: bits += ['-p', str(self.port)] - if self.identity_file or self.password: + if self.identities_only and (self.identity_file or self.password): bits += ['-o', 'IdentitiesOnly yes'] if self.identity_file: bits += ['-i', self.identity_file] @@ -192,6 +217,8 @@ class Stream(mitogen.parent.Stream): '-o', 'ServerAliveInterval %s' % (self.keepalive_interval,), '-o', 'ServerAliveCountMax %s' % (self.keepalive_count,), ] + if not self._requires_pty(): + bits += ['-o', 'BatchMode yes'] if self.check_host_keys == 'enforce': bits += ['-o', 'StrictHostKeyChecking yes'] if self.check_host_keys == 'accept': @@ -231,7 +258,7 @@ class Stream(mitogen.parent.Stream): def _host_key_prompt(self): if self.check_host_keys == 'accept': LOG.debug('%r: accepting host key', self) - self.tty_stream.transmit_side.write('y\n') + self.tty_stream.transmit_side.write(b('y\n')) return # _host_key_prompt() should never be reached with ignore or enforce @@ -239,38 +266,51 @@ class Stream(mitogen.parent.Stream): # with ours. raise HostKeyError(self.hostkey_config_msg) + def _ec0_received(self): + if self.tty_stream is not None: + self._router.broker.start_receive(self.tty_stream) + return super(Stream, self)._ec0_received() + def _connect_bootstrap(self, extra_fd): - self.tty_stream = mitogen.parent.TtyLogStream(extra_fd, self) + fds = [self.receive_side.fd] + if extra_fd is not None: + self.tty_stream = mitogen.parent.DiagLogStream(extra_fd, self) + fds.append(extra_fd) - password_sent = False - it = mitogen.parent.iter_read( - fds=[self.receive_side.fd, extra_fd], - deadline=self.connect_deadline - ) + it = mitogen.parent.iter_read(fds=fds, deadline=self.connect_deadline) + password_sent = False for buf, partial in filter_debug(self, it): LOG.debug('%r: received %r', self, buf) if buf.endswith(self.EC0_MARKER): - self._router.broker.start_receive(self.tty_stream) self._ec0_received() return elif HOSTKEY_REQ_PROMPT in buf.lower(): self._host_key_prompt() elif HOSTKEY_FAIL in buf.lower(): raise HostKeyError(self.hostkey_failed_msg) - elif buf.lower().startswith(PERMDENIED_PROMPT): + elif buf.lower().startswith(( + PERMDENIED_PROMPT, + b("%s@%s: " % (self.username, self.hostname)) + + PERMDENIED_PROMPT, + )): # issue #271: work around conflict with user shell reporting # 'permission denied' e.g. during chdir($HOME) by only matching # it at the start of the line. if self.password is not None and password_sent: raise PasswordError(self.password_incorrect_msg) + elif PASSWORD_PROMPT in buf and self.password is None: + # Permission denied (password,pubkey) + raise PasswordError(self.password_required_msg) else: raise PasswordError(self.auth_incorrect_msg) elif partial and PASSWORD_PROMPT in buf.lower(): if self.password is None: raise PasswordError(self.password_required_msg) LOG.debug('%r: sending password', self) - self.tty_stream.transmit_side.write((self.password + '\n').encode()) + self.tty_stream.transmit_side.write( + (self.password + '\n').encode() + ) password_sent = True raise mitogen.core.StreamError('bootstrap failed') diff --git a/mitogen/su.py b/mitogen/su.py index 45229d6d..7e2e5f08 100644 --- a/mitogen/su.py +++ b/mitogen/su.py @@ -49,7 +49,7 @@ class Stream(mitogen.parent.Stream): create_child = staticmethod(mitogen.parent.tty_create_child) child_is_immediate_subprocess = False - #: Once connected, points to the corresponding TtyLogStream, allowing it to + #: Once connected, points to the corresponding DiagLogStream, allowing it to #: be disconnected at the same time this stream is being torn down. username = 'root' diff --git a/mitogen/sudo.py b/mitogen/sudo.py index 1ec5c2f6..c410dac9 100644 --- a/mitogen/sudo.py +++ b/mitogen/sudo.py @@ -49,7 +49,7 @@ SUDO_OPTIONS = [ #(False, 'str', '--group', '-g') (True, 'bool', '--set-home', '-H'), #(False, 'str', '--host', '-h') - #(False, 'bool', '--login', '-i') + (False, 'bool', '--login', '-i'), #(False, 'bool', '--remove-timestamp', '-K') #(False, 'bool', '--reset-timestamp', '-k') #(False, 'bool', '--list', '-l') @@ -107,7 +107,7 @@ class Stream(mitogen.parent.Stream): create_child = staticmethod(mitogen.parent.hybrid_tty_create_child) child_is_immediate_subprocess = False - #: Once connected, points to the corresponding TtyLogStream, allowing it to + #: Once connected, points to the corresponding DiagLogStream, allowing it to #: be disconnected at the same time this stream is being torn down. tty_stream = None @@ -116,10 +116,11 @@ class Stream(mitogen.parent.Stream): password = None preserve_env = False set_home = False + login = False def construct(self, username=None, sudo_path=None, password=None, preserve_env=None, set_home=None, sudo_args=None, - **kwargs): + login=None, **kwargs): super(Stream, self).construct(**kwargs) opts = parse_sudo_flags(sudo_args or []) @@ -133,6 +134,8 @@ class Stream(mitogen.parent.Stream): self.preserve_env = preserve_env or opts.preserve_env if (set_home or opts.set_home) is not None: self.set_home = set_home or opts.set_home + if (login or opts.login) is not None: + self.login = True def connect(self): super(Stream, self).connect() @@ -144,13 +147,16 @@ class Stream(mitogen.parent.Stream): def get_boot_command(self): # Note: sudo did not introduce long-format option processing until July - # 2013, so even though we parse long-format options, we always supply - # short-form to the sudo command. + # 2013, so even though we parse long-format options, supply short-form + # to the sudo command. bits = [self.sudo_path, '-u', self.username] if self.preserve_env: bits += ['-E'] if self.set_home: bits += ['-H'] + if self.login: + bits += ['-i'] + bits = bits + super(Stream, self).get_boot_command() LOG.debug('sudo command line: %r', bits) return bits @@ -159,7 +165,7 @@ class Stream(mitogen.parent.Stream): password_required_msg = 'sudo password is required' def _connect_bootstrap(self, extra_fd): - self.tty_stream = mitogen.parent.TtyLogStream(extra_fd, self) + self.tty_stream = mitogen.parent.DiagLogStream(extra_fd, self) password_sent = False it = mitogen.parent.iter_read( diff --git a/mitogen/unix.py b/mitogen/unix.py index efcc59cc..4a4dfb65 100644 --- a/mitogen/unix.py +++ b/mitogen/unix.py @@ -63,7 +63,7 @@ def make_socket_path(): class Listener(mitogen.core.BasicStream): keep_alive = True - def __init__(self, router, path=None, backlog=30): + def __init__(self, router, path=None, backlog=100): self._router = router self.path = path or make_socket_path() self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) @@ -78,22 +78,39 @@ class Listener(mitogen.core.BasicStream): self.receive_side = mitogen.core.Side(self, self._sock.fileno()) router.broker.start_receive(self) - def on_receive(self, broker): - sock, _ = self._sock.accept() + def _accept_client(self, sock): sock.setblocking(True) - pid, = struct.unpack('>L', sock.recv(4)) + try: + pid, = struct.unpack('>L', sock.recv(4)) + except socket.error: + LOG.error('%r: failed to read remote identity: %s', + self, sys.exc_info()[1]) + return context_id = self._router.id_allocator.allocate() context = mitogen.parent.Context(self._router, context_id) stream = mitogen.core.Stream(self._router, context_id) - stream.accept(sock.fileno(), sock.fileno()) stream.name = u'unix_client.%d' % (pid,) stream.auth_id = mitogen.context_id stream.is_privileged = True + + try: + sock.send(struct.pack('>LLL', context_id, mitogen.context_id, + os.getpid())) + except socket.error: + LOG.error('%r: failed to assign identity to PID %d: %s', + self, pid, sys.exc_info()[1]) + return + + stream.accept(sock.fileno(), sock.fileno()) self._router.register(context, stream) - sock.send(struct.pack('>LLL', context_id, mitogen.context_id, - os.getpid())) - sock.close() + + def on_receive(self, broker): + sock, _ = self._sock.accept() + try: + self._accept_client(sock) + finally: + sock.close() def connect(path, broker=None): diff --git a/preamble_size.py b/preamble_size.py index 66d5ccf3..bf3b5950 100644 --- a/preamble_size.py +++ b/preamble_size.py @@ -4,6 +4,7 @@ contexts. """ import inspect +import sys import zlib import mitogen.fakessh @@ -24,6 +25,10 @@ print('Preamble size: %s (%.2fKiB)' % ( len(stream.get_preamble()), len(stream.get_preamble()) / 1024.0, )) +if '--dump' in sys.argv: + print(zlib.decompress(stream.get_preamble())) + exit() + print( ' ' diff --git a/run_tests b/run_tests index 122cd79e..65bf1fef 100755 --- a/run_tests +++ b/run_tests @@ -6,15 +6,32 @@ echo '-------------------' echo set -o errexit -set -o nounset set -o pipefail UNIT2="$(which unit2)" coverage erase -coverage run "${UNIT2}" discover \ - --start-directory "tests" \ - --pattern '*_test.py' \ - "$@" + +# First run overwites coverage output. +[ "$SKIP_MITOGEN" ] || { + coverage run "${UNIT2}" discover \ + --start-directory "tests" \ + --pattern '*_test.py' \ + "$@" +} + +# Second run appends. This is since 'discover' treats subdirs as packages and +# the 'ansible' subdir shadows the real Ansible package when it contains +# __init__.py, so hack around it by just running again with 'ansible' as the +# start directory. Alternative seems to be renaming tests/ansible/ and making a +# mess of Git history. +[ "$SKIP_ANSIBLE" ] || { + export PYTHONPATH=`pwd`/tests:$PYTHONPATH + coverage run -a "${UNIT2}" discover \ + --start-directory "tests/ansible" \ + --pattern '*_test.py' \ + "$@" +} + coverage html echo coverage report is at "file://$(pwd)/htmlcov/index.html" diff --git a/tests/README.md b/tests/README.md index f5bbbc41..51464989 100644 --- a/tests/README.md +++ b/tests/README.md @@ -73,13 +73,16 @@ also by Ansible's `osx_setup.yml`. used to target this account, the parent session requires a TTY and the account password must be entered. -`mitogen__user1` .. `mitogen__user21` +`mitogen__user1` .. `mitogen__user5` These accounts do not have passwords set. They exist to test the Ansible interpreter recycling logic. +`mitogen__sudo1` .. `mitogen__sudo4` + May passwordless sudo to any account. + `mitogen__webapp` A plain old account with no sudo access, used as the target for fakessh - tddests. + tests. # Ansible Integration Test Environment diff --git a/tests/ansible/.gitignore b/tests/ansible/.gitignore index 1ea0ada7..8d473777 100644 --- a/tests/ansible/.gitignore +++ b/tests/ansible/.gitignore @@ -1,2 +1,3 @@ lib/modules/custom_binary_producing_junk lib/modules/custom_binary_producing_json +hosts/*.local diff --git a/tests/ansible/Makefile b/tests/ansible/Makefile index d9bdc521..1d4ab1dd 100644 --- a/tests/ansible/Makefile +++ b/tests/ansible/Makefile @@ -1,10 +1,16 @@ -all: \ - lib/modules/custom_binary_producing_junk \ - lib/modules/custom_binary_producing_json +SYSTEM=$(shell uname -s) -lib/modules/custom_binary_producing_junk: lib/modules.src/custom_binary_producing_junk.c +TARGETS+=lib/modules/custom_binary_producing_junk_$(SYSTEM) +TARGETS+=lib/modules/custom_binary_producing_json_$(SYSTEM) + +all: clean $(TARGETS) + +lib/modules/custom_binary_producing_junk_$(SYSTEM): lib/modules.src/custom_binary_producing_junk.c $(CC) -o $@ $< -lib/modules/custom_binary_producing_json: lib/modules.src/custom_binary_producing_json.c +lib/modules/custom_binary_producing_json_$(SYSTEM): lib/modules.src/custom_binary_producing_json.c $(CC) -o $@ $< + +clean: + rm -f $(TARGETS) diff --git a/tests/ansible/README.md b/tests/ansible/README.md index a76c7c1f..46320951 100644 --- a/tests/ansible/README.md +++ b/tests/ansible/README.md @@ -1,5 +1,5 @@ -# ``tests/ansible`` Directory +# `tests/ansible` Directory This is an an organically growing collection of integration and regression tests used for development and end-user bug reports. @@ -10,10 +10,10 @@ demonstrator for what does and doesn't work. ## Preparation -For OS X, run the ``osx_setup.yml`` script to create a bunch of users. +See `../image_prep/README.md`. -## ``run_ansible_playbook.sh`` +## `run_ansible_playbook.sh` This is necessary to set some environment variables used by future tests, as there appears to be no better way to inject them into the top-level process @@ -22,6 +22,19 @@ environment before the Mitogen connection process forks. ## Running Everything -``` -ANSIBLE_STRATEGY=mitogen_linear ./run_ansible_playbook.sh all.yml -``` +`ANSIBLE_STRATEGY=mitogen_linear ./run_ansible_playbook.sh all.yml` + + +## `hosts/` and `common-hosts` + +To support running the tests against a dev machine that has the requisite user +accounts, the the default inventory is a directory containing a 'localhost' +file that defines 'localhost' to be named 'target' in Ansible inventory, and a +symlink to 'common-hosts', which defines additional targets that all derive +from 'target'. + +This allows `ansible_tests.sh` to reuse the common-hosts definitions while +replacing localhost as the test target by creating a new directory that +similarly symlinks in common-hosts. + +There may be a better solution for this, but it works fine for now. diff --git a/tests/ansible/ansible.cfg b/tests/ansible/ansible.cfg index 7bf849d5..3897519b 100644 --- a/tests/ansible/ansible.cfg +++ b/tests/ansible/ansible.cfg @@ -7,9 +7,12 @@ callback_plugins = lib/callback stdout_callback = nice_stdout vars_plugins = lib/vars library = lib/modules -# module_utils = lib/module_utils +module_utils = lib/module_utils retry_files_enabled = False -forks = 50 +display_args_to_stdout = True +forks = 100 + +no_target_syslog = True # Required by integration/ssh/timeouts.yml timeout = 10 @@ -17,10 +20,6 @@ timeout = 10 # On Travis, paramiko check fails due to host key checking enabled. host_key_checking = False -# "mitogen-tests" required by integration/runner/remote_tmp.yml -# "$HOME" required by integration/action/make_tmp_path.yml -remote_tmp = $HOME/.ansible/mitogen-tests/ - [ssh_connection] ssh_args = -o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s pipelining = True diff --git a/tests/ansible/bench/loop-100-items.yml b/tests/ansible/bench/loop-100-items.yml new file mode 100644 index 00000000..0feb57c5 --- /dev/null +++ b/tests/ansible/bench/loop-100-items.yml @@ -0,0 +1,10 @@ +# Execute 'hostname' 100 times in a loop. Loops execute within TaskExecutor +# within a single WorkerProcess, each iteration is a fair approximation of the +# non-controller overhead involved in executing a task. +# +# See also: loop-100-tasks.yml +# +- hosts: all + tasks: + - command: hostname + with_sequence: start=1 end=100 diff --git a/tests/ansible/bench/loop-100-tasks.yml b/tests/ansible/bench/loop-100-tasks.yml new file mode 100644 index 00000000..bf6e31b8 --- /dev/null +++ b/tests/ansible/bench/loop-100-tasks.yml @@ -0,0 +1,112 @@ +# Execute 'hostname' 100 times, using 100 individual tasks. Each task causes a +# new WorkerProcess to be forked, along with get_vars() calculation, and in the +# Mitogen extension, reestablishment of the UNIX socket connectionto the +# multiplexer process. +# +# It does not measure at least module dependency scanning (cached after first +# iteration). +# +# See also: loop-100-items.yml +# +- hosts: all + tasks: + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname + - command: hostname diff --git a/tests/ansible/bench/loop-20-templates.yml b/tests/ansible/bench/loop-20-templates.yml new file mode 100644 index 00000000..df994bd8 --- /dev/null +++ b/tests/ansible/bench/loop-20-templates.yml @@ -0,0 +1,14 @@ + +- hosts: all + tasks: + - file: + dest: /tmp/templates + state: "{{item}}" + with_items: ["absent", "directory"] + + - copy: + dest: /tmp/templates/{{item}} + mode: 0755 + content: + Hello from {{item}} + with_sequence: start=1 end=20 diff --git a/tests/ansible/compare_output_test.py b/tests/ansible/compare_output_test.py index 5a091f15..e4a3565f 100755 --- a/tests/ansible/compare_output_test.py +++ b/tests/ansible/compare_output_test.py @@ -6,7 +6,6 @@ import re import subprocess import tempfile - LOG = logging.getLogger(__name__) suffixes = [ @@ -42,21 +41,22 @@ def run(s): return fp.read() -logging.basicConfig(level=logging.DEBUG) - -for suffix in suffixes: - ansible = run('ansible localhost %s' % (suffix,)) - mitogen = run('ANSIBLE_STRATEGY=mitogen ansible localhost %s' % (suffix,)) - - diff = list(difflib.unified_diff( - a=fixup(ansible).splitlines(), - b=fixup(mitogen).splitlines(), - fromfile='ansible-output.txt', - tofile='mitogen-output.txt', - )) - if diff: - print('++ differ! suffix: %r' % (suffix,)) - for line in diff: - print(line) - print - print +if __name__ == '__main__': + logging.basicConfig(level=logging.DEBUG) + + for suffix in suffixes: + ansible = run('ansible localhost %s' % (suffix,)) + mitogen = run('ANSIBLE_STRATEGY=mitogen ansible localhost %s' % (suffix,)) + + diff = list(difflib.unified_diff( + a=fixup(ansible).splitlines(), + b=fixup(mitogen).splitlines(), + fromfile='ansible-output.txt', + tofile='mitogen-output.txt', + )) + if diff: + print('++ differ! suffix: %r' % (suffix,)) + for line in diff: + print(line) + print + print diff --git a/tests/ansible/gcloud/controller.yml b/tests/ansible/gcloud/controller.yml index 48f233d9..494c2164 100644 --- a/tests/ansible/gcloud/controller.yml +++ b/tests/ansible/gcloud/controller.yml @@ -1,6 +1,28 @@ - hosts: controller + vars: + git_username: '{{ lookup("pipe", "git config --global user.name") }}' + git_email: '{{ lookup("pipe", "git config --global user.email") }}' + tasks: + - lineinfile: + line: "{{item}}" + path: /etc/sysctl.conf + register: sysctl_conf + become: true + with_items: + - "net.ipv4.ip_forward=1" + - "kernel.perf_event_paranoid=-1" + + - copy: + src: ~/.ssh/id_gitlab + dest: ~/.ssh/id_gitlab + mode: 0600 + + - template: + dest: ~/.ssh/config + src: ssh_config.j2 + - lineinfile: line: "net.ipv4.ip_forward=1" path: /etc/sysctl.conf @@ -32,6 +54,11 @@ - shell: "rsync -a ~/.ssh {{inventory_hostname}}:" connection: local + - shell: | + git config --global user.email "{{git_username}}" + git config --global user.name "{{git_email}}" + name: set_git_config + - git: dest: ~/mitogen repo: https://github.com/dw/mitogen.git @@ -56,6 +83,10 @@ editable: true name: ~/ansible + - pip: + virtualenv: ~/venv + name: debops + - lineinfile: line: "source $HOME/venv/bin/activate" path: ~/.profile diff --git a/tests/ansible/gcloud/hosts b/tests/ansible/gcloud/hosts index b4562cb5..453320e6 100644 --- a/tests/ansible/gcloud/hosts +++ b/tests/ansible/gcloud/hosts @@ -1,2 +1,2 @@ [controller] -35.206.145.240 +c diff --git a/tests/ansible/gcloud/templates/ansible.cfg.j2 b/tests/ansible/gcloud/templates/ansible.cfg.j2 new file mode 100644 index 00000000..aa31c571 --- /dev/null +++ b/tests/ansible/gcloud/templates/ansible.cfg.j2 @@ -0,0 +1,19 @@ +[defaults] +inventory = hosts,~/mitogen/tests/ansible/lib/inventory +gathering = explicit +strategy_plugins = ~/mitogen/ansible_mitogen/plugins/strategy +action_plugins = ~/mitogen/tests/ansible/lib/action +callback_plugins = ~/mitogen/tests/ansible/lib/callback +stdout_callback = nice_stdout +vars_plugins = ~/mitogen/tests/ansible/lib/vars +library = ~/mitogen/tests/ansible/lib/modules +retry_files_enabled = False +forks = 50 + +strategy = mitogen_linear + +host_key_checking = False + +[ssh_connection] +ssh_args = -o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s +pipelining = True diff --git a/tests/ansible/gcloud/templates/ssh_config.j2 b/tests/ansible/gcloud/templates/ssh_config.j2 new file mode 100644 index 00000000..2a65bfe7 --- /dev/null +++ b/tests/ansible/gcloud/templates/ssh_config.j2 @@ -0,0 +1,6 @@ + +Host localhost-* + Hostname localhost + +Host gitlab.com + IdentityFile ~/.ssh/id_gitlab diff --git a/tests/ansible/hosts.docker b/tests/ansible/hosts.docker deleted file mode 100644 index 01a2aff7..00000000 --- a/tests/ansible/hosts.docker +++ /dev/null @@ -1,100 +0,0 @@ -mydeb9-1 ansible_connection=docker -mydeb9-2 ansible_connection=docker -mydeb9-3 ansible_connection=docker -mydeb9-4 ansible_connection=docker -mydeb9-5 ansible_connection=docker -mydeb9-6 ansible_connection=docker -mydeb9-7 ansible_connection=docker -mydeb9-8 ansible_connection=docker -mydeb9-9 ansible_connection=docker -mydeb9-10 ansible_connection=docker -mydeb9-11 ansible_connection=docker -mydeb9-12 ansible_connection=docker -mydeb9-13 ansible_connection=docker -mydeb9-14 ansible_connection=docker -mydeb9-15 ansible_connection=docker -mydeb9-16 ansible_connection=docker -mydeb9-17 ansible_connection=docker -mydeb9-18 ansible_connection=docker -mydeb9-19 ansible_connection=docker -mydeb9-20 ansible_connection=docker -mydeb9-21 ansible_connection=docker -mydeb9-22 ansible_connection=docker -mydeb9-23 ansible_connection=docker -mydeb9-24 ansible_connection=docker -mydeb9-25 ansible_connection=docker -mydeb9-26 ansible_connection=docker -mydeb9-27 ansible_connection=docker -mydeb9-28 ansible_connection=docker -mydeb9-29 ansible_connection=docker -mydeb9-30 ansible_connection=docker -mydeb9-31 ansible_connection=docker -mydeb9-32 ansible_connection=docker -mydeb9-33 ansible_connection=docker -mydeb9-34 ansible_connection=docker -mydeb9-35 ansible_connection=docker -mydeb9-36 ansible_connection=docker -mydeb9-37 ansible_connection=docker -mydeb9-38 ansible_connection=docker -mydeb9-39 ansible_connection=docker -mydeb9-40 ansible_connection=docker -mydeb9-41 ansible_connection=docker -mydeb9-42 ansible_connection=docker -mydeb9-43 ansible_connection=docker -mydeb9-44 ansible_connection=docker -mydeb9-45 ansible_connection=docker -mydeb9-46 ansible_connection=docker -mydeb9-47 ansible_connection=docker -mydeb9-48 ansible_connection=docker -mydeb9-49 ansible_connection=docker -mydeb9-50 ansible_connection=docker -mydeb9-51 ansible_connection=docker -mydeb9-52 ansible_connection=docker -mydeb9-53 ansible_connection=docker -mydeb9-54 ansible_connection=docker -mydeb9-55 ansible_connection=docker -mydeb9-56 ansible_connection=docker -mydeb9-57 ansible_connection=docker -mydeb9-58 ansible_connection=docker -mydeb9-59 ansible_connection=docker -mydeb9-60 ansible_connection=docker -mydeb9-61 ansible_connection=docker -mydeb9-62 ansible_connection=docker -mydeb9-63 ansible_connection=docker -mydeb9-64 ansible_connection=docker -mydeb9-65 ansible_connection=docker -mydeb9-66 ansible_connection=docker -mydeb9-67 ansible_connection=docker -mydeb9-68 ansible_connection=docker -mydeb9-69 ansible_connection=docker -mydeb9-70 ansible_connection=docker -mydeb9-71 ansible_connection=docker -mydeb9-72 ansible_connection=docker -mydeb9-73 ansible_connection=docker -mydeb9-74 ansible_connection=docker -mydeb9-75 ansible_connection=docker -mydeb9-76 ansible_connection=docker -mydeb9-77 ansible_connection=docker -mydeb9-78 ansible_connection=docker -mydeb9-79 ansible_connection=docker -mydeb9-80 ansible_connection=docker -mydeb9-81 ansible_connection=docker -mydeb9-82 ansible_connection=docker -mydeb9-83 ansible_connection=docker -mydeb9-84 ansible_connection=docker -mydeb9-85 ansible_connection=docker -mydeb9-86 ansible_connection=docker -mydeb9-87 ansible_connection=docker -mydeb9-88 ansible_connection=docker -mydeb9-89 ansible_connection=docker -mydeb9-90 ansible_connection=docker -mydeb9-91 ansible_connection=docker -mydeb9-92 ansible_connection=docker -mydeb9-93 ansible_connection=docker -mydeb9-94 ansible_connection=docker -mydeb9-95 ansible_connection=docker -mydeb9-96 ansible_connection=docker -mydeb9-97 ansible_connection=docker -mydeb9-98 ansible_connection=docker -mydeb9-99 ansible_connection=docker -mydeb9-100 ansible_connection=docker diff --git a/tests/ansible/hosts/common-hosts b/tests/ansible/hosts/common-hosts new file mode 100644 index 00000000..cf84d2d1 --- /dev/null +++ b/tests/ansible/hosts/common-hosts @@ -0,0 +1,43 @@ +# vim: syntax=dosini + + +# This must be defined explicitly, otherwise _create_implicit_localhost() +# generates its own copy, which includes an ansible_python_interpreter that +# varies according to host machine. +localhost + +[connection-delegation-test] +cd-bastion +cd-rack11 mitogen_via=ssh-user@cd-bastion +cd-rack11a mitogen_via=root@cd-rack11 +cd-rack11a-docker mitogen_via=docker-admin@cd-rack11a ansible_connection=docker + +[connection-delegation-cycle] +# Create cycle with Docker container. +cdc-bastion mitogen_via=cdc-rack11a-docker +cdc-rack11 mitogen_via=ssh-user@cdc-bastion +cdc-rack11a mitogen_via=root@cdc-rack11 +cdc-rack11a-docker mitogen_via=docker-admin@cdc-rack11a ansible_connection=docker + +[conn-delegation] +cd-user1 ansible_user=mitogen__user1 ansible_connection=mitogen_sudo mitogen_via=target + + +# Connection delegation scenarios. It's impossible to connection to them, but +# you can inspect the would-be config via "mitogen_get_stack" action. +[cd-no-connect] +# Normal inventory host, no aliasing. +cd-normal ansible_connection=mitogen_doas ansible_user=normal-user +# Inventory host that is really a different host. +cd-alias ansible_connection=ssh ansible_user=alias-user ansible_host=alias-host + +# Via one normal host. +cd-normal-normal mitogen_via=cd-normal +# Via one aliased host. +cd-normal-alias mitogen_via=cd-alias + +# newuser@host via host with explicit username. +cd-newuser-normal-normal mitogen_via=cd-normal ansible_user=newuser-normal-normal-user + +# doas:newuser via host. +cd-newuser-doas-normal mitogen_via=cd-normal ansible_connection=mitogen_doas ansible_user=newuser-doas-normal-user diff --git a/tests/ansible/hosts b/tests/ansible/hosts/connection-delegation similarity index 94% rename from tests/ansible/hosts rename to tests/ansible/hosts/connection-delegation index 45bfb9ef..2fb87455 100644 --- a/tests/ansible/hosts +++ b/tests/ansible/hosts/connection-delegation @@ -1,7 +1,3 @@ - -[test-targets] -localhost - [connection-delegation-test] cd-bastion cd-rack11 mitogen_via=ssh-user@cd-bastion diff --git a/tests/ansible/hosts/group_vars/osa-all-containers.yml b/tests/ansible/hosts/group_vars/osa-all-containers.yml new file mode 100644 index 00000000..4f38fcb4 --- /dev/null +++ b/tests/ansible/hosts/group_vars/osa-all-containers.yml @@ -0,0 +1,4 @@ +--- + +ansible_connection: setns +mitogen_kind: lxc diff --git a/tests/ansible/hosts/issue340 b/tests/ansible/hosts/issue340 new file mode 100644 index 00000000..3caa95a9 --- /dev/null +++ b/tests/ansible/hosts/issue340 @@ -0,0 +1,12 @@ +# Connection Delegation issue #340 reproduction. +# Path to jails is SSH to H -> mitogen_sudo to root -> jail to J + +[issue340] +# 'target' plays the role of the normal host machine H. +# 'mitogen__sudo1' plays the role of root@H via mitogen_sudo. +# 'mitogen__user1' plays the role of root@J via mitogen__user1. +# 'mitogen__user2' plays the role of E, the delgate_to target for certs. + +i340-root ansible_user=mitogen__sudo1 ansible_connection=mitogen_sudo mitogen_via=target +i340-jail ansible_user=mitogen__user1 ansible_connection=mitogen_sudo mitogen_via=i340-root +i340-certs ansible_user=mitogen__user2 ansible_connection=mitogen_sudo mitogen_via=target diff --git a/tests/ansible/hosts/k3 b/tests/ansible/hosts/k3 new file mode 100644 index 00000000..1a7190d8 --- /dev/null +++ b/tests/ansible/hosts/k3 @@ -0,0 +1,25 @@ +k3 + +[k3-x10] +k3-[01:10] + +[k3-x20] +k3-[01:20] + +[k3-x50] +k3-[01:50] + +[k3-x100] +k3-[001:100] + +[k3-x200] +k3-[001:200] + +[k3-x300] +k3-[001:300] + +[k3-x400] +k3-[001:400] + +[k3-x500] +k3-[001:500] diff --git a/tests/ansible/hosts/localhost b/tests/ansible/hosts/localhost new file mode 100644 index 00000000..f4dab2ab --- /dev/null +++ b/tests/ansible/hosts/localhost @@ -0,0 +1,8 @@ +localhost +target ansible_host=localhost + +[test-targets] +target + +[localhost-x10] +localhost-[01:10] diff --git a/tests/ansible/hosts/nessy b/tests/ansible/hosts/nessy new file mode 100644 index 00000000..5cdef123 --- /dev/null +++ b/tests/ansible/hosts/nessy @@ -0,0 +1,10 @@ +nessy + +[nessy-x10] +nessy-[00:10] + +[nessy-x20] +nessy-[00:20] + +[nessy-x50] +nessy-[00:50] diff --git a/tests/ansible/hosts/osa-containers b/tests/ansible/hosts/osa-containers new file mode 100644 index 00000000..7ff2c2b6 --- /dev/null +++ b/tests/ansible/hosts/osa-containers @@ -0,0 +1,9 @@ +# integration/delegation/delegate_to_container.yml + +# Patterned after openstack-ansible/all_containers.yml +osa-host-machine ansible_host=172.29.236.100 + +[osa-all-containers] +osa-container-1 container_tech=lxc +osa-container-2 container_tech=lxc +osa-container-3 container_tech=lxc diff --git a/tests/ansible/integration/action/all.yml b/tests/ansible/integration/action/all.yml index ebbff26a..018973a9 100644 --- a/tests/ansible/integration/action/all.yml +++ b/tests/ansible/integration/action/all.yml @@ -1,5 +1,9 @@ -- import_playbook: remote_file_exists.yml -- import_playbook: remote_expand_user.yml +- import_playbook: copy.yml +- import_playbook: fixup_perms2__copy.yml - import_playbook: low_level_execute_command.yml - import_playbook: make_tmp_path.yml +- import_playbook: remote_expand_user.yml +- import_playbook: remote_file_exists.yml +- import_playbook: remove_tmp_path.yml +- import_playbook: synchronize.yml - import_playbook: transfer_data.yml diff --git a/tests/ansible/integration/action/copy.yml b/tests/ansible/integration/action/copy.yml new file mode 100644 index 00000000..d799be90 --- /dev/null +++ b/tests/ansible/integration/action/copy.yml @@ -0,0 +1,83 @@ +# Verify copy module for small and large files, and inline content. + +- name: integration/action/synchronize.yml + hosts: test-targets + any_errors_fatal: true + tasks: + - copy: + dest: /tmp/copy-tiny-file + content: + this is a tiny file. + connection: local + + - copy: + dest: /tmp/copy-large-file + # Must be larger than Connection.SMALL_SIZE_LIMIT. + content: "{% for x in range(200000) %}x{% endfor %}" + connection: local + + # end of making files + + - file: + state: absent + path: "{{item}}" + with_items: + - /tmp/copy-tiny-file.out + - /tmp/copy-large-file.out + - /tmp/copy-tiny-inline-file.out + - /tmp/copy-large-inline-file.out + + # end of cleaning out files + + - copy: + dest: /tmp/copy-large-file.out + src: /tmp/copy-large-file + + - copy: + dest: /tmp/copy-tiny-file.out + src: /tmp/copy-tiny-file + + - copy: + dest: /tmp/copy-tiny-inline-file.out + content: "tiny inline content" + + - copy: + dest: /tmp/copy-large-inline-file.out + content: | + {% for x in range(200000) %}y{% endfor %} + + # stat results + + - stat: + path: "{{item}}" + with_items: + - /tmp/copy-tiny-file.out + - /tmp/copy-large-file.out + - /tmp/copy-tiny-inline-file.out + - /tmp/copy-large-inline-file.out + register: stat + + - assert: + that: + - stat.results[0].stat.checksum == "f29faa9a6f19a700a941bf2aa5b281643c4ec8a0" + - stat.results[1].stat.checksum == "62951f943c41cdd326e5ce2b53a779e7916a820d" + - stat.results[2].stat.checksum == "b26dd6444595e2bdb342aa0a91721b57478b5029" + - stat.results[3].stat.checksum == "d675f47e467eae19e49032a2cc39118e12a6ee72" + + - file: + state: absent + path: "{{item}}" + with_items: + - /tmp/copy-tiny-file + - /tmp/copy-tiny-file.out + - /tmp/copy-no-mode + - /tmp/copy-no-mode.out + - /tmp/copy-with-mode + - /tmp/copy-with-mode.out + - /tmp/copy-large-file + - /tmp/copy-large-file.out + - /tmp/copy-tiny-inline-file.out + - /tmp/copy-large-inline-file + - /tmp/copy-large-inline-file.out + + # end of cleaning out files (again) diff --git a/tests/ansible/integration/action/fixup_perms2__copy.yml b/tests/ansible/integration/action/fixup_perms2__copy.yml new file mode 100644 index 00000000..c92b158e --- /dev/null +++ b/tests/ansible/integration/action/fixup_perms2__copy.yml @@ -0,0 +1,117 @@ +# Verify action plugins still set file modes correctly even though +# fixup_perms2() avoids setting execute bit despite being asked to. + +- name: integration/action/fixup_perms2__copy.yml + hosts: test-targets + any_errors_fatal: true + tasks: + - name: Get default remote file mode + shell: python -c 'import os; print("%04o" % (int("0666", 8) & ~os.umask(0)))' + register: py_umask + + - name: Set default file mode + set_fact: + mode: "{{py_umask.stdout}}" + + # + # copy module (no mode). + # + + - name: "Copy files (no mode)" + copy: + content: "" + dest: /tmp/copy-no-mode + + - stat: path=/tmp/copy-no-mode + register: out + - assert: + that: + - out.stat.mode == mode + + # + # copy module (explicit mode). + # + + - name: "Copy files from content: arg" + copy: + content: "" + mode: 0400 + dest: /tmp/copy-with-mode + + - stat: path=/tmp/copy-with-mode + register: out + - assert: + that: + - out.stat.mode == "0400" + + # + # copy module (existing disk files, no mode). + # + + - file: + path: /tmp/weird-mode.out + state: absent + + - name: Create local test file. + connection: local + copy: + content: "weird mode" + dest: "/tmp/weird-mode" + mode: "1462" + + - copy: + src: "/tmp/weird-mode" + dest: "/tmp/weird-mode.out" + + - stat: + path: "/tmp/weird-mode.out" + register: out + - assert: + that: + - out.stat.mode == mode + + # + # copy module (existing disk files, preserve mode). + # + + - copy: + src: "/tmp/weird-mode" + dest: "/tmp/weird-mode" + mode: preserve + + - stat: + path: "/tmp/weird-mode" + register: out + - assert: + that: + - out.stat.mode == "1462" + + # + # copy module (existing disk files, explicit mode). + # + + - copy: + src: "/tmp/weird-mode" + dest: "/tmp/weird-mode" + mode: "1461" + + - stat: + path: "/tmp/weird-mode" + register: out + + - assert: + that: + - out.stat.mode == "1461" + + - file: + state: absent + path: "{{item}}" + with_items: + - /tmp/weird-mode + - /tmp/weird-mode.out + - /tmp/copy-no-mode + - /tmp/copy-no-mode.out + - /tmp/copy-with-mode + - /tmp/copy-with-mode.out + + # end of cleaning out files diff --git a/tests/ansible/integration/action/low_level_execute_command.yml b/tests/ansible/integration/action/low_level_execute_command.yml index 842d99d2..a42fa877 100644 --- a/tests/ansible/integration/action/low_level_execute_command.yml +++ b/tests/ansible/integration/action/low_level_execute_command.yml @@ -23,7 +23,6 @@ register: raw # Can't test stdout because TTY inserts \r in Ansible version. - - debug: msg={{raw}} - name: Verify raw module output. assert: that: diff --git a/tests/ansible/integration/action/make_tmp_path.yml b/tests/ansible/integration/action/make_tmp_path.yml index 83261208..0631727d 100644 --- a/tests/ansible/integration/action/make_tmp_path.yml +++ b/tests/ansible/integration/action/make_tmp_path.yml @@ -1,63 +1,156 @@ +# +# Ensure _make_tmp_path returns the same result across invocations for a single +# user account, and that the path returned cleans itself up on connection +# termination. +# +# Related bugs prior to the new-style handling: +# https://github.com/dw/mitogen/issues/239 +# https://github.com/dw/mitogen/issues/301 - name: integration/action/make_tmp_path.yml hosts: test-targets any_errors_fatal: true tasks: - - name: "Find out root's homedir." - # Runs first because it blats regular Ansible facts with junk, so - # non-become run fixes that up. - setup: gather_subset=min - become: true - register: root_facts - - - name: "Find regular homedir" - setup: gather_subset=min - register: user_facts + - meta: end_play + when: not is_mitogen # - # non-become + # non-root # - - action_passthrough: + - name: "Find regular temp path" + action_passthrough: method: _make_tmp_path + register: tmp_path + + - name: "Find regular temp path (new task)" + action_passthrough: + method: _make_tmp_path + register: tmp_path2 + + - name: "Find good temp path" + set_fact: + good_temp_path: "{{tmp_path.result|dirname}}" + + - name: "Find good temp path (new task)" + set_fact: + good_temp_path2: "{{tmp_path2.result|dirname}}" + + - name: "Verify common base path for both tasks" + assert: + that: + - good_temp_path == good_temp_path2 + + - name: "Verify different subdir for both tasks" + assert: + that: + - tmp_path.result != tmp_path2.result + + # + # Verify subdirectory removal. + # + + - name: Stat temp path + stat: + path: "{{tmp_path.result}}" + register: stat1 + + - name: Stat temp path (new task) + stat: + path: "{{tmp_path2.result}}" + register: stat2 + + - name: "Verify neither subdir exists any more" + assert: + that: + - not stat1.stat.exists + - not stat2.stat.exists + + # + # Verify good directory persistence. + # + + - name: Stat good temp path (new task) + stat: + path: "{{good_temp_path}}" + register: stat + + - name: "Verify good temp path is persistent" + assert: + that: + - stat.stat.exists + + # + # Write some junk into the temp path. + # + + - name: "Write junk to temp path and verify it disappears" + custom_python_run_script: + script: | + from ansible.module_utils.basic import get_module_path + path = get_module_path() + '/foo.txt' + result['path'] = path + open(path, 'w').write("bar") + register: out + + - name: "Verify junk disappeared." + stat: + path: "{{out.path}}" register: out - assert: - # This string must match ansible.cfg::remote_tmp - that: out.result.startswith("{{user_facts.ansible_facts.ansible_user_dir}}/.ansible/mitogen-tests/") + that: + - not out.stat.exists - - stat: - path: "{{out.result}}" - register: st + # + # root + # - - assert: - that: st.stat.exists and st.stat.isdir and st.stat.mode == "0700" + - name: "Find root temp path" + become: true + action_passthrough: + method: _make_tmp_path + register: tmp_path_root - - file: - path: "{{out.result}}" - state: absent + - name: "Verify root temp path differs from regular path" + assert: + that: + - tmp_path2.result != tmp_path_root.result # - # become. make_tmp_path() must evaluate HOME in the context of the SSH - # user, not the become user. + # readonly homedir # - - action_passthrough: - method: _make_tmp_path - register: out + - name: "Try writing to temp directory for the readonly_homedir user" become: true + become_user: mitogen__readonly_homedir + custom_python_run_script: + script: | + from ansible.module_utils.basic import get_module_path + path = get_module_path() + '/foo.txt' + result['path'] = path + open(path, 'w').write("bar") + register: tmp_path - - assert: - # This string must match ansible.cfg::remote_tmp - that: out.result.startswith("{{user_facts.ansible_facts.ansible_user_dir}}/.ansible/mitogen-tests/") + # + # modules get the same base dir + # - - stat: - path: "{{out.result}}" - register: st + - name: "Verify modules get the same tmpdir as the action plugin" + custom_python_detect_environment: + register: out - - assert: - that: st.stat.exists and st.stat.isdir and st.stat.mode == "0700" + # v2.6 related: https://github.com/ansible/ansible/pull/39833 + - name: "Verify modules get the same tmpdir as the action plugin (<2.5)" + when: ansible_version.full < '2.5' + assert: + that: + - out.module_path.startswith(good_temp_path2) + - out.module_tmpdir == None - - file: - path: "{{out.result}}" - state: absent + - name: "Verify modules get the same tmpdir as the action plugin (>2.5)" + when: ansible_version.full > '2.5' + assert: + that: + - out.module_path.startswith(good_temp_path2) + - out.module_tmpdir.startswith(good_temp_path2) diff --git a/tests/ansible/integration/action/remove_tmp_path.yml b/tests/ansible/integration/action/remove_tmp_path.yml new file mode 100644 index 00000000..566e4f3f --- /dev/null +++ b/tests/ansible/integration/action/remove_tmp_path.yml @@ -0,0 +1,40 @@ +# +# Ensure _remove_tmp_path cleans up the temporary path. +# +# +- name: integration/action/remove_tmp_path.yml + hosts: test-targets + any_errors_fatal: true + tasks: + - meta: end_play + when: not is_mitogen + + # + # Use the copy module to cause a temporary directory to be created, and + # return a result with a 'src' attribute pointing into that directory. + # + + - copy: + dest: /tmp/remove_tmp_path_test + content: "{{ 123123 | random }}" + register: out + + - stat: + path: "{{out.src}}" + register: out2 + + - assert: + that: + - not out2.stat.exists + + - stat: + path: "{{out.src|dirname}}" + register: out2 + + - assert: + that: + - not out2.stat.exists + + - file: + path: /tmp/remove_tmp_path_test + state: absent diff --git a/tests/ansible/integration/action/synchronize.yml b/tests/ansible/integration/action/synchronize.yml new file mode 100644 index 00000000..25649fbf --- /dev/null +++ b/tests/ansible/integration/action/synchronize.yml @@ -0,0 +1,58 @@ +# Verify basic operation of the synchronize module. + +- name: integration/action/synchronize.yml + hosts: test-targets + any_errors_fatal: true + vars: + ansible_user: mitogen__has_sudo_pubkey + ansible_ssh_private_key_file: /tmp/synchronize-action-key + tasks: + # must copy git file to set proper file mode. + - copy: + dest: /tmp/synchronize-action-key + src: ../../../data/docker/mitogen__has_sudo_pubkey.key + mode: u=rw,go= + connection: local + + - file: + path: /tmp/sync-test + state: absent + connection: local + + - file: + path: /tmp/sync-test + state: directory + connection: local + + - copy: + dest: /tmp/sync-test/item + content: "item!" + connection: local + + - file: + path: /tmp/sync-test.out + state: absent + become: true + + - synchronize: + private_key: /tmp/synchronize-action-key + dest: /tmp/sync-test.out + src: /tmp/sync-test/ + + - slurp: + src: /tmp/sync-test.out/item + register: out + + - set_fact: outout="{{out.content|b64decode}}" + + - assert: + that: outout == "item!" + + - file: + path: "{{item}}" + state: absent + become: true + with_items: + - /tmp/synchronize-action-key + - /tmp/sync-test + - /tmp/sync-test.out diff --git a/tests/ansible/integration/action/transfer_data.yml b/tests/ansible/integration/action/transfer_data.yml index c6845cff..bbd39309 100644 --- a/tests/ansible/integration/action/transfer_data.yml +++ b/tests/ansible/integration/action/transfer_data.yml @@ -37,8 +37,6 @@ src: /tmp/transfer-data register: out - - debug: msg={{out}} - - assert: that: out.content|b64decode == 'I am text.' diff --git a/tests/ansible/integration/all.yml b/tests/ansible/integration/all.yml index 264ae716..e9a12ec8 100644 --- a/tests/ansible/integration/all.yml +++ b/tests/ansible/integration/all.yml @@ -6,13 +6,14 @@ - import_playbook: action/all.yml - import_playbook: async/all.yml - import_playbook: become/all.yml +- import_playbook: connection/all.yml - import_playbook: connection_loader/all.yml - import_playbook: context_service/all.yml +- import_playbook: delegation/all.yml +- import_playbook: glibc_caches/all.yml - import_playbook: local/all.yml -#- import_playbook: module_utils/all.yml +- import_playbook: module_utils/all.yml - import_playbook: playbook_semantics/all.yml -- import_playbook: remote_tmp/all.yml - import_playbook: runner/all.yml - import_playbook: ssh/all.yml - import_playbook: strategy/all.yml -- import_playbook: glibc_caches/all.yml diff --git a/tests/ansible/integration/async/result_binary_producing_json.yml b/tests/ansible/integration/async/result_binary_producing_json.yml index 61d63a08..f81d0bb2 100644 --- a/tests/ansible/integration/async/result_binary_producing_json.yml +++ b/tests/ansible/integration/async/result_binary_producing_json.yml @@ -5,10 +5,21 @@ any_errors_fatal: true tasks: - - custom_binary_producing_json: - async: 100 - poll: 0 - register: job + - block: + - custom_binary_producing_json_Darwin: + async: 100 + poll: 0 + register: job_darwin + - set_fact: job={{job_darwin}} + when: ansible_system == "Darwin" + + - block: + - custom_binary_producing_json_Linux: + async: 100 + poll: 0 + register: job_linux + - set_fact: job={{job_linux}} + when: ansible_system == "Linux" - assert: that: | @@ -30,9 +41,9 @@ src: "{{ansible_user_dir}}/.ansible_async/{{job.ansible_job_id}}" register: result - - debug: msg={{async_out}} - vars: - async_out: "{{result.content|b64decode|from_json}}" + #- debug: msg={{async_out}} + #vars: + #async_out: "{{result.content|b64decode|from_json}}" - assert: that: diff --git a/tests/ansible/integration/async/result_binary_producing_junk.yml b/tests/ansible/integration/async/result_binary_producing_junk.yml index 37f31704..87877db7 100644 --- a/tests/ansible/integration/async/result_binary_producing_junk.yml +++ b/tests/ansible/integration/async/result_binary_producing_junk.yml @@ -5,10 +5,21 @@ any_errors_fatal: true tasks: - - custom_binary_producing_junk: - async: 100 - poll: 0 - register: job + - block: + - custom_binary_producing_junk_Darwin: + async: 100 + poll: 0 + register: job_darwin + - set_fact: job={{job_darwin}} + when: ansible_system == "Darwin" + + - block: + - custom_binary_producing_junk_Linux: + async: 100 + poll: 0 + register: job_linux + - set_fact: job={{job_linux}} + when: ansible_system == "Linux" - shell: sleep 1 @@ -16,9 +27,9 @@ src: "{{ansible_user_dir}}/.ansible_async/{{job.ansible_job_id}}" register: result - - debug: msg={{async_out}} - vars: - async_out: "{{result.content|b64decode|from_json}}" + #- debug: msg={{async_out}} + #vars: + #async_out: "{{result.content|b64decode|from_json}}" - assert: that: diff --git a/tests/ansible/integration/async/result_shell_echo_hi.yml b/tests/ansible/integration/async/result_shell_echo_hi.yml index 77678318..8858037a 100644 --- a/tests/ansible/integration/async/result_shell_echo_hi.yml +++ b/tests/ansible/integration/async/result_shell_echo_hi.yml @@ -16,9 +16,9 @@ src: "{{ansible_user_dir}}/.ansible_async/{{job.ansible_job_id}}" register: result - - debug: msg={{async_out}} - vars: - async_out: "{{result.content|b64decode|from_json}}" + #- debug: msg={{async_out}} + #vars: + #async_out: "{{result.content|b64decode|from_json}}" - assert: that: diff --git a/tests/ansible/integration/become/sudo_flags_failure.yml b/tests/ansible/integration/become/sudo_flags_failure.yml index 484134c5..52404019 100644 --- a/tests/ansible/integration/become/sudo_flags_failure.yml +++ b/tests/ansible/integration/become/sudo_flags_failure.yml @@ -11,12 +11,11 @@ vars: ansible_become_flags: --derps - - debug: msg={{out}} - name: Verify raw module output. assert: that: - out.failed - | ('sudo: no such option: --derps' in out.msg) or - ("sudo: unrecognized option `--derps'" in out.module_stderr) or + ("sudo: unrecognized option `--derps'" in out.module_stderr) or ("sudo: unrecognized option '--derps'" in out.module_stderr) diff --git a/tests/ansible/integration/connection/_put_file.yml b/tests/ansible/integration/connection/_put_file.yml new file mode 100644 index 00000000..a0fea4ed --- /dev/null +++ b/tests/ansible/integration/connection/_put_file.yml @@ -0,0 +1,22 @@ +--- + +- shell: dd if=/dev/urandom of=/tmp/{{file_name}} bs=1024 count={{file_size}} + args: + creates: /tmp/{{file_name}} + connection: local + +- copy: + dest: /tmp/{{file_name}}.out + src: /tmp/{{file_name}} + +- stat: path=/tmp/{{file_name}} + register: original + connection: local + +- stat: path=/tmp/{{file_name}}.out + register: copied + +- assert: + that: + - original.stat.checksum == copied.stat.checksum + - original.stat.mtime|int == copied.stat.mtime|int diff --git a/tests/ansible/integration/connection/all.yml b/tests/ansible/integration/connection/all.yml new file mode 100644 index 00000000..123e11c4 --- /dev/null +++ b/tests/ansible/integration/connection/all.yml @@ -0,0 +1,5 @@ +--- + +- import_playbook: exec_command.yml +- import_playbook: put_small_file.yml +- import_playbook: put_large_file.yml diff --git a/tests/ansible/integration/connection/exec_command.yml b/tests/ansible/integration/connection/exec_command.yml new file mode 100644 index 00000000..6a632961 --- /dev/null +++ b/tests/ansible/integration/connection/exec_command.yml @@ -0,0 +1,19 @@ +# Test basic functinality of exec_command. +--- + +- name: integration/connection/exec_command.yml + hosts: test-targets + gather_facts: no + any_errors_fatal: true + tasks: + - connection_passthrough: + method: exec_command + kwargs: + cmd: echo "hello, world" + register: out + + - assert: + that: + - out.result[0] == 0 + - out.result[1] == "hello, world\r\n" + - out.result[2].startswith("Shared connection to ") diff --git a/tests/ansible/integration/connection/put_large_file.yml b/tests/ansible/integration/connection/put_large_file.yml new file mode 100644 index 00000000..210c5d6a --- /dev/null +++ b/tests/ansible/integration/connection/put_large_file.yml @@ -0,0 +1,12 @@ +# Test transfers made via FileService. +--- + +- name: integration/connection/put_large_file.yml + hosts: test-targets + gather_facts: no + any_errors_fatal: true + vars: + file_name: large-file + file_size: 512 + tasks: + - include_tasks: _put_file.yml diff --git a/tests/ansible/integration/connection/put_small_file.yml b/tests/ansible/integration/connection/put_small_file.yml new file mode 100644 index 00000000..aa6cc0d7 --- /dev/null +++ b/tests/ansible/integration/connection/put_small_file.yml @@ -0,0 +1,12 @@ +# Test small transfers made via RPC. +--- + +- name: integration/connection/put_small_file.yml + hosts: test-targets + gather_facts: no + any_errors_fatal: true + vars: + file_name: small-file + file_size: 123 + tasks: + - include_tasks: _put_file.yml diff --git a/tests/ansible/integration/delegation/all.yml b/tests/ansible/integration/delegation/all.yml new file mode 100644 index 00000000..743ce157 --- /dev/null +++ b/tests/ansible/integration/delegation/all.yml @@ -0,0 +1,4 @@ +- import_playbook: delegate_to_template.yml +- import_playbook: osa_container_standalone.yml +- import_playbook: osa_delegate_to_self.yml +- import_playbook: stack_construction.yml diff --git a/tests/ansible/integration/delegation/delegate_to_template.yml b/tests/ansible/integration/delegation/delegate_to_template.yml new file mode 100644 index 00000000..2f0830c4 --- /dev/null +++ b/tests/ansible/integration/delegation/delegate_to_template.yml @@ -0,0 +1,69 @@ +# Ensure templated delegate_to field works. + +- name: integration/delegation/delegate_to_template.yml + vars: + physical_host: "cd-normal-alias" + physical_hosts: ["cd-normal-alias", "cd-normal-normal"] + hosts: test-targets + gather_facts: no + any_errors_fatal: true + tasks: + - meta: end_play + when: not is_mitogen + + - mitogen_get_stack: + delegate_to: "{{ physical_host }}" + register: out + + - assert: + that: | + out.result == [ + { + 'kwargs': { + 'check_host_keys': 'ignore', + 'connect_timeout': 10, + 'hostname': 'alias-host', + 'identities_only': False, + 'identity_file': None, + 'password': None, + 'port': None, + 'python_path': None, + 'ssh_args': [ + '-o', + 'ForwardAgent=yes', + '-o', + 'ControlMaster=auto', + '-o', + 'ControlPersist=60s', + ], + 'ssh_debug_level': None, + 'ssh_path': 'ssh', + 'username': 'alias-user', + }, + 'method': 'ssh', + }, + { + 'kwargs': { + 'check_host_keys': 'ignore', + 'connect_timeout': 10, + 'hostname': 'cd-normal-alias', + 'identities_only': False, + 'identity_file': None, + 'password': None, + 'port': None, + 'python_path': None, + 'ssh_args': [ + '-o', + 'ForwardAgent=yes', + '-o', + 'ControlMaster=auto', + '-o', + 'ControlPersist=60s', + ], + 'ssh_debug_level': None, + 'ssh_path': 'ssh', + 'username': None, + }, + 'method': 'ssh', + } + ] diff --git a/tests/ansible/integration/delegation/osa_container_standalone.yml b/tests/ansible/integration/delegation/osa_container_standalone.yml new file mode 100644 index 00000000..b942ef63 --- /dev/null +++ b/tests/ansible/integration/delegation/osa_container_standalone.yml @@ -0,0 +1,28 @@ +# Verify one OSA-style container has the correct config. + +- name: integration/delegation/container_standalone.yml + hosts: dtc-container-1 + gather_facts: false + tasks: + - meta: end_play + when: not is_mitogen + + - mitogen_get_stack: + register: out + + - assert: + that: | + out.result == [ + { + 'kwargs': { + 'container': 'dtc-container-1', + 'docker_path': None, + 'kind': 'lxc', + 'lxc_info_path': None, + 'machinectl_path': None, + 'python_path': ['/usr/bin/python'], + 'username': None, + }, + 'method': 'setns', + }, + ] diff --git a/tests/ansible/integration/delegation/osa_delegate_to_self.yml b/tests/ansible/integration/delegation/osa_delegate_to_self.yml new file mode 100644 index 00000000..0915bbb8 --- /dev/null +++ b/tests/ansible/integration/delegation/osa_delegate_to_self.yml @@ -0,0 +1,31 @@ +# OSA: Verify delegating the connection back to the container succeeds. + +- name: integration/delegation/osa_delegate_to_self.yml + hosts: osa-container-1 + vars: + target: osa-container-1 + gather_facts: false + tasks: + - meta: end_play + when: not is_mitogen + + - mitogen_get_stack: + delegate_to: "{{target}}" + register: out + + - assert: + that: | + out.result == [ + { + 'kwargs': { + 'container': 'osa-container-1', + 'docker_path': None, + 'kind': 'lxc', + 'lxc_info_path': None, + 'machinectl_path': None, + 'python_path': None, + 'username': None, + }, + 'method': 'setns', + }, + ] diff --git a/tests/ansible/integration/delegation/stack_construction.yml b/tests/ansible/integration/delegation/stack_construction.yml new file mode 100644 index 00000000..4d9c75f4 --- /dev/null +++ b/tests/ansible/integration/delegation/stack_construction.yml @@ -0,0 +1,371 @@ +# https://github.com/dw/mitogen/issues/251 + +# ansible_mitogen.connection internally reinterprets Ansible state into a +# 'connection stack' -- this is just a list of dictionaries specifying a +# sequence of proxied Router connection methods and their kwargs used to +# establish the connection. That list is passed to ContextService, which loops +# over the stack specifying via=(None or previous entry) for each connection +# method. + +# mitogen_get_stack is a magic action that returns the stack, so we can test +# all kinds of scenarios without actually needing a real environmnt. + +# Updating this file? Install 'pprintpp' and hack lib/callbacks/nice_stdout.py +# to use it instead of the built-in function, then simply s/'/'/ to get the +# cutpasteable formatted dicts below. WARNING: remove the trailing comma from +# the result list element, it seems to cause assert to silently succeed! + + +- name: integration/delegation/stack_construction.yml + hosts: cd-normal + tasks: + - meta: end_play + when: not is_mitogen + + # used later for local_action test. + - local_action: custom_python_detect_environment + register: local_env + + +- hosts: cd-normal + any_errors_fatal: true + tasks: + - meta: end_play + when: not is_mitogen + + - mitogen_get_stack: + register: out + - assert: + that: | + out.result == [ + { + "kwargs": { + "connect_timeout": 10, + "doas_path": None, + "password": None, + "python_path": ["/usr/bin/python"], + "username": "normal-user", + }, + "method": "doas", + } + ] + + +- hosts: cd-normal + tasks: + - meta: end_play + when: not is_mitogen + + - mitogen_get_stack: + delegate_to: cd-alias + register: out + - assert: + that: | + out.result == [ + { + 'kwargs': { + 'check_host_keys': 'ignore', + 'connect_timeout': 10, + 'hostname': 'alias-host', + 'identities_only': False, + 'identity_file': None, + 'password': None, + 'port': None, + 'python_path': None, + 'ssh_args': [ + '-o', + 'ForwardAgent=yes', + '-o', + 'ControlMaster=auto', + '-o', + 'ControlPersist=60s', + ], + 'ssh_debug_level': None, + 'ssh_path': 'ssh', + 'username': 'alias-user', + }, + 'method': 'ssh', + }, + ] + + +- hosts: cd-alias + tasks: + - meta: end_play + when: not is_mitogen + + - mitogen_get_stack: + register: out + - assert: + that: | + out.result == [ + { + 'kwargs': { + 'check_host_keys': 'ignore', + 'connect_timeout': 10, + 'hostname': 'alias-host', + 'identities_only': False, + 'identity_file': None, + 'password': None, + 'port': None, + 'python_path': ['/usr/bin/python'], + 'ssh_args': [ + '-o', + 'ForwardAgent=yes', + '-o', + 'ControlMaster=auto', + '-o', + 'ControlPersist=60s', + ], + 'ssh_debug_level': None, + 'ssh_path': 'ssh', + 'username': 'alias-user', + }, + 'method': 'ssh', + }, + ] + + +- hosts: cd-normal-normal + tasks: + - meta: end_play + when: not is_mitogen + + - mitogen_get_stack: + register: out + - assert: + that: | + out.result == [ + { + 'kwargs': { + 'connect_timeout': 10, + 'doas_path': None, + 'password': None, + 'python_path': None, + 'username': 'normal-user', + }, + 'method': 'doas', + }, + { + 'kwargs': { + 'check_host_keys': 'ignore', + 'connect_timeout': 10, + 'hostname': 'cd-normal-normal', + 'identities_only': False, + 'identity_file': None, + 'password': None, + 'port': None, + 'python_path': ['/usr/bin/python'], + 'ssh_args': [ + '-o', + 'ForwardAgent=yes', + '-o', + 'ControlMaster=auto', + '-o', + 'ControlPersist=60s', + ], + 'ssh_debug_level': None, + 'ssh_path': 'ssh', + 'username': None, + }, + 'method': 'ssh', + }, + ] + + +- hosts: cd-normal-alias + tasks: + - meta: end_play + when: not is_mitogen + + - mitogen_get_stack: + register: out + - assert: + that: | + out.result == [ + { + 'kwargs': { + 'check_host_keys': 'ignore', + 'connect_timeout': 10, + 'hostname': 'alias-host', + 'identities_only': False, + 'identity_file': None, + 'password': None, + 'port': None, + 'python_path': None, + 'ssh_args': [ + '-o', + 'ForwardAgent=yes', + '-o', + 'ControlMaster=auto', + '-o', + 'ControlPersist=60s', + ], + 'ssh_debug_level': None, + 'ssh_path': 'ssh', + 'username': 'alias-user', + }, + 'method': 'ssh', + }, + { + 'kwargs': { + 'check_host_keys': 'ignore', + 'connect_timeout': 10, + 'hostname': 'cd-normal-alias', + 'identities_only': False, + 'identity_file': None, + 'password': None, + 'port': None, + 'python_path': ['/usr/bin/python'], + 'ssh_args': [ + '-o', + 'ForwardAgent=yes', + '-o', + 'ControlMaster=auto', + '-o', + 'ControlPersist=60s', + ], + 'ssh_debug_level': None, + 'ssh_path': 'ssh', + 'username': None, + }, + 'method': 'ssh', + }, + ] + + +- hosts: cd-newuser-normal-normal + tasks: + - meta: end_play + when: not is_mitogen + + - mitogen_get_stack: + register: out + - assert: + that: | + out.result == [ + { + 'kwargs': { + 'connect_timeout': 10, + 'doas_path': None, + 'password': None, + 'python_path': None, + 'username': 'normal-user', + }, + 'method': 'doas', + }, + { + 'kwargs': { + 'check_host_keys': 'ignore', + 'connect_timeout': 10, + 'hostname': 'cd-newuser-normal-normal', + 'identities_only': False, + 'identity_file': None, + 'password': None, + 'port': None, + 'python_path': ['/usr/bin/python'], + 'ssh_args': [ + '-o', + 'ForwardAgent=yes', + '-o', + 'ControlMaster=auto', + '-o', + 'ControlPersist=60s', + ], + 'ssh_debug_level': None, + 'ssh_path': 'ssh', + 'username': 'newuser-normal-normal-user', + }, + 'method': 'ssh', + }, + ] + + +- hosts: cd-newuser-normal-normal + tasks: + - meta: end_play + when: not is_mitogen + + - mitogen_get_stack: + delegate_to: cd-alias + register: out + - assert: + that: | + out.result == [ + { + 'kwargs': { + 'check_host_keys': 'ignore', + 'connect_timeout': 10, + 'hostname': 'alias-host', + 'identities_only': False, + 'identity_file': None, + 'password': None, + 'port': None, + 'python_path': None, + 'ssh_args': [ + '-o', + 'ForwardAgent=yes', + '-o', + 'ControlMaster=auto', + '-o', + 'ControlPersist=60s', + ], + 'ssh_debug_level': None, + 'ssh_path': 'ssh', + 'username': 'alias-user', + }, + 'method': 'ssh', + }, + ] + + +- hosts: cd-newuser-normal-normal + tasks: + - meta: end_play + when: not is_mitogen + + - local_action: mitogen_get_stack + register: out + - assert: + that: | + out.result == [ + { + 'kwargs': { + 'python_path': None + }, + 'method': 'local', + }, + ] + + +- hosts: cd-newuser-doas-normal + tasks: + - meta: end_play + when: not is_mitogen + + - mitogen_get_stack: + register: out + - assert: + that: | + out.result == [ + { + 'kwargs': { + 'connect_timeout': 10, + 'doas_path': None, + 'password': None, + 'python_path': None, + 'username': 'normal-user', + }, + 'method': 'doas', + }, + { + 'kwargs': { + 'connect_timeout': 10, + 'doas_path': None, + 'password': None, + 'python_path': ['/usr/bin/python'], + 'username': 'newuser-doas-normal-user', + }, + 'method': 'doas', + }, + ] diff --git a/tests/ansible/integration/glibc_caches/resolv_conf.yml b/tests/ansible/integration/glibc_caches/resolv_conf.yml index d1a466e9..643b83ec 100644 --- a/tests/ansible/integration/glibc_caches/resolv_conf.yml +++ b/tests/ansible/integration/glibc_caches/resolv_conf.yml @@ -9,7 +9,6 @@ ansible_become_pass: has_sudo_pubkey_password tasks: - - debug: msg={{hostvars}} - mitogen_test_gethostbyname: name: www.google.com register: out diff --git a/tests/ansible/integration/module_utils/adjacent_to_playbook.yml b/tests/ansible/integration/module_utils/adjacent_to_playbook.yml index 34cf1c5d..63bd90b2 100644 --- a/tests/ansible/integration/module_utils/adjacent_to_playbook.yml +++ b/tests/ansible/integration/module_utils/adjacent_to_playbook.yml @@ -9,7 +9,6 @@ - custom_python_external_module: register: out - - debug: msg={{out}} - assert: that: - out.external1_path == "ansible/integration/module_utils/module_utils/external1.py" diff --git a/tests/ansible/integration/module_utils/all.yml b/tests/ansible/integration/module_utils/all.yml index 920b5d1c..c8b8f2fb 100644 --- a/tests/ansible/integration/module_utils/all.yml +++ b/tests/ansible/integration/module_utils/all.yml @@ -1,6 +1,6 @@ -- import_playbook: from_config_path.yml -- import_playbook: from_config_path_pkg.yml -- import_playbook: adjacent_to_playbook.yml +#- import_playbook: from_config_path.yml +#- import_playbook: from_config_path_pkg.yml +#- import_playbook: adjacent_to_playbook.yml - import_playbook: adjacent_to_role.yml -- import_playbook: overrides_builtin.yml +#- import_playbook: overrides_builtin.yml diff --git a/tests/ansible/integration/module_utils/roles/modrole/tasks/main.yml b/tests/ansible/integration/module_utils/roles/modrole/tasks/main.yml index 857abae5..2c7c3372 100644 --- a/tests/ansible/integration/module_utils/roles/modrole/tasks/main.yml +++ b/tests/ansible/integration/module_utils/roles/modrole/tasks/main.yml @@ -3,7 +3,6 @@ - uses_external3: register: out -- debug: msg={{out}} - assert: that: - out.external3_path == "integration/module_utils/roles/modrole/module_utils/external3.py" diff --git a/tests/ansible/integration/module_utils/roles/overrides_modrole/tasks/main.yml b/tests/ansible/integration/module_utils/roles/overrides_modrole/tasks/main.yml index 24717693..6ef4703a 100644 --- a/tests/ansible/integration/module_utils/roles/overrides_modrole/tasks/main.yml +++ b/tests/ansible/integration/module_utils/roles/overrides_modrole/tasks/main.yml @@ -3,7 +3,6 @@ - uses_custom_known_hosts: register: out -- debug: msg={{out}} - assert: that: - out.path == "ansible/integration/module_utils/roles/override_modrole/module_utils/known_hosts.py" diff --git a/tests/ansible/integration/playbook_semantics/environment.yml b/tests/ansible/integration/playbook_semantics/environment.yml index 1c183a5a..1ac7f71d 100644 --- a/tests/ansible/integration/playbook_semantics/environment.yml +++ b/tests/ansible/integration/playbook_semantics/environment.yml @@ -9,7 +9,5 @@ SOME_ENV: 123 register: result - - debug: msg={{result}} - - assert: that: "result.stdout == '123'" diff --git a/tests/ansible/integration/remote_tmp/all.yml b/tests/ansible/integration/remote_tmp/all.yml deleted file mode 100644 index 5dff88d8..00000000 --- a/tests/ansible/integration/remote_tmp/all.yml +++ /dev/null @@ -1,2 +0,0 @@ - -- import_playbook: readonly_homedir.yml diff --git a/tests/ansible/integration/remote_tmp/readonly_homedir.yml b/tests/ansible/integration/remote_tmp/readonly_homedir.yml deleted file mode 100644 index ffad455a..00000000 --- a/tests/ansible/integration/remote_tmp/readonly_homedir.yml +++ /dev/null @@ -1,20 +0,0 @@ -# https://github.com/dw/mitogen/issues/239 -# While remote_tmp is used in the context of the SSH user by action code -# running on the controller, Ansiballz ignores it and uses the system default -# instead. - -- name: integration/remote_tmp/readonly_homedir.yml - hosts: test-targets - any_errors_fatal: true - tasks: - - custom_python_detect_environment: - become: true - become_user: mitogen__readonly_homedir - register: out - vars: - ansible_become_pass: readonly_homedir_password - - - name: Verify system temp directory was used. - assert: - that: - - out.__file__.startswith("/tmp/ansible_") diff --git a/tests/ansible/integration/runner/all.yml b/tests/ansible/integration/runner/all.yml index 5242a405..9dd209d7 100644 --- a/tests/ansible/integration/runner/all.yml +++ b/tests/ansible/integration/runner/all.yml @@ -1,7 +1,8 @@ +- import_playbook: atexit.yml - import_playbook: builtin_command_module.yml +- import_playbook: custom_bash_hashbang_argument.yml - import_playbook: custom_bash_old_style_module.yml - import_playbook: custom_bash_want_json_module.yml -- import_playbook: custom_bash_hashbang_argument.yml - import_playbook: custom_binary_producing_json.yml - import_playbook: custom_binary_producing_junk.yml - import_playbook: custom_binary_single_null.yml @@ -13,4 +14,8 @@ - import_playbook: custom_python_want_json_module.yml - import_playbook: custom_script_interpreter.yml - import_playbook: environment_isolation.yml -- import_playbook: forking_behaviour.yml +- import_playbook: etc_environment.yml +- import_playbook: forking_active.yml +- import_playbook: forking_correct_parent.yml +- import_playbook: forking_inactive.yml +- import_playbook: missing_module.yml diff --git a/tests/ansible/integration/runner/atexit.yml b/tests/ansible/integration/runner/atexit.yml new file mode 100644 index 00000000..872cdd57 --- /dev/null +++ b/tests/ansible/integration/runner/atexit.yml @@ -0,0 +1,31 @@ +# issue #397: newer Ansibles rely on atexit to cleanup their temporary +# directories. Ensure atexit handlers run during runner completion. + +- name: integration/runner/atexit.yml + hosts: test-targets + gather_facts: false + any_errors_fatal: false + tasks: + + # + # Verify a run with a healthy atexit handler. Broken handlers cause an + # exception to be raised. + # + + - custom_python_run_script: + script: | + import atexit + atexit.register(lambda: + open('/tmp/atexit-was-triggered', 'w').write('yep')) + + - slurp: + path: /tmp/atexit-was-triggered + register: out + + - assert: + that: + - out.content|b64decode == "yep" + + - file: + path: /tmp/atexit-was-triggered + state: absent diff --git a/tests/ansible/integration/runner/custom_binary_producing_json.yml b/tests/ansible/integration/runner/custom_binary_producing_json.yml index 00f03f07..a3b8a224 100644 --- a/tests/ansible/integration/runner/custom_binary_producing_json.yml +++ b/tests/ansible/integration/runner/custom_binary_producing_json.yml @@ -1,11 +1,23 @@ - name: integration/runner/custom_binary_producing_json.yml hosts: test-targets any_errors_fatal: true + gather_facts: true tasks: - - custom_binary_producing_json: - foo: true - with_sequence: start=1 end={{end|default(1)}} - register: out + - block: + - custom_binary_producing_json_Darwin: + foo: true + with_sequence: start=1 end={{end|default(1)}} + register: out_darwin + - set_fact: out={{out_darwin}} + when: ansible_system == "Darwin" + + - block: + - custom_binary_producing_json_Linux: + foo: true + with_sequence: start=1 end={{end|default(1)}} + register: out_linux + - set_fact: out={{out_linux}} + when: ansible_system == "Linux" - assert: that: | diff --git a/tests/ansible/integration/runner/custom_binary_producing_junk.yml b/tests/ansible/integration/runner/custom_binary_producing_junk.yml index 93d98065..41572aad 100644 --- a/tests/ansible/integration/runner/custom_binary_producing_junk.yml +++ b/tests/ansible/integration/runner/custom_binary_producing_junk.yml @@ -1,17 +1,29 @@ - name: integration/runner/custom_binary_producing_junk.yml hosts: test-targets + gather_facts: true tasks: - - custom_binary_producing_junk: - foo: true - with_sequence: start=1 end={{end|default(1)}} - ignore_errors: true - register: out + - block: + - custom_binary_producing_junk_Darwin: + foo: true + with_sequence: start=1 end={{end|default(1)}} + ignore_errors: true + register: out_darwin + - set_fact: out={{out_darwin}} + when: ansible_system == "Darwin" + + - block: + - custom_binary_producing_junk_Linux: + foo: true + with_sequence: start=1 end={{end|default(1)}} + ignore_errors: true + register: out_linux + - set_fact: out={{out_linux}} + when: ansible_system == "Linux" - hosts: test-targets any_errors_fatal: true tasks: - - debug: msg={{out}} - assert: that: | out.failed and diff --git a/tests/ansible/integration/runner/etc_environment.yml b/tests/ansible/integration/runner/etc_environment.yml new file mode 100644 index 00000000..0037698a --- /dev/null +++ b/tests/ansible/integration/runner/etc_environment.yml @@ -0,0 +1,80 @@ +# issue #338: ensure /etc/environment is reloaded if it changes. +# Actually this test uses ~/.pam_environment, which is using the same logic, +# but less likely to brick a development workstation + +- name: integration/runner/etc_environment.yml + hosts: test-targets + any_errors_fatal: true + gather_facts: true + tasks: + # ~/.pam_environment + + - file: + path: ~/.pam_environment + state: absent + + - shell: echo $MAGIC_PAM_ENV + register: echo + + - assert: + that: echo.stdout == "" + + - copy: + dest: ~/.pam_environment + content: | + MAGIC_PAM_ENV=321 + + - shell: echo $MAGIC_PAM_ENV + register: echo + + - assert: + that: echo.stdout == "321" + + - file: + path: ~/.pam_environment + state: absent + + - shell: echo $MAGIC_PAM_ENV + register: echo + + - assert: + that: echo.stdout == "" + + + # /etc/environment + - meta: end_play + when: ansible_virtualization_type != "docker" + + - file: + path: /etc/environment + state: absent + become: true + + - shell: echo $MAGIC_ETC_ENV + register: echo + + - assert: + that: echo.stdout == "" + + - copy: + dest: /etc/environment + content: | + MAGIC_ETC_ENV=555 + become: true + + - shell: echo $MAGIC_ETC_ENV + register: echo + + - assert: + that: echo.stdout == "555" + + - file: + path: /etc/environment + state: absent + become: true + + - shell: echo $MAGIC_ETC_ENV + register: echo + + - assert: + that: echo.stdout == "" diff --git a/tests/ansible/integration/runner/forking_behaviour.yml b/tests/ansible/integration/runner/forking_active.yml similarity index 65% rename from tests/ansible/integration/runner/forking_behaviour.yml rename to tests/ansible/integration/runner/forking_active.yml index 7268fce0..e3e63b71 100644 --- a/tests/ansible/integration/runner/forking_behaviour.yml +++ b/tests/ansible/integration/runner/forking_active.yml @@ -1,27 +1,13 @@ - -- name: integration/runner/forking_behaviour.yml +- name: integration/runner/forking_active.yml hosts: test-targets any_errors_fatal: true tasks: - # Verify non-async jobs run in-process. + # Verify mitogen_task_isolation=fork triggers forking. - - name: get process ID. + - name: get regular process ID. custom_python_detect_environment: register: sync_proc1 - when: is_mitogen - - - name: get process ID again. - custom_python_detect_environment: - register: sync_proc2 - when: is_mitogen - - - assert: - that: - - sync_proc1.pid == sync_proc2.pid - when: is_mitogen - - # Verify mitogen_task_isolation=fork triggers forking. - name: get force-forked process ID. custom_python_detect_environment: @@ -42,3 +28,4 @@ - fork_proc1.pid != sync_proc1.pid - fork_proc1.pid != fork_proc2.pid when: is_mitogen + diff --git a/tests/ansible/integration/runner/forking_correct_parent.yml b/tests/ansible/integration/runner/forking_correct_parent.yml new file mode 100644 index 00000000..e8207676 --- /dev/null +++ b/tests/ansible/integration/runner/forking_correct_parent.yml @@ -0,0 +1,26 @@ + +- name: integration/runner/forking_correct_parent.yml + hosts: test-targets + any_errors_fatal: true + tasks: + + # Verify mitogen_task_isolation=fork forks from "virginal fork parent", not + # shared interpreter. + + - name: get regular process ID. + custom_python_detect_environment: + register: regular_proc + when: is_mitogen + + - name: get force-forked process ID again. + custom_python_detect_environment: + register: fork_proc + vars: + mitogen_task_isolation: fork + when: is_mitogen + + - assert: + that: + - fork_proc.pid != regular_proc.pid + - fork_proc.ppid != regular_proc.pid + when: is_mitogen diff --git a/tests/ansible/integration/runner/forking_inactive.yml b/tests/ansible/integration/runner/forking_inactive.yml new file mode 100644 index 00000000..b84cec7e --- /dev/null +++ b/tests/ansible/integration/runner/forking_inactive.yml @@ -0,0 +1,23 @@ +# Verify non-async jobs run in-process. + +- name: integration/runner/forking_inactive.yml + hosts: test-targets + any_errors_fatal: true + tasks: + + - name: get process ID. + custom_python_detect_environment: + register: sync_proc1 + when: is_mitogen + + - name: get process ID again. + custom_python_detect_environment: + register: sync_proc2 + when: is_mitogen + + - assert: + that: + - sync_proc1.pid == sync_proc2.pid + when: is_mitogen + + diff --git a/tests/ansible/integration/runner/missing_module.yml b/tests/ansible/integration/runner/missing_module.yml new file mode 100644 index 00000000..064a9bf8 --- /dev/null +++ b/tests/ansible/integration/runner/missing_module.yml @@ -0,0 +1,19 @@ + +- name: integration/runner/missing_module.yml + hosts: test-targets + connection: local + tasks: + - connection: local + command: | + ansible -vvv + -i "{{inventory_file}}" + test-targets + -m missing_module + args: + chdir: ../.. + register: out + ignore_errors: true + + - assert: + that: | + 'The module missing_module was not found in configured module paths.' in out.stdout diff --git a/tests/ansible/integration/ssh/variables.yml b/tests/ansible/integration/ssh/variables.yml index 110d3340..dc4fe434 100644 --- a/tests/ansible/integration/ssh/variables.yml +++ b/tests/ansible/integration/ssh/variables.yml @@ -101,6 +101,11 @@ when: is_mitogen + - name: ansible_ssh_private_key_file + shell: chmod 0600 ../data/docker/mitogen__has_sudo_pubkey.key + args: + chdir: ../.. + - name: ansible_ssh_private_key_file shell: > ANSIBLE_STRATEGY=mitogen_linear diff --git a/tests/ansible/integration/transport/README.md b/tests/ansible/integration/transport/README.md new file mode 100644 index 00000000..9a31a530 --- /dev/null +++ b/tests/ansible/integration/transport/README.md @@ -0,0 +1,2 @@ + +# Integration tests that require a real target available. diff --git a/tests/ansible/integration/transport/all.yml b/tests/ansible/integration/transport/all.yml new file mode 100644 index 00000000..89949b58 --- /dev/null +++ b/tests/ansible/integration/transport/all.yml @@ -0,0 +1,2 @@ + +- import_playbook: kubectl.yml diff --git a/tests/ansible/integration/transport/kubectl.yml b/tests/ansible/integration/transport/kubectl.yml new file mode 100644 index 00000000..d2be9ba5 --- /dev/null +++ b/tests/ansible/integration/transport/kubectl.yml @@ -0,0 +1,146 @@ +--- + +- name: "Create pod" + tags: create + hosts: localhost + vars: + pod_count: 10 + loop_count: 5 + gather_facts: no + tasks: + - name: Create a test pod + k8s: + state: present + definition: + apiVersion: v1 + kind: Pod + metadata: + name: test-pod-{{item}} + namespace: default + spec: + containers: + - name: python2 + image: python:2 + args: [ "sleep", "100000" ] + - name: python3 + image: python:3 + args: [ "sleep", "100000" ] + loop: "{{ range(pod_count|int)|list }}" + + - name: "Wait pod to be running" + debug: { msg: "pod is running" } + # status and availableReplicas might not be there. Using default value (d(default_value)) + until: "pod_def.status.containerStatuses[0].ready" + # Waiting 100 s + retries: 50 + delay: 2 + vars: + pod_def: "{{lookup('k8s', kind='Pod', namespace='default', resource_name='test-pod-' ~ item)}}" + loop: "{{ range(pod_count|int)|list }}" + + - name: "Add pod to pods group" + add_host: + name: "test-pod-{{item}}" + groups: [ "pods" ] + ansible_connection: "kubectl" + changed_when: no + tags: "always" + loop: "{{ range(pod_count|int)|list }}" + +- name: "Test kubectl connection (default strategy)" + tags: default + hosts: pods + strategy: "linear" + vars: + pod_count: 10 + loop_count: 5 + gather_facts: no + tasks: + - name: "Simple shell with linear" + shell: ls /tmp + loop: "{{ range(loop_count|int)|list }}" + + - name: "Simple file with linear" + file: + path: "/etc" + state: directory + loop: "{{ range(loop_count|int)|list }}" + + - block: + - name: "Check python version on python3 container" + command: python --version + vars: + ansible_kubectl_container: python3 + register: _ + + - assert: { that: "'Python 3' in _.stdout" } + + - debug: var=_.stdout,_.stderr + run_once: yes + + - name: "Check python version on default container" + command: python --version + register: _ + + - assert: { that: "'Python 2' in _.stderr" } + + - debug: var=_.stdout,_.stderr + run_once: yes + +- name: "Test kubectl connection (mitogen strategy)" + tags: mitogen + hosts: pods + strategy: "mitogen_linear" + vars: + pod_count: 10 + loop_count: 5 + gather_facts: no + tasks: + - name: "Simple shell with mitogen" + shell: ls /tmp + loop: "{{ range(loop_count|int)|list }}" + + - name: "Simple file with mitogen" + file: + path: "/etc" + state: directory + loop: "{{ range(loop_count|int)|list }}" + + - block: + - name: "Check python version on python3 container" + command: python --version + vars: + ansible_kubectl_container: python3 + register: _ + + - assert: { that: "'Python 3' in _.stdout" } + + - debug: var=_.stdout,_.stderr + run_once: yes + + - name: "Check python version on default container" + command: python --version + register: _ + + - assert: { that: "'Python 2' in _.stderr" } + + - debug: var=_.stdout,_.stderr + run_once: yes + tags: check + +- name: "Destroy pod" + tags: cleanup + hosts: pods + gather_facts: no + vars: + ansible_connection: "local" + tasks: + - name: Destroy pod + k8s: + state: absent + definition: + apiVersion: v1 + kind: Pod + metadata: + name: "{{inventory_hostname}}" + namespace: default diff --git a/tests/ansible/lib/action/connection_passthrough.py b/tests/ansible/lib/action/connection_passthrough.py new file mode 100644 index 00000000..1e9211e4 --- /dev/null +++ b/tests/ansible/lib/action/connection_passthrough.py @@ -0,0 +1,28 @@ + +import traceback +import sys + +from ansible.plugins.strategy import StrategyBase +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + def run(self, tmp=None, task_vars=None): + try: + method = getattr(self._connection, self._task.args['method']) + args = tuple(self._task.args.get('args', ())) + kwargs = self._task.args.get('kwargs', {}) + + return { + 'changed': False, + 'failed': False, + 'result': method(*args, **kwargs) + } + except Exception as e: + traceback.print_exc() + return { + 'changed': False, + 'failed': True, + 'msg': str(e), + 'result': e, + } diff --git a/tests/ansible/lib/action/mitogen_get_stack.py b/tests/ansible/lib/action/mitogen_get_stack.py new file mode 100644 index 00000000..f1b87f35 --- /dev/null +++ b/tests/ansible/lib/action/mitogen_get_stack.py @@ -0,0 +1,22 @@ +""" +Fetch the connection configuration stack that would be used to connect to a +target, without actually connecting to it. +""" + +import ansible_mitogen.connection + +from ansible.plugins.action import ActionBase + + +class ActionModule(ActionBase): + def run(self, tmp=None, task_vars=None): + if not isinstance(self._connection, + ansible_mitogen.connection.Connection): + return { + 'skipped': True, + } + + return { + 'changed': True, + 'result': self._connection._build_stack(), + } diff --git a/tests/ansible/lib/action/mitogen_shutdown_all.py b/tests/ansible/lib/action/mitogen_shutdown_all.py index 6ebdbf5c..4909dfe9 100644 --- a/tests/ansible/lib/action/mitogen_shutdown_all.py +++ b/tests/ansible/lib/action/mitogen_shutdown_all.py @@ -3,9 +3,6 @@ Arrange for all ContextService connections to be torn down unconditionally, required for reliable LRU tests. """ -import traceback -import sys - import ansible_mitogen.connection import ansible_mitogen.services import mitogen.service diff --git a/tests/ansible/lib/callback/nice_stdout.py b/tests/ansible/lib/callback/nice_stdout.py index fa720fd2..1884ee5d 100644 --- a/tests/ansible/lib/callback/nice_stdout.py +++ b/tests/ansible/lib/callback/nice_stdout.py @@ -1,6 +1,8 @@ from __future__ import unicode_literals +import os import io +from ansible import constants as C from ansible.module_utils import six try: @@ -8,6 +10,11 @@ try: except ImportError: from ansible.plugins.loader import callback_loader +try: + pprint = __import__(os.environ['NICE_STDOUT_PPRINT']) +except KeyError: + pprint = None + def printi(tio, obj, key=None, indent=0): def write(s, *args): @@ -50,9 +57,43 @@ class CallbackModule(DefaultModule): def _dump_results(self, result, *args, **kwargs): try: tio = io.StringIO() - printi(tio, result) + if pprint: + pprint.pprint(result, stream=tio) + else: + printi(tio, result) return tio.getvalue() #.encode('ascii', 'replace') except: import traceback traceback.print_exc() raise + + def v2_runner_on_failed(self, result, ignore_errors=False): + delegated_vars = result._result.get('_ansible_delegated_vars') + self._clean_results(result._result, result._task.action) + + if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid: + self._print_task_banner(result._task) + + self._handle_exception(result._result) + self._handle_warnings(result._result) + + if result._task.loop and 'results' in result._result: + return + + if delegated_vars: + msg = "[%s -> %s]: FAILED! => %s" % ( + result._host.get_name(), + delegated_vars['ansible_host'], + self._dump_results(result._result), + ) + else: + msg = "[%s]: FAILED! => %s" % ( + result._host.get_name(), + self._dump_results(result._result), + ) + + s = "fatal: %s: %s" % ( + result._task.get_path() or '(dynamic task)', + msg, + ) + self._display.display(s, color=C.COLOR_ERROR) diff --git a/tests/ansible/lib/inventory/gcloud.py b/tests/ansible/lib/inventory/gcloud.py index 2135d913..73e083f4 100755 --- a/tests/ansible/lib/inventory/gcloud.py +++ b/tests/ansible/lib/inventory/gcloud.py @@ -14,7 +14,7 @@ import googleapiclient.discovery def main(): project = 'mitogen-load-testing' zone = 'europe-west1-d' - group_name = 'target' + group_name = 'micro-debian9' client = googleapiclient.discovery.build('compute', 'v1') resp = client.instances().list(project=project, zone=zone).execute() diff --git a/tests/ansible/lib/modules/custom_binary_producing_json_Darwin b/tests/ansible/lib/modules/custom_binary_producing_json_Darwin new file mode 100755 index 00000000..69de2fea Binary files /dev/null and b/tests/ansible/lib/modules/custom_binary_producing_json_Darwin differ diff --git a/tests/ansible/lib/modules/custom_binary_producing_json_Linux b/tests/ansible/lib/modules/custom_binary_producing_json_Linux new file mode 100755 index 00000000..16e6d046 Binary files /dev/null and b/tests/ansible/lib/modules/custom_binary_producing_json_Linux differ diff --git a/tests/ansible/lib/modules/custom_binary_producing_junk_Darwin b/tests/ansible/lib/modules/custom_binary_producing_junk_Darwin new file mode 100755 index 00000000..108f0787 Binary files /dev/null and b/tests/ansible/lib/modules/custom_binary_producing_junk_Darwin differ diff --git a/tests/ansible/lib/modules/custom_binary_producing_junk_Linux b/tests/ansible/lib/modules/custom_binary_producing_junk_Linux new file mode 100755 index 00000000..4aadc9c1 Binary files /dev/null and b/tests/ansible/lib/modules/custom_binary_producing_junk_Linux differ diff --git a/tests/ansible/lib/modules/custom_python_detect_environment.py b/tests/ansible/lib/modules/custom_python_detect_environment.py index 8fe50bbc..2da9cddf 100644 --- a/tests/ansible/lib/modules/custom_python_detect_environment.py +++ b/tests/ansible/lib/modules/custom_python_detect_environment.py @@ -3,6 +3,7 @@ # interpreter I run within. from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import get_module_path from ansible.module_utils import six import os @@ -29,6 +30,8 @@ def main(): mitogen_loaded='mitogen.core' in sys.modules, hostname=socket.gethostname(), username=pwd.getpwuid(os.getuid()).pw_name, + module_tmpdir=getattr(module, 'tmpdir', None), + module_path=get_module_path(), ) if __name__ == '__main__': diff --git a/tests/ansible/lib/modules/custom_python_run_script.py b/tests/ansible/lib/modules/custom_python_run_script.py new file mode 100644 index 00000000..2313291b --- /dev/null +++ b/tests/ansible/lib/modules/custom_python_run_script.py @@ -0,0 +1,39 @@ +#!/usr/bin/python +# I am an Ansible new-style Python module. I run the script provided in the +# parameter. + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import get_module_path +from ansible.module_utils import six + +import os +import pwd +import socket +import sys + + +def execute(s, gbls, lcls): + if sys.version_info > (3,): + exec(s, gbls, lcls) + else: + exec('exec s in gbls, lcls') + + +def main(): + module = AnsibleModule(argument_spec={ + 'script': { + 'type': str + } + }) + + lcls = { + 'module': module, + 'result': {} + } + execute(module.params['script'], globals(), lcls) + del lcls['module'] + module.exit_json(**lcls['result']) + + +if __name__ == '__main__': + main() diff --git a/tests/ansible/osx_setup.yml b/tests/ansible/osx_setup.yml deleted file mode 100644 index 7a6ff23f..00000000 --- a/tests/ansible/osx_setup.yml +++ /dev/null @@ -1,155 +0,0 @@ - -# -# Add users expected by tests to an OS X machine. Assumes passwordless sudo to -# root. -# -# WARNING: this creates non-privilged accounts with pre-set passwords! -# - -- hosts: test-targets - gather_facts: true - become: true - tasks: - - name: Disable non-localhost SSH for Mitogen users - blockinfile: - path: /etc/ssh/sshd_config - block: | - Match User mitogen__* Address !127.0.0.1 - DenyUsers * - - # - # Hashed passwords. - # - - name: Create Mitogen test group - group: - name: "mitogen__group" - - - name: Create Mitogen test users - user: - name: "mitogen__{{item}}" - shell: /bin/bash - groups: mitogen__group - password: "{{ (item + '_password') | password_hash('sha256') }}" - with_items: - - has_sudo - - has_sudo_pubkey - - require_tty - - pw_required - - readonly_homedir - - require_tty_pw_required - - slow_user - when: ansible_system != 'Darwin' - - - name: Create Mitogen test users - user: - name: "mitogen__user{{item}}" - shell: /bin/bash - password: "{{ ('user' + item + '_password') | password_hash('sha256') }}" - with_sequence: start=1 end=21 - when: ansible_system != 'Darwin' - - # - # Plaintext passwords - # - - name: Create Mitogen test users - user: - name: "mitogen__{{item}}" - shell: /bin/bash - groups: mitogen__group - password: "{{item}}_password" - with_items: - - has_sudo - - has_sudo_pubkey - - require_tty - - pw_required - - require_tty_pw_required - - readonly_homedir - - slow_user - when: ansible_system == 'Darwin' - - - name: Create Mitogen test users - user: - name: "mitogen__user{{item}}" - shell: /bin/bash - password: "user{{item}}_password" - with_sequence: start=1 end=21 - when: ansible_system == 'Darwin' - - - name: Hide test users from login window. - shell: > - defaults - write - /Library/Preferences/com.apple.loginwindow - HiddenUsersList - -array-add '{{item}}' - with_items: - - mitogen__require_tty - - mitogen__pw_required - - mitogen__require_tty_pw_required - when: ansible_system == 'Darwin' - - - name: Hide test users from login window. - shell: > - defaults - write - /Library/Preferences/com.apple.loginwindow - HiddenUsersList - -array-add 'mitogen__user{{item}}' - with_sequence: start=1 end=21 - when: ansible_distribution == 'MacOSX' - - - name: Readonly homedir for one account - shell: "chown -R root: ~mitogen__readonly_homedir" - - - name: Slow bash profile for one account - copy: - dest: ~mitogen__slow_user/.{{item}} - src: ../data/docker/mitogen__slow_user.profile - with_items: - - bashrc - - profile - - - name: Install pubkey for one account - file: - path: ~mitogen__has_sudo_pubkey/.ssh - state: directory - mode: go= - owner: mitogen__has_sudo_pubkey - - - name: Install pubkey for one account - copy: - dest: ~mitogen__has_sudo_pubkey/.ssh/authorized_keys - src: ../data/docker/mitogen__has_sudo_pubkey.key.pub - mode: go= - owner: mitogen__has_sudo_pubkey - - - name: Require a TTY for two accounts - lineinfile: - path: /etc/sudoers - line: "{{item}}" - with_items: - - Defaults>mitogen__pw_required targetpw - - Defaults>mitogen__require_tty requiretty - - Defaults>mitogen__require_tty_pw_required requiretty,targetpw - - - name: Require password for two accounts - lineinfile: - path: /etc/sudoers - line: "{{lookup('pipe', 'whoami')}} ALL = ({{item}}) ALL" - with_items: - - mitogen__pw_required - - mitogen__require_tty_pw_required - - - name: Allow passwordless for two accounts - lineinfile: - path: /etc/sudoers - line: "{{lookup('pipe', 'whoami')}} ALL = ({{item}}) NOPASSWD:ALL" - with_items: - - mitogen__require_tty - - mitogen__readonly_homedir - - - name: Allow passwordless for many accounts - lineinfile: - path: /etc/sudoers - line: "{{lookup('pipe', 'whoami')}} ALL = (mitogen__user{{item}}) NOPASSWD:ALL" - with_sequence: start=1 end=21 diff --git a/tests/ansible/regression/all.yml b/tests/ansible/regression/all.yml index ecb9638c..46798b3e 100644 --- a/tests/ansible/regression/all.yml +++ b/tests/ansible/regression/all.yml @@ -7,3 +7,4 @@ - import_playbook: issue_152__virtualenv_python_fails.yml - import_playbook: issue_154__module_state_leaks.yml - import_playbook: issue_177__copy_module_failing.yml +- import_playbook: issue_332_ansiblemoduleerror_first_occurrence.yml diff --git a/tests/ansible/regression/issue_140__thread_pileup.yml b/tests/ansible/regression/issue_140__thread_pileup.yml index 99f31896..c0158018 100644 --- a/tests/ansible/regression/issue_140__thread_pileup.yml +++ b/tests/ansible/regression/issue_140__thread_pileup.yml @@ -16,7 +16,7 @@ creates: /tmp/filetree.in - name: Delete remote file tree - shell: rm -rf /tmp/filetree.out + file: path=/tmp/filetree.out state=absent - file: state: directory @@ -26,6 +26,5 @@ copy: src: "{{item.src}}" dest: "/tmp/filetree.out/{{item.path}}" - with_filetree: - - /tmp/filetree.in + with_filetree: /tmp/filetree.in when: item.state == 'file' diff --git a/tests/ansible/regression/issue_332_ansiblemoduleerror_first_occurrence.yml b/tests/ansible/regression/issue_332_ansiblemoduleerror_first_occurrence.yml new file mode 100644 index 00000000..0162c210 --- /dev/null +++ b/tests/ansible/regression/issue_332_ansiblemoduleerror_first_occurrence.yml @@ -0,0 +1,14 @@ +# issue #332: Ansible 2.6 file.py started defining an excepthook and private +# AnsibleModuleError. Ensure file fails correctly. + +- name: regression/issue_332_ansiblemoduleerror_first_occurrence.yml + hosts: test-targets + tasks: + - file: path=/usr/bin/does-not-exist mode='a-s' state=file follow=yes + ignore_errors: true + register: out + + - assert: + that: + - out.state == 'absent' + - out.msg == 'file (/usr/bin/does-not-exist) is absent, cannot continue' diff --git a/tests/ansible/soak/_file_service_loop.yml b/tests/ansible/soak/_file_service_loop.yml new file mode 100644 index 00000000..96111b3c --- /dev/null +++ b/tests/ansible/soak/_file_service_loop.yml @@ -0,0 +1,6 @@ + - file: + path: /tmp/foo-{{inventory_hostname}} + state: absent + - copy: + dest: /tmp/foo-{{inventory_hostname}} + content: "{{content}}" diff --git a/tests/ansible/soak/file_service.yml b/tests/ansible/soak/file_service.yml new file mode 100644 index 00000000..3b338b3c --- /dev/null +++ b/tests/ansible/soak/file_service.yml @@ -0,0 +1,6 @@ +- hosts: all + tasks: + - set_fact: + content: "{% for x in range(126977) %}x{% endfor %}" + - include_tasks: _file_service_loop.yml + with_sequence: start=1 end=100 diff --git a/tests/ansible/tests/helpers_test.py b/tests/ansible/tests/helpers_test.py deleted file mode 100644 index 95973b1f..00000000 --- a/tests/ansible/tests/helpers_test.py +++ /dev/null @@ -1,20 +0,0 @@ - -import unittest2 - -import ansible_mitogen.helpers -import testlib - - -class ApplyModeSpecTest(unittest2.TestCase): - func = staticmethod(ansible_mitogen.helpers.apply_mode_spec) - - def test_simple(self): - spec = 'u+rwx,go=x' - self.assertEquals(0711, self.func(spec, 0)) - - spec = 'g-rw' - self.assertEquals(0717, self.func(spec, 0777)) - - -if __name__ == '__main__': - unittest2.main() diff --git a/tests/ansible/tests/target_test.py b/tests/ansible/tests/target_test.py new file mode 100644 index 00000000..e3d59433 --- /dev/null +++ b/tests/ansible/tests/target_test.py @@ -0,0 +1,77 @@ + +from __future__ import absolute_import +import os.path +import subprocess +import tempfile +import unittest2 + +import mock + +import ansible_mitogen.target +import testlib + + +LOGGER_NAME = ansible_mitogen.target.LOG.name + + +class NamedTemporaryDirectory(object): + def __enter__(self): + self.path = tempfile.mkdtemp() + return self.path + + def __exit__(self, _1, _2, _3): + subprocess.check_call(['rm', '-rf', self.path]) + + +class ApplyModeSpecTest(unittest2.TestCase): + func = staticmethod(ansible_mitogen.target.apply_mode_spec) + + def test_simple(self): + spec = 'u+rwx,go=x' + self.assertEquals(0711, self.func(spec, 0)) + + spec = 'g-rw' + self.assertEquals(0717, self.func(spec, 0777)) + + +class IsGoodTempDirTest(unittest2.TestCase): + func = staticmethod(ansible_mitogen.target.is_good_temp_dir) + + def test_creates(self): + with NamedTemporaryDirectory() as temp_path: + bleh = os.path.join(temp_path, 'bleh') + self.assertFalse(os.path.exists(bleh)) + self.assertTrue(self.func(bleh)) + self.assertTrue(os.path.exists(bleh)) + + def test_file_exists(self): + with NamedTemporaryDirectory() as temp_path: + bleh = os.path.join(temp_path, 'bleh') + with open(bleh, 'w') as fp: + fp.write('derp') + self.assertTrue(os.path.isfile(bleh)) + self.assertFalse(self.func(bleh)) + self.assertEquals(open(bleh).read(), 'derp') + + def test_unwriteable(self): + with NamedTemporaryDirectory() as temp_path: + os.chmod(temp_path, 0) + self.assertFalse(self.func(temp_path)) + os.chmod(temp_path, int('0700', 8)) + + @mock.patch('os.chmod') + def test_weird_filesystem(self, os_chmod): + os_chmod.side_effect = OSError('nope') + with NamedTemporaryDirectory() as temp_path: + self.assertFalse(self.func(temp_path)) + + @mock.patch('os.access') + def test_noexec(self, os_access): + os_access.return_value = False + with NamedTemporaryDirectory() as temp_path: + self.assertFalse(self.func(temp_path)) + + + +if __name__ == '__main__': + unittest2.main() diff --git a/tests/bench/roundtrip.py b/tests/bench/roundtrip.py index 40582d46..13b9413d 100644 --- a/tests/bench/roundtrip.py +++ b/tests/bench/roundtrip.py @@ -12,6 +12,6 @@ def do_nothing(): def main(router): f = router.fork() t0 = time.time() - for x in xrange(1000): + for x in xrange(10000): f.call(do_nothing) print '++', int(1e6 * ((time.time() - t0) / (1.0+x))), 'usec' diff --git a/tests/build_docker_images.py b/tests/build_docker_images.py deleted file mode 100755 index 7f856b2b..00000000 --- a/tests/build_docker_images.py +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env python - -""" -Build the Docker images used for testing. -""" - -import commands -import os -import shlex -import subprocess -import tempfile - - -DEBIAN_DOCKERFILE = r""" -FROM debian:stretch -RUN apt-get update -RUN \ - apt-get install -y python2.7 openssh-server sudo rsync git strace \ - libjson-perl python-virtualenv && \ - apt-get clean && \ - rm -rf /var/cache/apt -""" - -CENTOS6_DOCKERFILE = r""" -FROM centos:6 -RUN yum clean all && \ - yum -y install -y python2.6 openssh-server sudo rsync git strace sudo \ - perl-JSON python-virtualenv && \ - yum clean all && \ - groupadd sudo && \ - ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key - -""" - -CENTOS7_DOCKERFILE = r""" -FROM centos:7 -RUN yum clean all && \ - yum -y install -y python2.7 openssh-server sudo rsync git strace sudo \ - perl-JSON python-virtualenv && \ - yum clean all && \ - groupadd sudo && \ - ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key - -""" - -DOCKERFILE = r""" -COPY data/001-mitogen.sudo /etc/sudoers.d/001-mitogen -COPY data/docker/ssh_login_banner.txt /etc/ssh/banner.txt -RUN \ - chsh -s /bin/bash && \ - mkdir -p /var/run/sshd && \ - echo i-am-mitogen-test-docker-image > /etc/sentinel && \ - echo "Banner /etc/ssh/banner.txt" >> /etc/ssh/sshd_config && \ - groupadd mitogen__sudo_nopw && \ - useradd -s /bin/bash -m mitogen__has_sudo -G SUDO_GROUP && \ - useradd -s /bin/bash -m mitogen__has_sudo_pubkey -G SUDO_GROUP && \ - useradd -s /bin/bash -m mitogen__has_sudo_nopw -G mitogen__sudo_nopw && \ - useradd -s /bin/bash -m mitogen__webapp && \ - useradd -s /bin/bash -m mitogen__pw_required && \ - useradd -s /bin/bash -m mitogen__require_tty && \ - useradd -s /bin/bash -m mitogen__require_tty_pw_required && \ - useradd -s /bin/bash -m mitogen__readonly_homedir && \ - useradd -s /bin/bash -m mitogen__slow_user && \ - chown -R root: ~mitogen__readonly_homedir && \ - ( for i in `seq 1 21`; do useradd -s /bin/bash -m mitogen__user${i}; done; ) && \ - ( for i in `seq 1 21`; do echo mitogen__user${i}:user${i}_password | chpasswd; done; ) && \ - ( echo 'root:rootpassword' | chpasswd; ) && \ - ( echo 'mitogen__has_sudo:has_sudo_password' | chpasswd; ) && \ - ( echo 'mitogen__has_sudo_pubkey:has_sudo_pubkey_password' | chpasswd; ) && \ - ( echo 'mitogen__has_sudo_nopw:has_sudo_nopw_password' | chpasswd; ) && \ - ( echo 'mitogen__webapp:webapp_password' | chpasswd; ) && \ - ( echo 'mitogen__pw_required:pw_required_password' | chpasswd; ) && \ - ( echo 'mitogen__require_tty:require_tty_password' | chpasswd; ) && \ - ( echo 'mitogen__require_tty_pw_required:require_tty_pw_required_password' | chpasswd; ) && \ - ( echo 'mitogen__readonly_homedir:readonly_homedir_password' | chpasswd; ) && \ - ( echo 'mitogen__slow_user:slow_user_password' | chpasswd; ) && \ - mkdir ~mitogen__has_sudo_pubkey/.ssh && \ - ( echo '#!/bin/bash\nexec strace -ff -o /tmp/pywrap$$.trace python2.7 "$@"' > /usr/local/bin/pywrap; chmod +x /usr/local/bin/pywrap; ) - -COPY data/docker/mitogen__has_sudo_pubkey.key.pub /home/mitogen__has_sudo_pubkey/.ssh/authorized_keys -COPY data/docker/mitogen__slow_user.profile /home/mitogen__slow_user/.profile -COPY data/docker/mitogen__slow_user.profile /home/mitogen__slow_user/.bashrc - -RUN \ - chown -R mitogen__has_sudo_pubkey ~mitogen__has_sudo_pubkey && \ - chmod -R go= ~mitogen__has_sudo_pubkey - -RUN sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config -RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd - -ENV NOTVISIBLE "in users profile" -RUN echo "export VISIBLE=now" >> /etc/profile - -EXPOSE 22 -CMD ["/usr/sbin/sshd", "-D"] -""" - - -def sh(s, *args): - if args: - s %= tuple(map(commands.mkarg, args)) - return shlex.split(s) - - -for (distro, wheel, prefix) in ( - ('debian', 'sudo', DEBIAN_DOCKERFILE), - ('centos6', 'wheel', CENTOS6_DOCKERFILE), - ('centos7', 'wheel', CENTOS7_DOCKERFILE), - ): - mydir = os.path.abspath(os.path.dirname(__file__)) - with tempfile.NamedTemporaryFile(dir=mydir) as dockerfile_fp: - dockerfile_fp.write(prefix) - dockerfile_fp.write(DOCKERFILE.replace('SUDO_GROUP', wheel)) - dockerfile_fp.flush() - - subprocess.check_call(sh('docker build %s -t %s -f %s', - mydir, - 'mitogen/%s-test' % (distro,), - dockerfile_fp.name - )) diff --git a/tests/call_function_test.py b/tests/call_function_test.py index f0074258..dc9a2298 100644 --- a/tests/call_function_test.py +++ b/tests/call_function_test.py @@ -4,6 +4,7 @@ import time import unittest2 import mitogen.core +import mitogen.parent import mitogen.master import testlib @@ -18,15 +19,15 @@ def function_that_adds_numbers(x, y): return x + y -def function_that_fails(): - raise plain_old_module.MyError('exception text') +def function_that_fails(s=''): + raise plain_old_module.MyError('exception text'+s) def func_with_bad_return_value(): return CrazyType() -def func_accepts_returns_context(context): +def func_returns_arg(context): return context @@ -36,7 +37,17 @@ def func_accepts_returns_sender(sender): return sender +class TargetClass: + + offset = 100 + + @classmethod + def add_numbers_with_offset(cls, x, y): + return cls.offset + x + y + + class CallFunctionTest(testlib.RouterMixin, testlib.TestCase): + def setUp(self): super(CallFunctionTest, self).setUp() self.local = self.router.fork() @@ -44,6 +55,12 @@ class CallFunctionTest(testlib.RouterMixin, testlib.TestCase): def test_succeeds(self): self.assertEqual(3, self.local.call(function_that_adds_numbers, 1, 2)) + def test_succeeds_class_method(self): + self.assertEqual( + self.local.call(TargetClass.add_numbers_with_offset, 1, 2), + 103, + ) + def test_crashes(self): exc = self.assertRaises(mitogen.core.CallError, lambda: self.local.call(function_that_fails)) @@ -85,7 +102,7 @@ class CallFunctionTest(testlib.RouterMixin, testlib.TestCase): self.assertEquals(exc.args[0], mitogen.core.ChannelError.local_msg) def test_accepts_returns_context(self): - context = self.local.call(func_accepts_returns_context, self.local) + context = self.local.call(func_returns_arg, self.local) self.assertIsNot(context, self.local) self.assertEqual(context.context_id, self.local.context_id) self.assertEqual(context.name, self.local.name) @@ -102,5 +119,40 @@ class CallFunctionTest(testlib.RouterMixin, testlib.TestCase): lambda: recv.get().unpickle()) +class ChainTest(testlib.RouterMixin, testlib.TestCase): + # Verify mitogen_chain functionality. + klass = mitogen.parent.CallChain + + def setUp(self): + super(ChainTest, self).setUp() + self.local = self.router.fork() + + def test_subsequent_calls_produce_same_error(self): + chain = self.klass(self.local, pipelined=True) + self.assertEquals('xx', chain.call(func_returns_arg, 'xx')) + chain.call_no_reply(function_that_fails, 'x1') + e1 = self.assertRaises(mitogen.core.CallError, + lambda: chain.call(function_that_fails, 'x2')) + e2 = self.assertRaises(mitogen.core.CallError, + lambda: chain.call(func_returns_arg, 'x3')) + self.assertEquals(str(e1), str(e2)) + + def test_unrelated_overlapping_failed_chains(self): + c1 = self.klass(self.local, pipelined=True) + c2 = self.klass(self.local, pipelined=True) + c1.call_no_reply(function_that_fails, 'c1') + self.assertEquals('yes', c2.call(func_returns_arg, 'yes')) + self.assertRaises(mitogen.core.CallError, + lambda: c1.call(func_returns_arg, 'yes')) + + def test_reset(self): + c1 = self.klass(self.local, pipelined=True) + c1.call_no_reply(function_that_fails, 'x1') + e1 = self.assertRaises(mitogen.core.CallError, + lambda: c1.call(function_that_fails, 'x2')) + c1.reset() + self.assertEquals('x3', c1.call(func_returns_arg, 'x3')) + + if __name__ == '__main__': unittest2.main() diff --git a/tests/data/001-mitogen.sudo b/tests/data/docker/001-mitogen.sudo similarity index 100% rename from tests/data/001-mitogen.sudo rename to tests/data/docker/001-mitogen.sudo diff --git a/tests/data/fake_lxc.py b/tests/data/fake_lxc.py new file mode 100755 index 00000000..2fedb961 --- /dev/null +++ b/tests/data/fake_lxc.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python + +import sys +import os + +os.environ['ORIGINAL_ARGV'] = repr(sys.argv) +os.execv(sys.executable, sys.argv[sys.argv.index('--') + 1:]) diff --git a/tests/data/fake_lxc_attach.py b/tests/data/fake_lxc_attach.py new file mode 100755 index 00000000..2fedb961 --- /dev/null +++ b/tests/data/fake_lxc_attach.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python + +import sys +import os + +os.environ['ORIGINAL_ARGV'] = repr(sys.argv) +os.execv(sys.executable, sys.argv[sys.argv.index('--') + 1:]) diff --git a/tests/data/fakessh.py b/tests/data/fakessh.py index 08a5da3e..8df5aa39 100755 --- a/tests/data/fakessh.py +++ b/tests/data/fakessh.py @@ -6,6 +6,52 @@ import shlex import subprocess import sys + +HOST_KEY_ASK_MSG = """ +The authenticity of host '[91.121.165.123]:9122 ([91.121.165.123]:9122)' can't be established. +ECDSA key fingerprint is SHA256:JvfPvazZzQ9/CUdKN7tiYlNZtDRdEgDsYVIzOgPrsR4. +Are you sure you want to continue connecting (yes/no)? +""".strip('\n') + +HOST_KEY_STRICT_MSG = """Host key verification failed.\n""" + + +def tty(msg): + fp = open('/dev/tty', 'wb', 0) + fp.write(msg.encode()) + fp.close() + + +def stderr(msg): + fp = open('/dev/stderr', 'wb', 0) + fp.write(msg.encode()) + fp.close() + + +def confirm(msg): + tty(msg) + fp = open('/dev/tty', 'rb', 0) + try: + return fp.readline().decode() + finally: + fp.close() + + +if os.getenv('FAKESSH_MODE') == 'ask': + assert 'y\n' == confirm(HOST_KEY_ASK_MSG) + +if os.getenv('FAKESSH_MODE') == 'strict': + stderr(HOST_KEY_STRICT_MSG) + sys.exit(255) + + +# +# Set an env var if stderr was a TTY to make ssh_test tests easier to write. +# +if os.isatty(2): + os.environ['STDERR_WAS_TTY'] = '1' + + parser = optparse.OptionParser() parser.add_option('--user', '-l', action='store') parser.add_option('-o', dest='options', action='append') diff --git a/tests/image_prep/README.md b/tests/image_prep/README.md new file mode 100644 index 00000000..d275672f --- /dev/null +++ b/tests/image_prep/README.md @@ -0,0 +1,25 @@ + +# `image_prep` + +This directory contains Ansible playbooks for building the Docker containers +used for testing, or for setting up an OS X laptop so the tests can (mostly) +run locally. + +The Docker config is more heavily jinxed to trigger adverse conditions in the +code, the OS X config just has the user accounts. + +See ../README.md for a (mostly) description of the accounts created. + + +## Building the containers + +``./build_docker_images.sh`` + + +## Preparing an OS X box + +WARNING: this creates a ton of accounts with preconfigured passwords. It is +generally impossible to restrict remote access to these, so your only option is +to disable remote login and sharing. + +``ansible-playbook -b -c local -i localhost, -l localhost setup.yml`` diff --git a/tests/image_prep/_container_setup.yml b/tests/image_prep/_container_setup.yml new file mode 100644 index 00000000..db0d3789 --- /dev/null +++ b/tests/image_prep/_container_setup.yml @@ -0,0 +1,117 @@ + +- hosts: all + strategy: linear + gather_facts: false + tasks: + - raw: > + if ! python -c ''; then + if type -p yum; then + yum -y install python; + else + apt-get -y update && apt-get -y install python; + fi; + fi + +- hosts: all + strategy: mitogen_free + # Can't gather facts before here. + gather_facts: true + vars: + distro: "{{ansible_distribution}}" + ver: "{{ansible_distribution_major_version}}" + + packages: + common: + - git + - openssh-server + - rsync + - strace + - sudo + Debian: + "9": + - libjson-perl + - python-virtualenv + CentOS: + "6": + - perl-JSON + "7": + - perl-JSON + - python-virtualenv + + tasks: + - when: ansible_virtualization_type != "docker" + meta: end_play + + - apt: + name: "{{packages.common + packages[distro][ver]}}" + state: installed + update_cache: true + when: distro == "Debian" + + - yum: + name: "{{packages.common + packages[distro][ver]}}" + state: installed + update_cache: true + when: distro == "CentOS" + + - command: apt-get clean + when: distro == "Debian" + + - command: yum clean all + when: distro == "CentOS" + + - shell: rm -rf {{item}}/* + with_items: + - /var/cache/apt + - /var/lib/apt/lists + when: distro == "Debian" + + - user: + name: root + password: "{{ 'rootpassword' | password_hash('sha256') }}" + shell: /bin/bash + + - file: + path: /var/run/sshd + state: directory + + - command: ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key + args: + creates: /etc/ssh/ssh_host_rsa_key + + - group: + name: "{{sudo_group[distro]}}" + + - copy: + dest: /etc/sentinel + content: | + i-am-mitogen-test-docker-image + + - copy: + dest: /etc/ssh/banner.txt + src: ../data/docker/ssh_login_banner.txt + + - copy: + dest: /etc/sudoers.d/001-mitogen + src: ../data/docker/001-mitogen.sudo + + - lineinfile: + path: /etc/ssh/sshd_config + line: Banner /etc/ssh/banner.txt + + - lineinfile: + path: /etc/ssh/sshd_config + line: PermitRootLogin yes + regexp: '.*PermitRootLogin.*' + + - lineinfile: + path: /etc/pam.d/sshd + regexp: '.*session.*required.*pam_loginuid.so' + line: session optional pam_loginuid.so + + - copy: + mode: 'u+rwx,go=rx' + dest: /usr/local/bin/pywrap + content: | + #!/bin/bash + exec strace -ff -o /tmp/pywrap$$.trace python2.7 "$@"' diff --git a/tests/image_prep/_user_accounts.yml b/tests/image_prep/_user_accounts.yml new file mode 100644 index 00000000..f9cac85c --- /dev/null +++ b/tests/image_prep/_user_accounts.yml @@ -0,0 +1,152 @@ +# +# Add users expected by tests. Assumes passwordless sudo to root. +# +# WARNING: this creates non-privilged accounts with pre-set passwords! +# + +- hosts: all + gather_facts: true + strategy: mitogen_free + become: true + vars: + distro: "{{ansible_distribution}}" + ver: "{{ansible_distribution_major_version}}" + + special_users: + - has_sudo + - has_sudo_nopw + - has_sudo_pubkey + - pw_required + - readonly_homedir + - require_tty + - require_tty_pw_required + - slow_user + - webapp + - sudo1 + - sudo2 + - sudo3 + - sudo4 + + user_groups: + has_sudo: ['mitogen__group', '{{sudo_group[distro]}}'] + has_sudo_pubkey: ['mitogen__group', '{{sudo_group[distro]}}'] + has_sudo_nopw: ['mitogen__group', 'mitogen__sudo_nopw'] + sudo1: ['mitogen__group', 'mitogen__sudo_nopw'] + sudo2: ['mitogen__group', '{{sudo_group[distro]}}'] + sudo3: ['mitogen__group', '{{sudo_group[distro]}}'] + sudo4: ['mitogen__group', '{{sudo_group[distro]}}'] + + normal_users: "{{ + lookup('sequence', 'start=1 end=5 format=user%d', wantlist=True) + }}" + + all_users: "{{ + special_users + + normal_users + }}" + tasks: + - name: Disable non-localhost SSH for Mitogen users + when: false + blockinfile: + path: /etc/ssh/sshd_config + block: | + Match User mitogen__* Address !127.0.0.1 + DenyUsers * + + - name: Create Mitogen test groups + group: + name: "mitogen__{{item}}" + with_items: + - group + - sudo_nopw + + - name: Create user accounts + block: + - user: + name: "mitogen__{{item}}" + shell: /bin/bash + groups: "{{user_groups[item]|default(['mitogen__group'])}}" + password: "{{ (item + '_password') | password_hash('sha256') }}" + loop: "{{all_users}}" + when: ansible_system != 'Darwin' + - user: + name: "mitogen__{{item}}" + shell: /bin/bash + groups: "{{user_groups[item]|default(['mitogen__group'])}}" + password: "{{item}}_password" + loop: "{{all_users}}" + when: ansible_system == 'Darwin' + + - name: Hide users from login window. + loop: "{{all_users}}" + when: ansible_system == 'Darwin' + osx_defaults: + array_add: true + domain: /Library/Preferences/com.apple.loginwindow + type: array + key: HiddenUsersList + value: ['mitogen_{{item}}'] + + - name: Readonly homedir for one account + shell: "chown -R root: ~mitogen__readonly_homedir" + + - name: Slow bash profile for one account + copy: + dest: ~mitogen__slow_user/.{{item}} + src: ../data/docker/mitogen__slow_user.profile + with_items: + - bashrc + - profile + + - name: Install pubkey for mitogen__has_sudo_pubkey + block: + - file: + path: ~mitogen__has_sudo_pubkey/.ssh + state: directory + mode: go= + owner: mitogen__has_sudo_pubkey + - copy: + dest: ~mitogen__has_sudo_pubkey/.ssh/authorized_keys + src: ../data/docker/mitogen__has_sudo_pubkey.key.pub + mode: go= + owner: mitogen__has_sudo_pubkey + + - name: Install slow profile for one account + block: + - copy: + dest: ~mitogen__slow_user/.profile + src: ../data/docker/mitogen__slow_user.profile + - copy: + dest: ~mitogen__slow_user/.bashrc + src: ../data/docker/mitogen__slow_user.profile + + - name: Require a TTY for two accounts + lineinfile: + path: /etc/sudoers + line: "{{item}}" + with_items: + - Defaults>mitogen__pw_required targetpw + - Defaults>mitogen__require_tty requiretty + - Defaults>mitogen__require_tty_pw_required requiretty,targetpw + + - name: Require password for two accounts + lineinfile: + path: /etc/sudoers + line: "{{lookup('pipe', 'whoami')}} ALL = ({{item}}) ALL" + with_items: + - mitogen__pw_required + - mitogen__require_tty_pw_required + + - name: Allow passwordless sudo for require_tty/readonly_homedir + lineinfile: + path: /etc/sudoers + line: "{{lookup('pipe', 'whoami')}} ALL = ({{item}}) NOPASSWD:ALL" + with_items: + - mitogen__require_tty + - mitogen__readonly_homedir + + - name: Allow passwordless for many accounts + lineinfile: + path: /etc/sudoers + line: "{{lookup('pipe', 'whoami')}} ALL = (mitogen__{{item}}) NOPASSWD:ALL" + loop: "{{normal_users}}" diff --git a/tests/image_prep/ansible.cfg b/tests/image_prep/ansible.cfg new file mode 100644 index 00000000..a3937825 --- /dev/null +++ b/tests/image_prep/ansible.cfg @@ -0,0 +1,4 @@ + +[defaults] +strategy_plugins = ../../ansible_mitogen/plugins/strategy +retry_files_enabled = false diff --git a/tests/image_prep/build_docker_images.py b/tests/image_prep/build_docker_images.py new file mode 100755 index 00000000..94a17104 --- /dev/null +++ b/tests/image_prep/build_docker_images.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python + +""" +Build the Docker images used for testing. +""" + +import commands +import os +import tempfile +import shlex +import subprocess + + +BASEDIR = os.path.dirname(os.path.abspath(__file__)) + + +def sh(s, *args): + if args: + s %= args + return shlex.split(s) + + + +label_by_id = {} + +for base_image, label in [ + ('debian:stretch', 'debian'), # Python 2.7.13, 3.5.3 + ('centos:6', 'centos6'), # Python 2.6.6 + ('centos:7', 'centos7') # Python 2.7.5 + ]: + args = sh('docker run --rm -it -d -h mitogen-%s %s /bin/bash', + label, base_image) + container_id = subprocess.check_output(args).strip() + label_by_id[container_id] = label + +with tempfile.NamedTemporaryFile() as fp: + fp.write('[all]\n') + for id_, label in label_by_id.items(): + fp.write('%s ansible_host=%s\n' % (label, id_)) + fp.flush() + + try: + subprocess.check_call( + cwd=BASEDIR, + args=sh('ansible-playbook -i %s -c docker setup.yml', fp.name), + ) + + for container_id, label in label_by_id.items(): + subprocess.check_call(sh(''' + docker commit + --change 'EXPOSE 22' + --change 'CMD ["/usr/sbin/sshd", "-D"]' + %s + mitogen/%s-test + ''', container_id, label)) + finally: + subprocess.check_call(sh('docker rm -f %s', ' '.join(label_by_id))) diff --git a/tests/image_prep/setup.yml b/tests/image_prep/setup.yml new file mode 100644 index 00000000..77a80e3b --- /dev/null +++ b/tests/image_prep/setup.yml @@ -0,0 +1,14 @@ + +- hosts: all + gather_facts: false + tasks: + - set_fact: + # Hacktacular.. but easiest place for it with current structure. + sudo_group: + MacOSX: admin + Debian: sudo + Ubuntu: sudo + CentOS: wheel + +- import_playbook: _container_setup.yml +- import_playbook: _user_accounts.yml diff --git a/tests/lxc_test.py b/tests/lxc_test.py new file mode 100644 index 00000000..a30cd186 --- /dev/null +++ b/tests/lxc_test.py @@ -0,0 +1,29 @@ +import os + +import mitogen + +import unittest2 + +import testlib + + +def has_subseq(seq, subseq): + return any(seq[x:x+len(subseq)] == subseq for x in range(0, len(seq))) + + +class FakeLxcAttachTest(testlib.RouterMixin, unittest2.TestCase): + def test_okay(self): + lxc_attach_path = testlib.data_path('fake_lxc_attach.py') + context = self.router.lxc( + container='container_name', + lxc_attach_path=lxc_attach_path, + ) + + argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV')) + self.assertEquals(argv[0], lxc_attach_path) + self.assertTrue('--clear-env' in argv) + self.assertTrue(has_subseq(argv, ['--name', 'container_name'])) + + +if __name__ == '__main__': + unittest2.main() diff --git a/tests/lxd_test.py b/tests/lxd_test.py new file mode 100644 index 00000000..9c2397a2 --- /dev/null +++ b/tests/lxd_test.py @@ -0,0 +1,26 @@ +import os + +import mitogen + +import unittest2 + +import testlib + + +class FakeLxcTest(testlib.RouterMixin, unittest2.TestCase): + def test_okay(self): + lxc_path = testlib.data_path('fake_lxc.py') + context = self.router.lxd( + container='container_name', + lxc_path=lxc_path, + ) + + argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV')) + self.assertEquals(argv[0], lxc_path) + self.assertEquals(argv[1], 'exec') + self.assertEquals(argv[2], '--mode=noninteractive') + self.assertEquals(argv[3], 'container_name') + + +if __name__ == '__main__': + unittest2.main() diff --git a/tests/module_finder_test.py b/tests/module_finder_test.py index 9c85e26c..1d5a0796 100644 --- a/tests/module_finder_test.py +++ b/tests/module_finder_test.py @@ -353,13 +353,9 @@ class DjangoFindRelatedTest(DjangoMixin, testlib.TestCase): 'django.utils.translation', 'django.utils.tree', 'django.utils.tzinfo', - 'pkg_resources', - 'pkg_resources.extern', - 'pkg_resources.extern.appdirs', - 'pkg_resources.extern.packaging', - 'pkg_resources.extern.six', 'pytz', 'pytz.exceptions', + 'pytz.lazy', 'pytz.tzfile', 'pytz.tzinfo', ]) diff --git a/tests/parent_test.py b/tests/parent_test.py index 06eac97e..c9ccaf3f 100644 --- a/tests/parent_test.py +++ b/tests/parent_test.py @@ -1,10 +1,12 @@ import errno import os +import signal import subprocess import sys import tempfile import time +import mock import unittest2 import testlib @@ -28,6 +30,56 @@ def wait_for_child(pid, timeout=1.0): assert False, "wait_for_child() timed out" +class GetDefaultRemoteNameTest(testlib.TestCase): + func = staticmethod(mitogen.parent.get_default_remote_name) + + @mock.patch('os.getpid') + @mock.patch('getpass.getuser') + @mock.patch('socket.gethostname') + def test_slashes(self, mock_gethostname, mock_getuser, mock_getpid): + # Ensure slashes appearing in the remote name are replaced with + # underscores. + mock_gethostname.return_value = 'box' + mock_getuser.return_value = 'ECORP\\Administrator' + mock_getpid.return_value = 123 + self.assertEquals("ECORP_Administrator@box:123", self.func()) + + +class WstatusToStrTest(testlib.TestCase): + func = staticmethod(mitogen.parent.wstatus_to_str) + + def test_return_zero(self): + pid = os.fork() + if not pid: + os._exit(0) + (pid, status), _ = mitogen.core.io_op(os.waitpid, pid, 0) + self.assertEquals(self.func(status), + 'exited with return code 0') + + def test_return_one(self): + pid = os.fork() + if not pid: + os._exit(1) + (pid, status), _ = mitogen.core.io_op(os.waitpid, pid, 0) + self.assertEquals( + self.func(status), + 'exited with return code 1' + ) + + def test_sigkill(self): + pid = os.fork() + if not pid: + time.sleep(600) + os.kill(pid, signal.SIGKILL) + (pid, status), _ = mitogen.core.io_op(os.waitpid, pid, 0) + self.assertEquals( + self.func(status), + 'exited due to signal %s (SIGKILL)' % (signal.SIGKILL,) + ) + + # can't test SIGSTOP without POSIX sessions rabbithole + + class ReapChildTest(testlib.RouterMixin, testlib.TestCase): def test_connect_timeout(self): # Ensure the child process is reaped if the connection times out. @@ -105,6 +157,25 @@ class ContextTest(testlib.RouterMixin, unittest2.TestCase): self.assertRaises(OSError, lambda: os.kill(pid, 0)) +class OpenPtyTest(testlib.TestCase): + func = staticmethod(mitogen.parent.openpty) + + def test_pty_returned(self): + master_fd, slave_fd = self.func() + self.assertTrue(isinstance(master_fd, int)) + self.assertTrue(isinstance(slave_fd, int)) + os.close(master_fd) + os.close(slave_fd) + + @mock.patch('os.openpty') + def test_max_reached(self, openpty): + openpty.side_effect = OSError(errno.ENXIO) + e = self.assertRaises(mitogen.core.StreamError, + lambda: self.func()) + msg = mitogen.parent.OPENPTY_MSG % (openpty.side_effect,) + self.assertEquals(e.args[0], msg) + + class TtyCreateChildTest(unittest2.TestCase): func = staticmethod(mitogen.parent.tty_create_child) diff --git a/tests/responder_test.py b/tests/responder_test.py index dfdd67fa..46400fce 100644 --- a/tests/responder_test.py +++ b/tests/responder_test.py @@ -1,5 +1,6 @@ import mock +import textwrap import subprocess import sys @@ -12,6 +13,60 @@ import plain_old_module import simple_pkg.a +class NeutralizeMainTest(testlib.RouterMixin, unittest2.TestCase): + klass = mitogen.master.ModuleResponder + + def call(self, *args, **kwargs): + return self.klass(self.router).neutralize_main(*args, **kwargs) + + def test_missing_exec_guard(self): + path = testlib.data_path('main_with_no_exec_guard.py') + args = [sys.executable, path] + proc = subprocess.Popen(args, stderr=subprocess.PIPE) + _, stderr = proc.communicate() + self.assertEquals(1, proc.returncode) + expect = self.klass.main_guard_msg % (path,) + self.assertTrue(expect in stderr.decode()) + + HAS_MITOGEN_MAIN = mitogen.core.b( + textwrap.dedent(""" + herp derp + + def myprog(): + pass + + @mitogen.main(maybe_some_option=True) + def main(router): + pass + """) + ) + + def test_mitogen_main(self): + untouched = self.call("derp.py", self.HAS_MITOGEN_MAIN) + self.assertEquals(untouched, self.HAS_MITOGEN_MAIN) + + HAS_EXEC_GUARD = mitogen.core.b( + textwrap.dedent(""" + herp derp + + def myprog(): + pass + + def main(): + pass + + if __name__ == '__main__': + main() + """) + ) + + def test_exec_guard(self): + touched = self.call("derp.py", self.HAS_EXEC_GUARD) + bits = touched.decode().split() + self.assertEquals(bits[-3:], ['def', 'main():', 'pass']) + + + class GoodModulesTest(testlib.RouterMixin, unittest2.TestCase): def test_plain_old_module(self): # The simplest case: a top-level module with no interesting imports or diff --git a/tests/ssh_test.py b/tests/ssh_test.py index a514c8ea..efca057d 100644 --- a/tests/ssh_test.py +++ b/tests/ssh_test.py @@ -1,3 +1,4 @@ +import os import sys import mitogen @@ -13,9 +14,9 @@ import plain_old_module class FakeSshTest(testlib.RouterMixin, unittest2.TestCase): def test_okay(self): context = self.router.ssh( - hostname='hostname', - username='mitogen__has_sudo', - ssh_path=testlib.data_path('fakessh.py'), + hostname='hostname', + username='mitogen__has_sudo', + ssh_path=testlib.data_path('fakessh.py'), ) #context.call(mitogen.utils.log_to_file, '/tmp/log') #context.call(mitogen.utils.disable_site_packages) @@ -123,6 +124,41 @@ class BannerTest(testlib.DockerMixin, unittest2.TestCase): self.assertEquals(name, context.name) +class RequirePtyTest(testlib.DockerMixin, testlib.TestCase): + stream_class = mitogen.ssh.Stream + + def fake_ssh(self, FAKESSH_MODE=None, **kwargs): + os.environ['FAKESSH_MODE'] = str(FAKESSH_MODE) + try: + return self.router.ssh( + hostname='hostname', + username='mitogen__has_sudo', + ssh_path=testlib.data_path('fakessh.py'), + **kwargs + ) + finally: + del os.environ['FAKESSH_MODE'] + + def test_check_host_keys_accept(self): + # required=true, host_key_checking=accept + context = self.fake_ssh(FAKESSH_MODE='ask', check_host_keys='accept') + self.assertEquals('1', context.call(os.getenv, 'STDERR_WAS_TTY')) + + def test_check_host_keys_enforce(self): + # required=false, host_key_checking=enforce + context = self.fake_ssh(check_host_keys='enforce') + self.assertEquals(None, context.call(os.getenv, 'STDERR_WAS_TTY')) + + def test_check_host_keys_ignore(self): + # required=false, host_key_checking=ignore + context = self.fake_ssh(check_host_keys='ignore') + self.assertEquals(None, context.call(os.getenv, 'STDERR_WAS_TTY')) + + def test_password_present(self): + # required=true, password is not None + context = self.fake_ssh(check_host_keys='ignore', password='willick') + self.assertEquals('1', context.call(os.getenv, 'STDERR_WAS_TTY')) + if __name__ == '__main__': unittest2.main() diff --git a/tests/testlib.py b/tests/testlib.py index d812609c..63d96233 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -158,22 +158,48 @@ def sync_with_broker(broker, timeout=10.0): sem.get(timeout=10.0) +class CaptureStreamHandler(logging.StreamHandler): + def __init__(self, *args, **kwargs): + super(CaptureStreamHandler, self).__init__(*args, **kwargs) + self.msgs = [] + + def emit(self, msg): + self.msgs.append(msg) + return super(CaptureStreamHandler, self).emit(msg) + + class LogCapturer(object): def __init__(self, name=None): self.sio = StringIO() self.logger = logging.getLogger(name) - self.handler = logging.StreamHandler(self.sio) + self.handler = CaptureStreamHandler(self.sio) self.old_propagate = self.logger.propagate self.old_handlers = self.logger.handlers + self.old_level = self.logger.level def start(self): self.logger.handlers = [self.handler] self.logger.propagate = False + self.logger.level = logging.DEBUG + + def raw(self): + return self.sio.getvalue() + + def msgs(self): + return self.handler.msgs + + def __enter__(self): + self.start() + return self + + def __exit__(self, _1, _2, _3): + self.stop() def stop(self): + self.logger.level = self.old_level self.logger.handlers = self.old_handlers self.logger.propagate = self.old_propagate - return self.sio.getvalue() + return self.raw() class TestCase(unittest2.TestCase): diff --git a/tests/utils_test.py b/tests/utils_test.py index 4c2e2e0f..b2e0aa9e 100644 --- a/tests/utils_test.py +++ b/tests/utils_test.py @@ -2,6 +2,7 @@ import unittest2 +import mitogen.core import mitogen.master import mitogen.utils @@ -32,5 +33,64 @@ class WithRouterTest(unittest2.TestCase): self.assertFalse(router.broker._thread.isAlive()) +class Dict(dict): pass +class List(list): pass +class Tuple(tuple): pass +class Unicode(mitogen.core.UnicodeType): pass +class Bytes(mitogen.core.BytesType): pass + + +class CastTest(unittest2.TestCase): + def test_dict(self): + self.assertEqual(type(mitogen.utils.cast({})), dict) + self.assertEqual(type(mitogen.utils.cast(Dict())), dict) + + def test_nested_dict(self): + specimen = mitogen.utils.cast(Dict({'k': Dict({'k2': 'v2'})})) + self.assertEqual(type(specimen), dict) + self.assertEqual(type(specimen['k']), dict) + + def test_list(self): + self.assertEqual(type(mitogen.utils.cast([])), list) + self.assertEqual(type(mitogen.utils.cast(List())), list) + + def test_nested_list(self): + specimen = mitogen.utils.cast(List((0, 1, List((None,))))) + self.assertEqual(type(specimen), list) + self.assertEqual(type(specimen[2]), list) + + def test_tuple(self): + self.assertEqual(type(mitogen.utils.cast(())), list) + self.assertEqual(type(mitogen.utils.cast(Tuple())), list) + + def test_nested_tuple(self): + specimen = mitogen.utils.cast(Tuple((0, 1, Tuple((None,))))) + self.assertEqual(type(specimen), list) + self.assertEqual(type(specimen[2]), list) + + def assertUnchanged(self, v): + self.assertIs(mitogen.utils.cast(v), v) + + def test_passthrough(self): + self.assertUnchanged(0) + self.assertUnchanged(0.0) + self.assertUnchanged(float('inf')) + self.assertUnchanged(True) + self.assertUnchanged(False) + self.assertUnchanged(None) + + def test_unicode(self): + self.assertEqual(type(mitogen.utils.cast(u'')), mitogen.core.UnicodeType) + self.assertEqual(type(mitogen.utils.cast(Unicode())), mitogen.core.UnicodeType) + + def test_bytes(self): + self.assertEqual(type(mitogen.utils.cast(b'')), mitogen.core.BytesType) + self.assertEqual(type(mitogen.utils.cast(Bytes())), mitogen.core.BytesType) + + def test_unknown(self): + self.assertRaises(TypeError, mitogen.utils.cast, set()) + self.assertRaises(TypeError, mitogen.utils.cast, 4j) + + if __name__ == '__main__': unittest2.main() diff --git a/tox.ini b/tox.ini index ae761121..6bf8bb53 100644 --- a/tox.ini +++ b/tox.ini @@ -2,6 +2,8 @@ envlist = py26, py27, + py35, + py36, [testenv] deps =