diff --git a/.ci/README.md b/.ci/README.md
new file mode 100644
index 00000000..1b9f9dfa
--- /dev/null
+++ b/.ci/README.md
@@ -0,0 +1,44 @@
+
+# `.ci`
+
+This directory contains scripts for Travis CI and (more or less) Azure
+Pipelines, but they will also happily run on any Debian-like machine.
+
+The scripts are usually split into `_install` and `_test` steps. The `_install`
+step will modify your machine (installing system packages and the like), while
+the `_test` step just runs the tests the way CI runs them.
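+
+For example, to roughly reproduce the `mitogen` CI job locally (a hypothetical
+invocation, assuming a Debian-like machine you don't mind modifying):
+
+    DISTRO=debian .ci/mitogen_install.py
+    DISTRO=debian .ci/mitogen_tests.py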
+
+There is a common library, `ci_lib.py`, which centralizes a bunch of random
+helpers and environment parsing.
+
+Some of the scripts allow you to pass extra flags through to the component
+under test, e.g. `../../.ci/ansible_tests.py -vvv` will run Ansible with
+verbose output enabled.
+
+Hack these scripts to your heart's content. There is no pride to be found
+here, just necessity.
+
+
+### `ci_lib.run_batches()`
+
+There are some weird-looking functions to extract more parallelism from the
+build. The above function takes a list of lists of strings, arranging for the
+strings in each list to run in order, but for the lists to run in parallel.
+That's great for doing `setup.py install` while pulling a Docker container,
+for example; see the sketch below.
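+
+A minimal sketch (the commands are hypothetical, but the call shape matches
+`ci_lib.run_batches()`):
+
+```python
+ci_lib.run_batches([
+    [
+        # First batch: these commands run sequentially.
+        'pip install -r tests/requirements.txt',
+        'python setup.py install',
+    ],
+    [
+        # Second batch: runs in parallel with the first.
+        'docker pull mitogen/debian-test',
+    ],
+])
+```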
+
+
+### Environment Variables
+
+* `VER`: Ansible version the `_install` script should install. Default changes
+ over time.
+* `TARGET_COUNT`: number of targets for the `debops_` run. Defaults to 2.
+* `DISTRO`: the `mitogen_` tests need a target Docker container distro. This
+  name comes from the Docker Hub `mitogen` user, i.e. `mitogen/$DISTRO-test`.
+* `DISTROS`: the `ansible_` tests can run against multiple targets
+  simultaneously, which speeds things up. This is a space-separated list of
+  DISTRO names that additionally supports:
+    * `debian-py3`: when generating the Ansible inventory file, set
+      `ansible_python_interpreter` to `python3`, i.e. run a test where the
+      target interpreter is Python 3.
+    * `debian*16`: generate 16 Docker containers running Debian. Also works
+      with `-py3`; see the example below.
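+
+For example, a hypothetical invocation combining these forms:
+
+    DISTROS='debian centos7 debian-py3*16' .ci/ansible_tests.py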
+
diff --git a/.ci/ansible_install.py b/.ci/ansible_install.py
new file mode 100755
index 00000000..86e57096
--- /dev/null
+++ b/.ci/ansible_install.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+import ci_lib
+
+batches = [
+ [
+        # Must be installed separately, as a PyNaCl indirect requirement
+        # causes a newer version to be installed if done in a single pip run.
+ 'pip install "pycparser<2.19" "idna<2.7"',
+ 'pip install '
+ '-r tests/requirements.txt '
+ '-r tests/ansible/requirements.txt',
+ ]
+]
+
+batches.extend(
+ ['docker pull %s' % (ci_lib.image_for_distro(distro),)]
+ for distro in ci_lib.DISTROS
+)
+
+ci_lib.run_batches(batches)
diff --git a/.ci/ansible_tests.py b/.ci/ansible_tests.py
new file mode 100755
index 00000000..98e45ab8
--- /dev/null
+++ b/.ci/ansible_tests.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# Run tests/ansible/all.yml under Ansible and Ansible-Mitogen
+
+import glob
+import os
+import sys
+
+import ci_lib
+from ci_lib import run
+
+
+TESTS_DIR = os.path.join(ci_lib.GIT_ROOT, 'tests/ansible')
+HOSTS_DIR = os.path.join(ci_lib.TMP, 'hosts')
+
+
+with ci_lib.Fold('unit_tests'):
+ os.environ['SKIP_MITOGEN'] = '1'
+ ci_lib.run('./run_tests -v')
+
+
+with ci_lib.Fold('docker_setup'):
+ containers = ci_lib.make_containers()
+ ci_lib.start_containers(containers)
+
+
+with ci_lib.Fold('job_setup'):
+ # Don't set -U as that will upgrade Paramiko to a non-2.6 compatible version.
+ run("pip install -q ansible==%s", ci_lib.ANSIBLE_VERSION)
+
+ os.chdir(TESTS_DIR)
+    os.chmod('../data/docker/mitogen__has_sudo_pubkey.key', int('0600', 8))
+
+ run("mkdir %s", HOSTS_DIR)
+ for path in glob.glob(TESTS_DIR + '/hosts/*'):
+ if not path.endswith('default.hosts'):
+ run("ln -s %s %s", path, HOSTS_DIR)
+
+ inventory_path = os.path.join(HOSTS_DIR, 'target')
+ with open(inventory_path, 'w') as fp:
+ fp.write('[test-targets]\n')
+ fp.writelines(
+ "%(name)s "
+ "ansible_host=%(hostname)s "
+ "ansible_port=%(port)s "
+ "ansible_python_interpreter=%(python_path)s "
+ "ansible_user=mitogen__has_sudo_nopw "
+ "ansible_password=has_sudo_nopw_password"
+ "\n"
+ % container
+ for container in containers
+ )
+
+ ci_lib.dump_file(inventory_path)
+
+ if not ci_lib.exists_in_path('sshpass'):
+ run("sudo apt-get update")
+ run("sudo apt-get install -y sshpass")
+
+
+with ci_lib.Fold('ansible'):
+ playbook = os.environ.get('PLAYBOOK', 'all.yml')
+ run('./run_ansible_playbook.py %s -i "%s" %s',
+ playbook, HOSTS_DIR, ' '.join(sys.argv[1:]))
diff --git a/.ci/azure-pipelines.yml b/.ci/azure-pipelines.yml
new file mode 100644
index 00000000..fbbb9640
--- /dev/null
+++ b/.ci/azure-pipelines.yml
@@ -0,0 +1,83 @@
+# Python package
+# Create and test a Python package on multiple Python versions.
+# Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more:
+# https://docs.microsoft.com/azure/devops/pipelines/languages/python
+
+jobs:
+
+- job: 'MitogenTests'
+ pool:
+ vmImage: 'Ubuntu 16.04'
+ strategy:
+ matrix:
+ Mitogen27Debian_27:
+ python.version: '2.7'
+ MODE: mitogen
+ DISTRO: debian
+
+ MitogenPy27CentOS6_26:
+ python.version: '2.7'
+ MODE: mitogen
+ DISTRO: centos6
+
+ #Py26CentOS7:
+ #python.version: '2.7'
+ #MODE: mitogen
+ #DISTRO: centos6
+
+ Mitogen36CentOS6_26:
+ python.version: '3.6'
+ MODE: mitogen
+ DISTRO: centos6
+
+ DebOps_2460_27_27:
+ python.version: '2.7'
+ MODE: debops_common
+ VER: 2.4.6.0
+
+ DebOps_262_36_27:
+ python.version: '3.6'
+ MODE: debops_common
+ VER: 2.6.2
+
+ Ansible_2460_26:
+ python.version: '2.7'
+ MODE: ansible
+ VER: 2.4.6.0
+
+ Ansible_262_26:
+ python.version: '2.7'
+ MODE: ansible
+ VER: 2.6.2
+
+ Ansible_2460_36:
+ python.version: '3.6'
+ MODE: ansible
+ VER: 2.4.6.0
+
+ Ansible_262_36:
+ python.version: '3.6'
+ MODE: ansible
+ VER: 2.6.2
+
+ Vanilla_262_27:
+ python.version: '2.7'
+ MODE: ansible
+ VER: 2.6.2
+ DISTROS: debian
+ STRATEGY: linear
+
+ steps:
+ - task: UsePythonVersion@0
+ inputs:
+ versionSpec: '$(python.version)'
+ architecture: 'x64'
+
+ - script: .ci/prep_azure.py
+ displayName: "Install requirements."
+
+ - script: .ci/$(MODE)_install.py
+    displayName: "Install mode-specific requirements."
+
+ - script: .ci/$(MODE)_tests.py
+ displayName: Run tests.
diff --git a/.ci/ci_lib.py b/.ci/ci_lib.py
new file mode 100644
index 00000000..10e9d11e
--- /dev/null
+++ b/.ci/ci_lib.py
@@ -0,0 +1,222 @@
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+import atexit
+import os
+import shlex
+import shutil
+import subprocess
+import sys
+import tempfile
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+os.chdir(
+ os.path.join(
+ os.path.dirname(__file__),
+ '..'
+ )
+)
+
+
+#
+# check_output() monkeypatch cutpasted from testlib.py
+#
+
+def subprocess__check_output(*popenargs, **kwargs):
+ # Missing from 2.6.
+ process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
+ output, _ = process.communicate()
+ retcode = process.poll()
+ if retcode:
+ cmd = kwargs.get("args")
+ if cmd is None:
+ cmd = popenargs[0]
+ raise subprocess.CalledProcessError(retcode, cmd)
+ return output
+
+if not hasattr(subprocess, 'check_output'):
+ subprocess.check_output = subprocess__check_output
+
+
+# -----------------
+
+# Force stdout FD 1 to be a pipe, so tools like pip don't spam progress bars.
+
+if 'TRAVIS_HOME' in os.environ:
+ proc = subprocess.Popen(
+ args=['stdbuf', '-oL', 'cat'],
+ stdin=subprocess.PIPE
+ )
+
+ os.dup2(proc.stdin.fileno(), 1)
+ os.dup2(proc.stdin.fileno(), 2)
+
+ def cleanup_travis_junk(stdout=sys.stdout, stderr=sys.stderr, proc=proc):
+ stdout.close()
+ stderr.close()
+ proc.terminate()
+
+ atexit.register(cleanup_travis_junk)
+
+# -----------------
+
+def _argv(s, *args):
+ if args:
+ s %= args
+ return shlex.split(s)
+
+
+def run(s, *args, **kwargs):
+ argv = ['/usr/bin/time', '--'] + _argv(s, *args)
+ print('Running: %s' % (argv,))
+ ret = subprocess.check_call(argv, **kwargs)
+ print('Finished running: %s' % (argv,))
+ return ret
+
+
+def run_batches(batches):
+    # Each batch is a list of shell commands. Commands within a batch run
+    # sequentially (joined with &&), while the batches themselves run in
+    # parallel.
+    combine = lambda batch: 'set -x; ' + (' && '.join(
+        '( %s; )' % (cmd,)
+        for cmd in batch
+    ))
+
+    procs = [
+        subprocess.Popen(combine(batch), shell=True)
+        for batch in batches
+    ]
+    # Fail loudly if any batch exited non-zero.
+    assert [proc.wait() for proc in procs] == [0] * len(procs)
+
+
+def get_output(s, *args, **kwargs):
+ argv = _argv(s, *args)
+ print('Running: %s' % (argv,))
+ return subprocess.check_output(argv, **kwargs)
+
+
+def exists_in_path(progname):
+ return any(os.path.exists(os.path.join(dirname, progname))
+ for dirname in os.environ['PATH'].split(os.pathsep))
+
+
+class TempDir(object):
+ def __init__(self):
+ self.path = tempfile.mkdtemp(prefix='mitogen_ci_lib')
+ atexit.register(self.destroy)
+
+ def destroy(self, rmtree=shutil.rmtree):
+ rmtree(self.path)
+
+
+class Fold(object):
+ def __init__(self, name):
+ self.name = name
+
+ def __enter__(self):
+        print('travis_fold:start:%s' % (self.name,))
+
+ def __exit__(self, _1, _2, _3):
+ print('')
+        print('travis_fold:end:%s' % (self.name,))
+
+
+os.environ.setdefault('ANSIBLE_STRATEGY',
+ os.environ.get('STRATEGY', 'mitogen_linear'))
+ANSIBLE_VERSION = os.environ.get('VER', '2.6.2')
+GIT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+DISTRO = os.environ.get('DISTRO', 'debian')
+DISTROS = os.environ.get('DISTROS', 'debian centos6 centos7').split()
+TARGET_COUNT = int(os.environ.get('TARGET_COUNT', '2'))
+BASE_PORT = 2200
+TMP = TempDir().path
+
+os.environ['PYTHONDONTWRITEBYTECODE'] = 'x'
+os.environ['PYTHONPATH'] = '%s:%s' % (
+ os.environ.get('PYTHONPATH', ''),
+ GIT_ROOT
+)
+
+def get_docker_hostname():
+ url = os.environ.get('DOCKER_HOST')
+ if url in (None, 'http+docker://localunixsocket'):
+ return 'localhost'
+
+ parsed = urlparse.urlparse(url)
+ return parsed.netloc.partition(':')[0]
+
+
+def image_for_distro(distro):
+ return 'mitogen/%s-test' % (distro.partition('-')[0],)
+
+
+def make_containers():
+ docker_hostname = get_docker_hostname()
+    firstbit = lambda s: (s+'-').split('-')[0]    # 'debian-py3' -> 'debian'
+    secondbit = lambda s: (s+'-').split('-')[1]   # 'debian-py3' -> 'py3'
+
+ i = 1
+ lst = []
+
+ for distro in DISTROS:
+ distro, star, count = distro.partition('*')
+ if star:
+ count = int(count)
+ else:
+ count = 1
+
+ for x in range(count):
+ lst.append({
+ "distro": firstbit(distro),
+ "name": "target-%s-%s" % (distro, i),
+ "hostname": docker_hostname,
+ "port": BASE_PORT + i,
+ "python_path": (
+ '/usr/bin/python3'
+ if secondbit(distro) == 'py3'
+ else '/usr/bin/python'
+ )
+ })
+ i += 1
+
+ return lst
+
+
+def start_containers(containers):
+ if os.environ.get('KEEP'):
+ return
+
+ run_batches([
+ [
+ "docker rm -f %(name)s || true" % container,
+ "docker run "
+ "--rm "
+ "--detach "
+ "--publish 0.0.0.0:%(port)s:22/tcp "
+ "--hostname=%(name)s "
+ "--name=%(name)s "
+ "mitogen/%(distro)s-test "
+ % container
+ ]
+ for container in containers
+ ])
+ return containers
+
+
+def dump_file(path):
+ print()
+ print('--- %s ---' % (path,))
+ print()
+ with open(path, 'r') as fp:
+ print(fp.read().rstrip())
+ print('---')
+ print()
+
+
+# SSH passes these through to the container when run interactively, causing
+# stdout to get messed up with libc warnings.
+os.environ.pop('LANG', None)
+os.environ.pop('LC_ALL', None)
diff --git a/.ci/debops_common_install.py b/.ci/debops_common_install.py
new file mode 100755
index 00000000..32241449
--- /dev/null
+++ b/.ci/debops_common_install.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+import ci_lib
+
+# Naturally DebOps only supports Debian.
+ci_lib.DISTROS = ['debian']
+
+ci_lib.run_batches([
+ [
+        # Must be installed separately, as a PyNaCl indirect requirement
+        # causes a newer version to be installed if done in a single pip run.
+ 'pip install "pycparser<2.19"',
+ 'pip install -qqqU debops==0.7.2 ansible==%s' % ci_lib.ANSIBLE_VERSION,
+ ],
+ [
+ 'docker pull %s' % (ci_lib.image_for_distro('debian'),),
+ ],
+])
diff --git a/.ci/debops_common_tests.py b/.ci/debops_common_tests.py
new file mode 100755
index 00000000..8e9f2953
--- /dev/null
+++ b/.ci/debops_common_tests.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+
+from __future__ import print_function
+import os
+
+import ci_lib
+
+
+# DebOps only supports Debian.
+ci_lib.DISTROS = ['debian'] * ci_lib.TARGET_COUNT
+
+project_dir = os.path.join(ci_lib.TMP, 'project')
+key_file = os.path.join(
+ ci_lib.GIT_ROOT,
+ 'tests/data/docker/mitogen__has_sudo_pubkey.key',
+)
+vars_path = 'ansible/inventory/group_vars/debops_all_hosts.yml'
+inventory_path = 'ansible/inventory/hosts'
+docker_hostname = ci_lib.get_docker_hostname()
+
+
+with ci_lib.Fold('docker_setup'):
+ containers = ci_lib.make_containers()
+ ci_lib.start_containers(containers)
+
+
+with ci_lib.Fold('job_setup'):
+ ci_lib.run('debops-init %s', project_dir)
+ os.chdir(project_dir)
+
+ with open('.debops.cfg', 'w') as fp:
+ fp.write(
+ "[ansible defaults]\n"
+ "strategy_plugins = %s/ansible_mitogen/plugins/strategy\n"
+ "strategy = mitogen_linear\n"
+ % (ci_lib.GIT_ROOT,)
+ )
+
+ ci_lib.run('chmod go= %s', key_file)
+ with open(vars_path, 'w') as fp:
+ fp.write(
+ "ansible_python_interpreter: /usr/bin/python2.7\n"
+ "\n"
+ "ansible_user: mitogen__has_sudo_pubkey\n"
+ "ansible_become_pass: has_sudo_pubkey_password\n"
+ "ansible_ssh_private_key_file: %s\n"
+ "\n"
+ # Speed up slow DH generation.
+ "dhparam__bits: ['128', '64']\n"
+ % (key_file,)
+ )
+
+ with open(inventory_path, 'a') as fp:
+ fp.writelines(
+ '%(name)s '
+ 'ansible_host=%(hostname)s '
+ 'ansible_port=%(port)d '
+ 'ansible_python_interpreter=%(python_path)s '
+ '\n'
+ % container
+ for container in containers
+ )
+
+ print()
+    print('--- ansible/inventory/hosts: ---')
+ ci_lib.run('cat ansible/inventory/hosts')
+ print('---')
+ print()
+
+    # Now that we have real host key checking, we need to turn it off.
+ os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+
+
+with ci_lib.Fold('first_run'):
+ ci_lib.run('debops common')
+
+
+with ci_lib.Fold('second_run'):
+ ci_lib.run('debops common')
diff --git a/.ci/mitogen_install.py b/.ci/mitogen_install.py
new file mode 100755
index 00000000..10813b55
--- /dev/null
+++ b/.ci/mitogen_install.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+
+import ci_lib
+
+batches = [
+ [
+ 'pip install "pycparser<2.19" "idna<2.7"',
+ 'pip install -r tests/requirements.txt',
+ ],
+ [
+ 'docker pull %s' % (ci_lib.image_for_distro(ci_lib.DISTRO),),
+ ]
+]
+
+ci_lib.run_batches(batches)
diff --git a/.ci/mitogen_py24_install.py b/.ci/mitogen_py24_install.py
new file mode 100755
index 00000000..97370806
--- /dev/null
+++ b/.ci/mitogen_py24_install.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+import ci_lib
+
+batches = [
+ [
+ 'docker pull %s' % (ci_lib.image_for_distro(ci_lib.DISTRO),),
+ ],
+ [
+ 'sudo tar -C / -jxvf tests/data/ubuntu-python-2.4.6.tar.bz2',
+ ]
+]
+
+ci_lib.run_batches(batches)
diff --git a/.ci/mitogen_py24_tests.py b/.ci/mitogen_py24_tests.py
new file mode 100755
index 00000000..228e79bd
--- /dev/null
+++ b/.ci/mitogen_py24_tests.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+# Mitogen tests for Python 2.4.
+
+import os
+
+import ci_lib
+
+os.environ.update({
+ 'NOCOVERAGE': '1',
+ 'UNIT2': '/usr/local/python2.4.6/bin/unit2',
+
+ 'MITOGEN_TEST_DISTRO': ci_lib.DISTRO,
+ 'MITOGEN_LOG_LEVEL': 'debug',
+ 'SKIP_ANSIBLE': '1',
+})
+
+ci_lib.run('./run_tests -v')
diff --git a/.ci/mitogen_tests.py b/.ci/mitogen_tests.py
new file mode 100755
index 00000000..4ba796c2
--- /dev/null
+++ b/.ci/mitogen_tests.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+# Run the Mitogen tests.
+
+import os
+
+import ci_lib
+
+os.environ.update({
+ 'MITOGEN_TEST_DISTRO': ci_lib.DISTRO,
+ 'MITOGEN_LOG_LEVEL': 'debug',
+ 'SKIP_ANSIBLE': '1',
+})
+
+ci_lib.run('./run_tests -v')
diff --git a/.ci/prep_azure.py b/.ci/prep_azure.py
new file mode 100755
index 00000000..10126df2
--- /dev/null
+++ b/.ci/prep_azure.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+import ci_lib
+
+batches = []
+batches.append([
+ 'echo force-unsafe-io | sudo tee /etc/dpkg/dpkg.cfg.d/nosync',
+ 'sudo add-apt-repository ppa:deadsnakes/ppa',
+ 'sudo apt-get update',
+ 'sudo apt-get -y install python2.6 python2.6-dev libsasl2-dev libldap2-dev',
+])
+
+batches.append([
+ 'pip install -r dev_requirements.txt',
+])
+
+batches.extend(
+ ['docker pull %s' % (ci_lib.image_for_distro(distro),)]
+ for distro in ci_lib.DISTROS
+)
+
+ci_lib.run_batches(batches)
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index b73eae64..f1dc425c 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,15 +1,21 @@
+Please drag-drop large logs as text file attachments.
+
Feel free to write an issue in your preferred format, however if in doubt, use
the following checklist as a guide for what to include.
* Have you tried the latest master version from Git?
+* Do you have some idea of what the underlying problem may be?
+ https://mitogen.rtfd.io/en/stable/ansible.html#common-problems has
+ instructions to help figure out the likely cause and how to gather relevant
+ logs.
* Mention your host and target OS and versions
* Mention your host and target Python versions
* If reporting a performance issue, mention the number of targets and a rough
description of your workload (lots of copies, lots of tiny file edits, etc.)
-* If reporting a crash or hang in Ansible, please rerun with -vvvv and include
- the last 200 lines of output, along with a full copy of any traceback or
- error text in the log. Beware "-vvvv" may include secret data! Edit as
- necessary before posting.
+* If reporting a crash or hang in Ansible, please rerun with -vvv and include
+ 200 lines of output around the point of the error, along with a full copy of
+ any traceback or error text in the log. Beware "-vvv" may include secret
+ data! Edit as necessary before posting.
* If reporting any kind of problem with Ansible, please include the Ansible
version along with output of "ansible-config dump --only-changed".
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..116b0c79
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,16 @@
+
+Thanks for creating a PR! Here's a quick checklist to pay attention to:
+
+* Please add an entry to docs/changelog.rst as appropriate.
+
+* Has some new parameter been added or semantics modified somehow? Please
+ ensure relevant documentation is updated in docs/ansible.rst and
+ docs/api.rst.
+
+* If it's for new functionality, is there at least a basic test in either
+ tests/ or tests/ansible/ covering it?
+
+* If it's for a new connection method, please try to stub out the
+ implementation as in tests/data/stubs/, so that construction can be tested
+ without having a working configuration.
+
diff --git a/.gitignore b/.gitignore
index 6092d04e..e244ca12 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
.coverage
.tox
.venv
+venvs/**
**/.DS_Store
*.pyc
*.pyd
diff --git a/.travis.yml b/.travis.yml
index 5dfdae00..aee14c00 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,10 +1,8 @@
sudo: required
-addons:
- apt:
- update: true
notifications:
email: false
+ irc: "chat.freenode.net#mitogen-builds"
language: python
@@ -14,19 +12,10 @@ cache:
- /home/travis/virtualenv
install:
-- pip install -r dev_requirements.txt
+- .ci/${MODE}_install.py
script:
-- |
- if [ -f "${TRAVIS_BUILD_DIR}/.travis/${MODE}_tests.sh" ]; then
- ${TRAVIS_BUILD_DIR}/.travis/${MODE}_tests.sh;
- else
- ${TRAVIS_BUILD_DIR}/.travis/${MODE}_tests.py;
- fi
-
-
-services:
- - docker
+- .ci/${MODE}_tests.py
# To avoid matrix explosion, just test against oldest->newest and
@@ -35,15 +24,21 @@ services:
matrix:
include:
# Mitogen tests.
+ # 2.4 -> 2.4
+ - language: c
+ env: MODE=mitogen_py24 DISTRO=centos5
# 2.7 -> 2.7
- python: "2.7"
env: MODE=mitogen DISTRO=debian
# 2.7 -> 2.6
- - python: "2.7"
- env: MODE=mitogen DISTRO=centos6
+ #- python: "2.7"
+ #env: MODE=mitogen DISTRO=centos6
# 2.6 -> 2.7
- python: "2.6"
env: MODE=mitogen DISTRO=centos7
+ # 2.6 -> 3.5
+ - python: "2.6"
+ env: MODE=mitogen DISTRO=debian-py3
# 3.6 -> 2.6
- python: "3.6"
env: MODE=mitogen DISTRO=centos6
@@ -58,6 +53,10 @@ matrix:
# ansible_mitogen tests.
+ # 2.3 -> {centos5}
+ - python: "2.6"
+ env: MODE=ansible VER=2.3.3.0 DISTROS=centos5
+
# 2.6 -> {debian, centos6, centos7}
- python: "2.6"
env: MODE=ansible VER=2.4.6.0
diff --git a/.travis/ansible_tests.py b/.travis/ansible_tests.py
deleted file mode 100755
index 3b5e40db..00000000
--- a/.travis/ansible_tests.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-# Run tests/ansible/all.yml under Ansible and Ansible-Mitogen
-
-import os
-import sys
-
-import ci_lib
-from ci_lib import run
-
-
-BASE_PORT = 2201
-TESTS_DIR = os.path.join(ci_lib.GIT_ROOT, 'tests/ansible')
-HOSTS_DIR = os.path.join(ci_lib.TMP, 'hosts')
-
-
-with ci_lib.Fold('docker_setup'):
- for i, distro in enumerate(ci_lib.DISTROS):
- try:
- run("docker rm -f target-%s", distro)
- except: pass
-
- run("""
- docker run
- --rm
- --detach
- --publish 0.0.0.0:%s:22/tcp
- --hostname=target-%s
- --name=target-%s
- mitogen/%s-test
- """, BASE_PORT + i, distro, distro, distro)
-
-
-with ci_lib.Fold('job_setup'):
- os.chdir(TESTS_DIR)
- os.chmod('../data/docker/mitogen__has_sudo_pubkey.key', int('0600', 7))
-
- # Don't set -U as that will upgrade Paramiko to a non-2.6 compatible version.
- run("pip install -q ansible==%s", ci_lib.ANSIBLE_VERSION)
-
- run("mkdir %s", HOSTS_DIR)
- run("ln -s %s/hosts/common-hosts %s", TESTS_DIR, HOSTS_DIR)
-
- with open(os.path.join(HOSTS_DIR, 'target'), 'w') as fp:
- fp.write('[test-targets]\n')
- for i, distro in enumerate(ci_lib.DISTROS):
- fp.write("target-%s "
- "ansible_host=%s "
- "ansible_port=%s "
- "ansible_user=mitogen__has_sudo_nopw "
- "ansible_password=has_sudo_nopw_password"
- "\n" % (
- distro,
- ci_lib.DOCKER_HOSTNAME,
- BASE_PORT + i,
- ))
-
- # Build the binaries.
- # run("make -C %s", TESTS_DIR)
- if not ci_lib.exists_in_path('sshpass'):
- run("sudo apt-get update")
- run("sudo apt-get install -y sshpass")
-
-
-with ci_lib.Fold('ansible'):
- run('/usr/bin/time ./run_ansible_playbook.sh all.yml -i "%s" %s',
- HOSTS_DIR, ' '.join(sys.argv[1:]))
diff --git a/.travis/ci_lib.py b/.travis/ci_lib.py
deleted file mode 100644
index eb130a14..00000000
--- a/.travis/ci_lib.py
+++ /dev/null
@@ -1,102 +0,0 @@
-
-from __future__ import absolute_import
-from __future__ import print_function
-
-import atexit
-import os
-import subprocess
-import sys
-import shlex
-import shutil
-import tempfile
-
-import os
-os.system('curl -H Metadata-Flavor:Google http://metadata.google.internal/computeMetadata/v1/instance/machine-type')
-
-#
-# check_output() monkeypatch cutpasted from testlib.py
-#
-
-def subprocess__check_output(*popenargs, **kwargs):
- # Missing from 2.6.
- process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
- output, _ = process.communicate()
- retcode = process.poll()
- if retcode:
- cmd = kwargs.get("args")
- if cmd is None:
- cmd = popenargs[0]
- raise subprocess.CalledProcessError(retcode, cmd)
- return output
-
-if not hasattr(subprocess, 'check_output'):
- subprocess.check_output = subprocess__check_output
-
-# -----------------
-
-def _argv(s, *args):
- if args:
- s %= args
- return shlex.split(s)
-
-
-def run(s, *args, **kwargs):
- argv = _argv(s, *args)
- print('Running: %s' % (argv,))
- return subprocess.check_call(argv, **kwargs)
-
-
-def get_output(s, *args, **kwargs):
- argv = _argv(s, *args)
- print('Running: %s' % (argv,))
- return subprocess.check_output(argv, **kwargs)
-
-
-def exists_in_path(progname):
- return any(os.path.exists(os.path.join(dirname, progname))
- for dirname in os.environ['PATH'].split(os.pathsep))
-
-
-class TempDir(object):
- def __init__(self):
- self.path = tempfile.mkdtemp(prefix='mitogen_ci_lib')
- atexit.register(self.destroy)
-
- def destroy(self, rmtree=shutil.rmtree):
- rmtree(self.path)
-
-
-class Fold(object):
- def __init__(self, name):
- self.name = name
-
- def __enter__(self):
- print('travis_fold:start:%s' % (self.name))
-
- def __exit__(self, _1, _2, _3):
- print('')
- print('travis_fold:end:%s' % (self.name))
-
-
-os.environ.setdefault('ANSIBLE_STRATEGY',
- os.environ.get('STRATEGY', 'mitogen_linear'))
-ANSIBLE_VERSION = os.environ.get('VER', '2.6.2')
-GIT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
-DISTROS = os.environ.get('DISTROS', 'debian centos6 centos7').split()
-TMP = TempDir().path
-
-os.environ['PYTHONDONTWRITEBYTECODE'] = 'x'
-os.environ['PYTHONPATH'] = '%s:%s' % (
- os.environ.get('PYTHONPATH', ''),
- GIT_ROOT
-)
-
-DOCKER_HOSTNAME = subprocess.check_output([
- sys.executable,
- os.path.join(GIT_ROOT, 'tests/show_docker_hostname.py'),
-]).decode().strip()
-
-# SSH passes these through to the container when run interactively, causing
-# stdout to get messed up with libc warnings.
-os.environ.pop('LANG', None)
-os.environ.pop('LC_ALL', None)
diff --git a/.travis/debops_common_tests.sh b/.travis/debops_common_tests.sh
deleted file mode 100755
index 50e67ada..00000000
--- a/.travis/debops_common_tests.sh
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/bash -ex
-# Run some invocations of DebOps.
-
-TMPDIR="/tmp/debops-$$"
-TRAVIS_BUILD_DIR="${TRAVIS_BUILD_DIR:-`pwd`}"
-TARGET_COUNT="${TARGET_COUNT:-2}"
-ANSIBLE_VERSION="${VER:-2.6.1}"
-DISTRO=debian # Naturally DebOps only supports Debian.
-
-export PYTHONPATH="${PYTHONPATH}:${TRAVIS_BUILD_DIR}"
-
-function on_exit()
-{
- echo travis_fold:start:cleanup
- [ "$KEEP" ] || {
- rm -rf "$TMPDIR" || true
- for i in $(seq $TARGET_COUNT)
- do
- docker kill target$i || true
- done
- }
- echo travis_fold:end:cleanup
-}
-
-trap on_exit EXIT
-mkdir "$TMPDIR"
-
-
-echo travis_fold:start:job_setup
-pip install -qqqU debops==0.7.2 ansible==${ANSIBLE_VERSION}
-debops-init "$TMPDIR/project"
-cd "$TMPDIR/project"
-
-cat > .debops.cfg <<-EOF
-[ansible defaults]
-strategy_plugins = ${TRAVIS_BUILD_DIR}/ansible_mitogen/plugins/strategy
-strategy = mitogen_linear
-EOF
-
-chmod go= ${TRAVIS_BUILD_DIR}/tests/data/docker/mitogen__has_sudo_pubkey.key
-
-cat > ansible/inventory/group_vars/debops_all_hosts.yml <<-EOF
-ansible_python_interpreter: /usr/bin/python2.7
-
-ansible_user: mitogen__has_sudo_pubkey
-ansible_become_pass: has_sudo_pubkey_password
-ansible_ssh_private_key_file: ${TRAVIS_BUILD_DIR}/tests/data/docker/mitogen__has_sudo_pubkey.key
-
-# Speed up slow DH generation.
-dhparam__bits: ["128", "64"]
-EOF
-
-DOCKER_HOSTNAME="$(python ${TRAVIS_BUILD_DIR}/tests/show_docker_hostname.py)"
-
-for i in $(seq $TARGET_COUNT)
-do
- port=$((2200 + $i))
- docker run \
- --rm \
- --detach \
- --publish 0.0.0.0:$port:22/tcp \
- --name=target$i \
- mitogen/${DISTRO}-test
-
- echo \
- target$i \
- ansible_host=$DOCKER_HOSTNAME \
- ansible_port=$port \
- >> ansible/inventory/hosts
-done
-
-echo
-echo --- ansible/inventory/hosts: ----
-cat ansible/inventory/hosts
-echo ---
-
-# Now we have real host key checking, we need to turn it off. :)
-export ANSIBLE_HOST_KEY_CHECKING=False
-
-echo travis_fold:end:job_setup
-
-
-echo travis_fold:start:first_run
-/usr/bin/time debops common "$@"
-echo travis_fold:end:first_run
-
-
-echo travis_fold:start:second_run
-/usr/bin/time debops common "$@"
-echo travis_fold:end:second_run
diff --git a/.travis/mitogen_tests.sh b/.travis/mitogen_tests.sh
deleted file mode 100755
index db393d73..00000000
--- a/.travis/mitogen_tests.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash -ex
-# Run the Mitogen tests.
-
-MITOGEN_TEST_DISTRO="${DISTRO:-debian}"
-MITOGEN_LOG_LEVEL=debug PYTHONPATH=. ${TRAVIS_BUILD_DIR}/run_tests -vvv
diff --git a/README.md b/README.md
index 979afc66..5ef2447f 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,13 @@
# Mitogen
+
Please see the documentation.
+
+![](https://i.imgur.com/eBM6LhJ.gif)
+
+[![Total alerts](https://img.shields.io/lgtm/alerts/g/dw/mitogen.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/dw/mitogen/alerts/)
+
+[![Build Status](https://travis-ci.org/dw/mitogen.svg?branch=master)](https://travis-ci.org/dw/mitogen)
+
+[![Pipelines Status](https://dev.azure.com/dw-mitogen/Mitogen/_apis/build/status/dw.mitogen?branchName=master)](https://dev.azure.com/dw-mitogen/Mitogen/_build/latest?definitionId=1?branchName=master)
diff --git a/ansible_mitogen/affinity.py b/ansible_mitogen/affinity.py
new file mode 100644
index 00000000..d7ae45a6
--- /dev/null
+++ b/ansible_mitogen/affinity.py
@@ -0,0 +1,241 @@
+# Copyright 2017, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+"""
+As Mitogen separates asynchronous IO out to a broker thread, communication
+necessarily involves context switching and waking that thread. When application
+threads and the broker share a CPU, this can be almost invisibly fast - around
+25 microseconds for a full A->B->A round-trip.
+
+However when threads are scheduled on different CPUs, round-trip delays
+regularly vary wildly, and easily into milliseconds. Many contributing factors
+exist, not least scenarios like:
+
+1. A is preempted immediately after waking B, but before releasing the GIL.
+2. B wakes from IO wait only to immediately enter futex wait.
+3. A may wait 10ms or more for another timeslice, as the scheduler on its CPU
+   runs threads unrelated to its transaction (i.e. not B), then wakes only to
+   release its GIL before entering IO sleep to wait for a reply from B, which
+   cannot exist yet.
+4. B wakes, acquires GIL, performs work, and sends reply to A, causing it to
+ wake. B is preempted before releasing GIL.
+5. A wakes from IO wait only to immediately enter futex wait.
+6. B may wait 10ms or more for another timeslice, then wakes only to release
+   its GIL before sleeping again.
+7. A wakes, acquires GIL, finally receives reply.
+
+Per the above, if we are unlucky, on an even moderately busy machine it is
+possible to lose milliseconds just in scheduling delay, and the effect is
+compounded when pairs of threads in process A are communicating with pairs of
+threads in process B using the same scheme, such as when an Ansible
+WorkerProcess is communicating with ContextService in the connection
+multiplexer. In the worst case it could involve 4 threads working in lockstep
+spread across 4 busy CPUs.
+
+Since multithreading in Python is essentially useless except for waiting on IO
+due to the presence of the GIL, at least in Ansible there is no good reason for
+threads in the same process to run on distinct CPUs - they always operate in
+lockstep due to the GIL, and are thus vulnerable to issues like above.
+
+Linux lacks any natural API to describe what we want; it only permits
+individual threads to be constrained to run on specific CPUs, and for that
+constraint to be inherited by new threads and forks of the constrained thread.
+
+This module therefore implements a CPU pinning policy for Ansible processes,
+providing methods that should be called early in any new process, either to
+rebalance which CPU it is pinned to, or in the case of subprocesses, to remove
+the pinning entirely. It is likely to require ongoing tweaking, since pinning
+necessarily involves preventing the scheduler from making load balancing
+decisions.
+"""
+
+import ctypes
+import mmap
+import multiprocessing
+import os
+import struct
+
+import mitogen.parent
+
+
+try:
+ _libc = ctypes.CDLL(None, use_errno=True)
+ _strerror = _libc.strerror
+ _strerror.restype = ctypes.c_char_p
+ _pthread_mutex_init = _libc.pthread_mutex_init
+ _pthread_mutex_lock = _libc.pthread_mutex_lock
+ _pthread_mutex_unlock = _libc.pthread_mutex_unlock
+ _sched_setaffinity = _libc.sched_setaffinity
+except (OSError, AttributeError):
+ _libc = None
+ _strerror = None
+ _pthread_mutex_init = None
+ _pthread_mutex_lock = None
+ _pthread_mutex_unlock = None
+ _sched_setaffinity = None
+
+
+class pthread_mutex_t(ctypes.Structure):
+ """
+ Wrap pthread_mutex_t to allow storing a lock in shared memory.
+ """
+    _fields_ = [
+        # Over-allocated buffer; 512 bytes is comfortably larger than any
+        # common platform's pthread_mutex_t.
+        ('data', ctypes.c_uint8 * 512),
+    ]
+
+ def init(self):
+ if _pthread_mutex_init(self.data, 0):
+ raise Exception(_strerror(ctypes.get_errno()))
+
+ def acquire(self):
+ if _pthread_mutex_lock(self.data):
+ raise Exception(_strerror(ctypes.get_errno()))
+
+ def release(self):
+ if _pthread_mutex_unlock(self.data):
+ raise Exception(_strerror(ctypes.get_errno()))
+
+
+class State(ctypes.Structure):
+ """
+    Contents of shared memory segment. This allows the :class:`Policy`
+    assignment methods to be called from any child, since affinity assignment
+    must happen from within the context of the new child process.
+ """
+ _fields_ = [
+ ('lock', pthread_mutex_t),
+ ('counter', ctypes.c_uint8),
+ ]
+
+
+class Policy(object):
+ """
+ Process affinity policy.
+ """
+ def assign_controller(self):
+ """
+ Assign the Ansible top-level policy to this process.
+ """
+
+ def assign_muxprocess(self):
+ """
+ Assign the MuxProcess policy to this process.
+ """
+
+ def assign_worker(self):
+ """
+ Assign the WorkerProcess policy to this process.
+ """
+
+ def assign_subprocess(self):
+ """
+ Assign the helper subprocess policy to this process.
+ """
+
+
+class LinuxPolicy(Policy):
+ """
+ :class:`Policy` for Linux machines. The scheme here was tested on an
+ otherwise idle 16 thread machine.
+
+ - The connection multiplexer is pinned to CPU 0.
+ - The Ansible top-level (strategy) is pinned to CPU 1.
+    - WorkerProcesses are pinned sequentially to 2..N, wrapping around when no
+ more CPUs exist.
+ - Children such as SSH may be scheduled on any CPU except 0/1.
+
+    If the machine has fewer than 4 cores available, the top-level and workers
+ are pinned between CPU 2..N, i.e. no CPU is reserved for the top-level
+ process.
+
+    This could be improved by pinning workers to independent physical cores
+    before reusing the second hyperthread of an existing core.
+
+    A hook is installed that causes :meth:`_clear` to run in the child of any
+ process created with :func:`mitogen.parent.detach_popen`, ensuring
+ CPU-intensive children like SSH are not forced to share the same core as
+ the (otherwise potentially very busy) parent.
+ """
+ def __init__(self):
+ self.mem = mmap.mmap(-1, 4096)
+ self.state = State.from_buffer(self.mem)
+ self.state.lock.init()
+ if self._cpu_count() < 4:
+ self._reserve_mask = 3
+ self._reserve_shift = 2
+ self._reserve_controller = True
+ else:
+ self._reserve_mask = 1
+ self._reserve_shift = 1
+ self._reserve_controller = False
+
+ def _set_affinity(self, mask):
+ mitogen.parent._preexec_hook = self._clear
+ s = struct.pack('L', mask)
+ _sched_setaffinity(os.getpid(), len(s), s)
+
+ def _cpu_count(self):
+ return multiprocessing.cpu_count()
+
+ def _balance(self):
+ self.state.lock.acquire()
+ try:
+ n = self.state.counter
+ self.state.counter += 1
+ finally:
+ self.state.lock.release()
+
+ self._set_cpu(self._reserve_shift + (
+ (n % max(1, (self._cpu_count() - self._reserve_shift)))
+ ))
+
+ def _set_cpu(self, cpu):
+ self._set_affinity(1 << cpu)
+
+ def _clear(self):
+ self._set_affinity(0xffffffff & ~self._reserve_mask)
+
+ def assign_controller(self):
+ if self._reserve_controller:
+ self._set_cpu(1)
+ else:
+ self._balance()
+
+ def assign_muxprocess(self):
+ self._set_cpu(0)
+
+ def assign_worker(self):
+ self._balance()
+
+ def assign_subprocess(self):
+ self._clear()
+
+
+if _sched_setaffinity is not None:
+ policy = LinuxPolicy()
+else:
+ policy = Policy()
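+
+
+# A minimal usage sketch (the call sites below are hypothetical; this module
+# only defines the policy object):
+#
+#   import ansible_mitogen.affinity
+#   # In the connection multiplexer:
+#   ansible_mitogen.affinity.policy.assign_muxprocess()
+#   # In each newly forked WorkerProcess:
+#   ansible_mitogen.affinity.policy.assign_worker()
+#
+# On non-Linux machines `policy` is the no-op base Policy, so the calls are
+# safe everywhere.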
diff --git a/ansible_mitogen/compat/simplejson/__init__.py b/ansible_mitogen/compat/simplejson/__init__.py
new file mode 100644
index 00000000..d5b4d399
--- /dev/null
+++ b/ansible_mitogen/compat/simplejson/__init__.py
@@ -0,0 +1,318 @@
+r"""JSON (JavaScript Object Notation) is a subset of
+JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
+interchange format.
+
+:mod:`simplejson` exposes an API familiar to users of the standard library
+:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
+version of the :mod:`json` library contained in Python 2.6, but maintains
+compatibility with Python 2.4 and Python 2.5 and (currently) has
+significant performance advantages, even without using the optional C
+extension for speedups.
+
+Encoding basic Python object hierarchies::
+
+ >>> import simplejson as json
+ >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
+ '["foo", {"bar": ["baz", null, 1.0, 2]}]'
+ >>> print json.dumps("\"foo\bar")
+ "\"foo\bar"
+ >>> print json.dumps(u'\u1234')
+ "\u1234"
+ >>> print json.dumps('\\')
+ "\\"
+ >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
+ {"a": 0, "b": 0, "c": 0}
+ >>> from StringIO import StringIO
+ >>> io = StringIO()
+ >>> json.dump(['streaming API'], io)
+ >>> io.getvalue()
+ '["streaming API"]'
+
+Compact encoding::
+
+ >>> import simplejson as json
+ >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
+ '[1,2,3,{"4":5,"6":7}]'
+
+Pretty printing::
+
+ >>> import simplejson as json
+ >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
+ >>> print '\n'.join([l.rstrip() for l in s.splitlines()])
+ {
+ "4": 5,
+ "6": 7
+ }
+
+Decoding JSON::
+
+ >>> import simplejson as json
+ >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
+ >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
+ True
+ >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
+ True
+ >>> from StringIO import StringIO
+ >>> io = StringIO('["streaming API"]')
+ >>> json.load(io)[0] == 'streaming API'
+ True
+
+Specializing JSON object decoding::
+
+ >>> import simplejson as json
+ >>> def as_complex(dct):
+ ... if '__complex__' in dct:
+ ... return complex(dct['real'], dct['imag'])
+ ... return dct
+ ...
+ >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
+ ... object_hook=as_complex)
+ (1+2j)
+ >>> import decimal
+ >>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
+ True
+
+Specializing JSON object encoding::
+
+ >>> import simplejson as json
+ >>> def encode_complex(obj):
+ ... if isinstance(obj, complex):
+ ... return [obj.real, obj.imag]
+    ...     raise TypeError(repr(obj) + " is not JSON serializable")
+ ...
+ >>> json.dumps(2 + 1j, default=encode_complex)
+ '[2.0, 1.0]'
+ >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
+ '[2.0, 1.0]'
+ >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
+ '[2.0, 1.0]'
+
+
+Using simplejson.tool from the shell to validate and pretty-print::
+
+ $ echo '{"json":"obj"}' | python -m simplejson.tool
+ {
+ "json": "obj"
+ }
+ $ echo '{ 1.2:3.4}' | python -m simplejson.tool
+ Expecting property name: line 1 column 2 (char 2)
+"""
+__version__ = '2.0.9'
+__all__ = [
+ 'dump', 'dumps', 'load', 'loads',
+ 'JSONDecoder', 'JSONEncoder',
+]
+
+__author__ = 'Bob Ippolito <bob@redivi.com>'
+
+from decoder import JSONDecoder
+from encoder import JSONEncoder
+
+_default_encoder = JSONEncoder(
+ skipkeys=False,
+ ensure_ascii=True,
+ check_circular=True,
+ allow_nan=True,
+ indent=None,
+ separators=None,
+ encoding='utf-8',
+ default=None,
+)
+
+def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
+ allow_nan=True, cls=None, indent=None, separators=None,
+ encoding='utf-8', default=None, **kw):
+ """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
+ ``.write()``-supporting file-like object).
+
+ If ``skipkeys`` is true then ``dict`` keys that are not basic types
+ (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+ will be skipped instead of raising a ``TypeError``.
+
+ If ``ensure_ascii`` is false, then the some chunks written to ``fp``
+ may be ``unicode`` instances, subject to normal Python ``str`` to
+ ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
+ understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
+ to cause an error.
+
+ If ``check_circular`` is false, then the circular reference check
+ for container types will be skipped and a circular reference will
+ result in an ``OverflowError`` (or worse).
+
+ If ``allow_nan`` is false, then it will be a ``ValueError`` to
+ serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
+ in strict compliance of the JSON specification, instead of using the
+ JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+ If ``indent`` is a non-negative integer, then JSON array elements and object
+ members will be pretty-printed with that indent level. An indent level
+ of 0 will only insert newlines. ``None`` is the most compact representation.
+
+ If ``separators`` is an ``(item_separator, dict_separator)`` tuple
+ then it will be used instead of the default ``(', ', ': ')`` separators.
+ ``(',', ':')`` is the most compact JSON representation.
+
+ ``encoding`` is the character encoding for str instances, default is UTF-8.
+
+ ``default(obj)`` is a function that should return a serializable version
+ of obj or raise TypeError. The default simply raises TypeError.
+
+ To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+ ``.default()`` method to serialize additional types), specify it with
+ the ``cls`` kwarg.
+
+ """
+ # cached encoder
+ if (not skipkeys and ensure_ascii and
+ check_circular and allow_nan and
+ cls is None and indent is None and separators is None and
+ encoding == 'utf-8' and default is None and not kw):
+ iterable = _default_encoder.iterencode(obj)
+ else:
+ if cls is None:
+ cls = JSONEncoder
+ iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+ check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+ separators=separators, encoding=encoding,
+ default=default, **kw).iterencode(obj)
+ # could accelerate with writelines in some versions of Python, at
+ # a debuggability cost
+ for chunk in iterable:
+ fp.write(chunk)
+
+
+def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
+ allow_nan=True, cls=None, indent=None, separators=None,
+ encoding='utf-8', default=None, **kw):
+ """Serialize ``obj`` to a JSON formatted ``str``.
+
+ If ``skipkeys`` is false then ``dict`` keys that are not basic types
+ (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+ will be skipped instead of raising a ``TypeError``.
+
+ If ``ensure_ascii`` is false, then the return value will be a
+ ``unicode`` instance subject to normal Python ``str`` to ``unicode``
+ coercion rules instead of being escaped to an ASCII ``str``.
+
+ If ``check_circular`` is false, then the circular reference check
+ for container types will be skipped and a circular reference will
+ result in an ``OverflowError`` (or worse).
+
+ If ``allow_nan`` is false, then it will be a ``ValueError`` to
+ serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
+ strict compliance of the JSON specification, instead of using the
+ JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+ If ``indent`` is a non-negative integer, then JSON array elements and
+ object members will be pretty-printed with that indent level. An indent
+ level of 0 will only insert newlines. ``None`` is the most compact
+ representation.
+
+ If ``separators`` is an ``(item_separator, dict_separator)`` tuple
+ then it will be used instead of the default ``(', ', ': ')`` separators.
+ ``(',', ':')`` is the most compact JSON representation.
+
+ ``encoding`` is the character encoding for str instances, default is UTF-8.
+
+ ``default(obj)`` is a function that should return a serializable version
+ of obj or raise TypeError. The default simply raises TypeError.
+
+ To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+ ``.default()`` method to serialize additional types), specify it with
+ the ``cls`` kwarg.
+
+ """
+ # cached encoder
+ if (not skipkeys and ensure_ascii and
+ check_circular and allow_nan and
+ cls is None and indent is None and separators is None and
+ encoding == 'utf-8' and default is None and not kw):
+ return _default_encoder.encode(obj)
+ if cls is None:
+ cls = JSONEncoder
+ return cls(
+ skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+ check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+ separators=separators, encoding=encoding, default=default,
+ **kw).encode(obj)
+
+
+_default_decoder = JSONDecoder(encoding=None, object_hook=None)
+
+
+def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
+ parse_int=None, parse_constant=None, **kw):
+ """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
+ a JSON document) to a Python object.
+
+ If the contents of ``fp`` is encoded with an ASCII based encoding other
+ than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
+ be specified. Encodings that are not ASCII based (such as UCS-2) are
+ not allowed, and should be wrapped with
+ ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
+ object and passed to ``loads()``
+
+ ``object_hook`` is an optional function that will be called with the
+ result of any object literal decode (a ``dict``). The return value of
+ ``object_hook`` will be used instead of the ``dict``. This feature
+ can be used to implement custom decoders (e.g. JSON-RPC class hinting).
+
+ To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
+ kwarg.
+
+ """
+ return loads(fp.read(),
+ encoding=encoding, cls=cls, object_hook=object_hook,
+ parse_float=parse_float, parse_int=parse_int,
+ parse_constant=parse_constant, **kw)
+
+
+def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
+ parse_int=None, parse_constant=None, **kw):
+ """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
+ document) to a Python object.
+
+ If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
+ other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
+ must be specified. Encodings that are not ASCII based (such as UCS-2)
+ are not allowed and should be decoded to ``unicode`` first.
+
+ ``object_hook`` is an optional function that will be called with the
+ result of any object literal decode (a ``dict``). The return value of
+ ``object_hook`` will be used instead of the ``dict``. This feature
+ can be used to implement custom decoders (e.g. JSON-RPC class hinting).
+
+ ``parse_float``, if specified, will be called with the string
+ of every JSON float to be decoded. By default this is equivalent to
+ float(num_str). This can be used to use another datatype or parser
+ for JSON floats (e.g. decimal.Decimal).
+
+ ``parse_int``, if specified, will be called with the string
+ of every JSON int to be decoded. By default this is equivalent to
+ int(num_str). This can be used to use another datatype or parser
+ for JSON integers (e.g. float).
+
+ ``parse_constant``, if specified, will be called with one of the
+ following strings: -Infinity, Infinity, NaN, null, true, false.
+ This can be used to raise an exception if invalid JSON numbers
+ are encountered.
+
+ To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
+ kwarg.
+
+ """
+ if (cls is None and encoding is None and object_hook is None and
+ parse_int is None and parse_float is None and
+ parse_constant is None and not kw):
+ return _default_decoder.decode(s)
+ if cls is None:
+ cls = JSONDecoder
+ if object_hook is not None:
+ kw['object_hook'] = object_hook
+ if parse_float is not None:
+ kw['parse_float'] = parse_float
+ if parse_int is not None:
+ kw['parse_int'] = parse_int
+ if parse_constant is not None:
+ kw['parse_constant'] = parse_constant
+ return cls(encoding=encoding, **kw).decode(s)
diff --git a/ansible_mitogen/compat/simplejson/decoder.py b/ansible_mitogen/compat/simplejson/decoder.py
new file mode 100644
index 00000000..b769ea48
--- /dev/null
+++ b/ansible_mitogen/compat/simplejson/decoder.py
@@ -0,0 +1,354 @@
+"""Implementation of JSONDecoder
+"""
+import re
+import sys
+import struct
+
+from simplejson.scanner import make_scanner
+try:
+ from simplejson._speedups import scanstring as c_scanstring
+except ImportError:
+ c_scanstring = None
+
+__all__ = ['JSONDecoder']
+
+FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
+
+def _floatconstants():
+ _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
+ if sys.byteorder != 'big':
+ _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
+ nan, inf = struct.unpack('dd', _BYTES)
+ return nan, inf, -inf
+
+NaN, PosInf, NegInf = _floatconstants()
+
+
+def linecol(doc, pos):
+ lineno = doc.count('\n', 0, pos) + 1
+ if lineno == 1:
+ colno = pos
+ else:
+ colno = pos - doc.rindex('\n', 0, pos)
+ return lineno, colno
+
+
+def errmsg(msg, doc, pos, end=None):
+ # Note that this function is called from _speedups
+ lineno, colno = linecol(doc, pos)
+ if end is None:
+ #fmt = '{0}: line {1} column {2} (char {3})'
+ #return fmt.format(msg, lineno, colno, pos)
+ fmt = '%s: line %d column %d (char %d)'
+ return fmt % (msg, lineno, colno, pos)
+ endlineno, endcolno = linecol(doc, end)
+ #fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
+ #return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
+ fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
+ return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
+
+
+_CONSTANTS = {
+ '-Infinity': NegInf,
+ 'Infinity': PosInf,
+ 'NaN': NaN,
+}
+
+STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
+BACKSLASH = {
+ '"': u'"', '\\': u'\\', '/': u'/',
+ 'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
+}
+
+DEFAULT_ENCODING = "utf-8"
+
+def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
+ """Scan the string s for a JSON string. End is the index of the
+ character in s after the quote that started the JSON string.
+ Unescapes all valid JSON string escape sequences and raises ValueError
+ on attempt to decode an invalid string. If strict is False then literal
+ control characters are allowed in the string.
+
+ Returns a tuple of the decoded string and the index of the character in s
+ after the end quote."""
+ if encoding is None:
+ encoding = DEFAULT_ENCODING
+ chunks = []
+ _append = chunks.append
+ begin = end - 1
+ while 1:
+ chunk = _m(s, end)
+ if chunk is None:
+ raise ValueError(
+ errmsg("Unterminated string starting at", s, begin))
+ end = chunk.end()
+ content, terminator = chunk.groups()
+            # Content contains zero or more unescaped string characters
+ if content:
+ if not isinstance(content, unicode):
+ content = unicode(content, encoding)
+ _append(content)
+ # Terminator is the end of string, a literal control character,
+ # or a backslash denoting that an escape sequence follows
+ if terminator == '"':
+ break
+ elif terminator != '\\':
+ if strict:
+ msg = "Invalid control character %r at" % (terminator,)
+ #msg = "Invalid control character {0!r} at".format(terminator)
+ raise ValueError(errmsg(msg, s, end))
+ else:
+ _append(terminator)
+ continue
+ try:
+ esc = s[end]
+ except IndexError:
+ raise ValueError(
+ errmsg("Unterminated string starting at", s, begin))
+ # If not a unicode escape sequence, must be in the lookup table
+ if esc != 'u':
+ try:
+ char = _b[esc]
+ except KeyError:
+ msg = "Invalid \\escape: " + repr(esc)
+ raise ValueError(errmsg(msg, s, end))
+ end += 1
+ else:
+ # Unicode escape sequence
+ esc = s[end + 1:end + 5]
+ next_end = end + 5
+ if len(esc) != 4:
+ msg = "Invalid \\uXXXX escape"
+ raise ValueError(errmsg(msg, s, end))
+ uni = int(esc, 16)
+ # Check for surrogate pair on UCS-4 systems
+ if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
+ msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
+ if not s[end + 5:end + 7] == '\\u':
+ raise ValueError(errmsg(msg, s, end))
+ esc2 = s[end + 7:end + 11]
+ if len(esc2) != 4:
+ raise ValueError(errmsg(msg, s, end))
+ uni2 = int(esc2, 16)
+ uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
+ next_end += 6
+ char = unichr(uni)
+ end = next_end
+ # Append the unescaped character
+ _append(char)
+ return u''.join(chunks), end
+
+
+# Use speedup if available
+scanstring = c_scanstring or py_scanstring
+
+WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
+WHITESPACE_STR = ' \t\n\r'
+
+def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
+ pairs = {}
+ # Use a slice to prevent IndexError from being raised, the following
+ # check will raise a more specific ValueError if the string is empty
+ nextchar = s[end:end + 1]
+ # Normally we expect nextchar == '"'
+ if nextchar != '"':
+ if nextchar in _ws:
+ end = _w(s, end).end()
+ nextchar = s[end:end + 1]
+ # Trivial empty object
+ if nextchar == '}':
+ return pairs, end + 1
+ elif nextchar != '"':
+ raise ValueError(errmsg("Expecting property name", s, end))
+ end += 1
+ while True:
+ key, end = scanstring(s, end, encoding, strict)
+
+ # To skip some function call overhead we optimize the fast paths where
+ # the JSON key separator is ": " or just ":".
+ if s[end:end + 1] != ':':
+ end = _w(s, end).end()
+ if s[end:end + 1] != ':':
+ raise ValueError(errmsg("Expecting : delimiter", s, end))
+
+ end += 1
+
+ try:
+ if s[end] in _ws:
+ end += 1
+ if s[end] in _ws:
+ end = _w(s, end + 1).end()
+ except IndexError:
+ pass
+
+ try:
+ value, end = scan_once(s, end)
+ except StopIteration:
+ raise ValueError(errmsg("Expecting object", s, end))
+ pairs[key] = value
+
+ try:
+ nextchar = s[end]
+ if nextchar in _ws:
+ end = _w(s, end + 1).end()
+ nextchar = s[end]
+ except IndexError:
+ nextchar = ''
+ end += 1
+
+ if nextchar == '}':
+ break
+ elif nextchar != ',':
+ raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
+
+ try:
+ nextchar = s[end]
+ if nextchar in _ws:
+ end += 1
+ nextchar = s[end]
+ if nextchar in _ws:
+ end = _w(s, end + 1).end()
+ nextchar = s[end]
+ except IndexError:
+ nextchar = ''
+
+ end += 1
+ if nextchar != '"':
+ raise ValueError(errmsg("Expecting property name", s, end - 1))
+
+ if object_hook is not None:
+ pairs = object_hook(pairs)
+ return pairs, end
+
+def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
+ values = []
+ nextchar = s[end:end + 1]
+ if nextchar in _ws:
+ end = _w(s, end + 1).end()
+ nextchar = s[end:end + 1]
+ # Look-ahead for trivial empty array
+ if nextchar == ']':
+ return values, end + 1
+ _append = values.append
+ while True:
+ try:
+ value, end = scan_once(s, end)
+ except StopIteration:
+ raise ValueError(errmsg("Expecting object", s, end))
+ _append(value)
+ nextchar = s[end:end + 1]
+ if nextchar in _ws:
+ end = _w(s, end + 1).end()
+ nextchar = s[end:end + 1]
+ end += 1
+ if nextchar == ']':
+ break
+ elif nextchar != ',':
+ raise ValueError(errmsg("Expecting , delimiter", s, end))
+
+ try:
+ if s[end] in _ws:
+ end += 1
+ if s[end] in _ws:
+ end = _w(s, end + 1).end()
+ except IndexError:
+ pass
+
+ return values, end
+
+class JSONDecoder(object):
+ """Simple JSON decoder
+
+ Performs the following translations in decoding by default:
+
+ +---------------+-------------------+
+ | JSON | Python |
+ +===============+===================+
+ | object | dict |
+ +---------------+-------------------+
+ | array | list |
+ +---------------+-------------------+
+ | string | unicode |
+ +---------------+-------------------+
+ | number (int) | int, long |
+ +---------------+-------------------+
+ | number (real) | float |
+ +---------------+-------------------+
+ | true | True |
+ +---------------+-------------------+
+ | false | False |
+ +---------------+-------------------+
+ | null | None |
+ +---------------+-------------------+
+
+ It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
+ their corresponding ``float`` values, which is outside the JSON spec.
+
+ """
+
+ def __init__(self, encoding=None, object_hook=None, parse_float=None,
+ parse_int=None, parse_constant=None, strict=True):
+ """``encoding`` determines the encoding used to interpret any ``str``
+ objects decoded by this instance (utf-8 by default). It has no
+ effect when decoding ``unicode`` objects.
+
+        Note that currently only encodings that are a superset of ASCII work;
+ strings of other encodings should be passed in as ``unicode``.
+
+ ``object_hook``, if specified, will be called with the result
+ of every JSON object decoded and its return value will be used in
+ place of the given ``dict``. This can be used to provide custom
+ deserializations (e.g. to support JSON-RPC class hinting).
+
+ ``parse_float``, if specified, will be called with the string
+ of every JSON float to be decoded. By default this is equivalent to
+ float(num_str). This can be used to use another datatype or parser
+ for JSON floats (e.g. decimal.Decimal).
+
+ ``parse_int``, if specified, will be called with the string
+ of every JSON int to be decoded. By default this is equivalent to
+ int(num_str). This can be used to use another datatype or parser
+ for JSON integers (e.g. float).
+
+ ``parse_constant``, if specified, will be called with one of the
+ following strings: -Infinity, Infinity, NaN.
+ This can be used to raise an exception if invalid JSON numbers
+ are encountered.
+
+ """
+ self.encoding = encoding
+ self.object_hook = object_hook
+ self.parse_float = parse_float or float
+ self.parse_int = parse_int or int
+ self.parse_constant = parse_constant or _CONSTANTS.__getitem__
+ self.strict = strict
+ self.parse_object = JSONObject
+ self.parse_array = JSONArray
+ self.parse_string = scanstring
+ self.scan_once = make_scanner(self)
+
+ def decode(self, s, _w=WHITESPACE.match):
+ """Return the Python representation of ``s`` (a ``str`` or ``unicode``
+ instance containing a JSON document)
+
+ """
+ obj, end = self.raw_decode(s, idx=_w(s, 0).end())
+ end = _w(s, end).end()
+ if end != len(s):
+ raise ValueError(errmsg("Extra data", s, end, len(s)))
+ return obj
+
+ def raw_decode(self, s, idx=0):
+ """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
+ with a JSON document) and return a 2-tuple of the Python
+ representation and the index in ``s`` where the document ended.
+
+ This can be used to decode a JSON document from a string that may
+ have extraneous data at the end.
+
+ """
+ try:
+ obj, end = self.scan_once(s, idx)
+ except StopIteration:
+ raise ValueError("No JSON object could be decoded")
+ return obj, end
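+
+# A minimal usage sketch of the decoder API above. raw_decode() tolerates
+# trailing data and reports where the document ended, while decode() insists
+# the whole string is consumed:
+#
+#   >>> JSONDecoder().decode('{"a": [1, 2]}')
+#   {u'a': [1, 2]}
+#   >>> JSONDecoder().raw_decode('{"a": 1} trailing')
+#   ({u'a': 1}, 8)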
diff --git a/ansible_mitogen/compat/simplejson/encoder.py b/ansible_mitogen/compat/simplejson/encoder.py
new file mode 100644
index 00000000..cf582903
--- /dev/null
+++ b/ansible_mitogen/compat/simplejson/encoder.py
@@ -0,0 +1,440 @@
+"""Implementation of JSONEncoder
+"""
+import re
+
+try:
+ from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
+except ImportError:
+ c_encode_basestring_ascii = None
+try:
+ from simplejson._speedups import make_encoder as c_make_encoder
+except ImportError:
+ c_make_encoder = None
+
+ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
+ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
+HAS_UTF8 = re.compile(r'[\x80-\xff]')
+ESCAPE_DCT = {
+ '\\': '\\\\',
+ '"': '\\"',
+ '\b': '\\b',
+ '\f': '\\f',
+ '\n': '\\n',
+ '\r': '\\r',
+ '\t': '\\t',
+}
+for i in range(0x20):
+ #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
+ ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
+
+# Assume this produces an infinity on all machines (probably not guaranteed)
+INFINITY = float('1e66666')
+FLOAT_REPR = repr
+
+def encode_basestring(s):
+ """Return a JSON representation of a Python string
+
+ """
+ def replace(match):
+ return ESCAPE_DCT[match.group(0)]
+ return '"' + ESCAPE.sub(replace, s) + '"'
+
+
+def py_encode_basestring_ascii(s):
+ """Return an ASCII-only JSON representation of a Python string
+
+ """
+ if isinstance(s, str) and HAS_UTF8.search(s) is not None:
+ s = s.decode('utf-8')
+ def replace(match):
+ s = match.group(0)
+ try:
+ return ESCAPE_DCT[s]
+ except KeyError:
+ n = ord(s)
+ if n < 0x10000:
+ #return '\\u{0:04x}'.format(n)
+ return '\\u%04x' % (n,)
+ else:
+ # surrogate pair
+ n -= 0x10000
+ s1 = 0xd800 | ((n >> 10) & 0x3ff)
+ s2 = 0xdc00 | (n & 0x3ff)
+ #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
+ return '\\u%04x\\u%04x' % (s1, s2)
+ return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
+
+
+encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
+
+class JSONEncoder(object):
+ """Extensible JSON encoder for Python data structures.
+
+ Supports the following objects and types by default:
+
+ +-------------------+---------------+
+ | Python | JSON |
+ +===================+===============+
+ | dict | object |
+ +-------------------+---------------+
+ | list, tuple | array |
+ +-------------------+---------------+
+ | str, unicode | string |
+ +-------------------+---------------+
+ | int, long, float | number |
+ +-------------------+---------------+
+ | True | true |
+ +-------------------+---------------+
+ | False | false |
+ +-------------------+---------------+
+ | None | null |
+ +-------------------+---------------+
+
+    To extend this to recognize other objects, subclass and implement a
+    ``.default()`` method that returns a serializable object for ``o`` if
+    possible; otherwise it should call the superclass implementation (to
+    raise ``TypeError``).
+
+ """
+ item_separator = ', '
+ key_separator = ': '
+ def __init__(self, skipkeys=False, ensure_ascii=True,
+ check_circular=True, allow_nan=True, sort_keys=False,
+ indent=None, separators=None, encoding='utf-8', default=None):
+ """Constructor for JSONEncoder, with sensible defaults.
+
+        If skipkeys is false, then it is a TypeError to attempt
+        encoding of keys that are not str, int, long, float or None. If
+        skipkeys is true, such items are simply skipped.
+
+        If ensure_ascii is true, the output is guaranteed to be str
+        objects with all incoming unicode characters escaped. If
+        ensure_ascii is false, the output will be a unicode object.
+
+ If check_circular is true, then lists, dicts, and custom encoded
+ objects will be checked for circular references during encoding to
+ prevent an infinite recursion (which would cause an OverflowError).
+ Otherwise, no such check takes place.
+
+ If allow_nan is true, then NaN, Infinity, and -Infinity will be
+ encoded as such. This behavior is not JSON specification compliant,
+ but is consistent with most JavaScript based encoders and decoders.
+ Otherwise, it will be a ValueError to encode such floats.
+
+ If sort_keys is true, then the output of dictionaries will be
+ sorted by key; this is useful for regression tests to ensure
+ that JSON serializations can be compared on a day-to-day basis.
+
+ If indent is a non-negative integer, then JSON array
+ elements and object members will be pretty-printed with that
+ indent level. An indent level of 0 will only insert newlines.
+ None is the most compact representation.
+
+        If specified, separators should be an (item_separator, key_separator)
+ tuple. The default is (', ', ': '). To get the most compact JSON
+ representation you should specify (',', ':') to eliminate whitespace.
+
+ If specified, default is a function that gets called for objects
+ that can't otherwise be serialized. It should return a JSON encodable
+ version of the object or raise a ``TypeError``.
+
+ If encoding is not None, then all input strings will be
+ transformed into unicode using that encoding prior to JSON-encoding.
+ The default is UTF-8.
+
+ """
+
+ self.skipkeys = skipkeys
+ self.ensure_ascii = ensure_ascii
+ self.check_circular = check_circular
+ self.allow_nan = allow_nan
+ self.sort_keys = sort_keys
+ self.indent = indent
+ if separators is not None:
+ self.item_separator, self.key_separator = separators
+ if default is not None:
+ self.default = default
+ self.encoding = encoding
+
+ def default(self, o):
+ """Implement this method in a subclass such that it returns
+ a serializable object for ``o``, or calls the base implementation
+ (to raise a ``TypeError``).
+
+ For example, to support arbitrary iterators, you could
+ implement default like this::
+
+ def default(self, o):
+ try:
+ iterable = iter(o)
+ except TypeError:
+ pass
+ else:
+ return list(iterable)
+ return JSONEncoder.default(self, o)
+
+ """
+ raise TypeError(repr(o) + " is not JSON serializable")
+
+ def encode(self, o):
+ """Return a JSON string representation of a Python data structure.
+
+ >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
+ '{"foo": ["bar", "baz"]}'
+
+ """
+ # This is for extremely simple cases and benchmarks.
+ if isinstance(o, basestring):
+ if isinstance(o, str):
+ _encoding = self.encoding
+ if (_encoding is not None
+ and not (_encoding == 'utf-8')):
+ o = o.decode(_encoding)
+ if self.ensure_ascii:
+ return encode_basestring_ascii(o)
+ else:
+ return encode_basestring(o)
+ # This doesn't pass the iterator directly to ''.join() because the
+ # exceptions aren't as detailed. The list call should be roughly
+ # equivalent to the PySequence_Fast that ''.join() would do.
+ chunks = self.iterencode(o, _one_shot=True)
+ if not isinstance(chunks, (list, tuple)):
+ chunks = list(chunks)
+ return ''.join(chunks)
+
+ def iterencode(self, o, _one_shot=False):
+ """Encode the given object and yield each string
+ representation as available.
+
+ For example::
+
+ for chunk in JSONEncoder().iterencode(bigobject):
+ mysocket.write(chunk)
+
+ """
+ if self.check_circular:
+ markers = {}
+ else:
+ markers = None
+ if self.ensure_ascii:
+ _encoder = encode_basestring_ascii
+ else:
+ _encoder = encode_basestring
+ if self.encoding != 'utf-8':
+ def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
+ if isinstance(o, str):
+ o = o.decode(_encoding)
+ return _orig_encoder(o)
+
+ def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
+ # Check for specials. Note that this type of test is processor- and/or
+ # platform-specific, so do tests which don't depend on the internals.
+
+ if o != o:
+ text = 'NaN'
+ elif o == _inf:
+ text = 'Infinity'
+ elif o == _neginf:
+ text = '-Infinity'
+ else:
+ return _repr(o)
+
+ if not allow_nan:
+ raise ValueError(
+ "Out of range float values are not JSON compliant: " +
+ repr(o))
+
+ return text
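+        # Behaviour sketch: floatstr(float('nan')) -> 'NaN', floatstr(1.5) ->
+        # '1.5'; with allow_nan=False the specials raise ValueError instead.
+        # The `o != o` test above works because NaN is the only float value
+        # that compares unequal to itself.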
+
+
+ if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
+ _iterencode = c_make_encoder(
+ markers, self.default, _encoder, self.indent,
+ self.key_separator, self.item_separator, self.sort_keys,
+ self.skipkeys, self.allow_nan)
+ else:
+ _iterencode = _make_iterencode(
+ markers, self.default, _encoder, self.indent, floatstr,
+ self.key_separator, self.item_separator, self.sort_keys,
+ self.skipkeys, _one_shot)
+ return _iterencode(o, 0)
+
+def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
+ ## HACK: hand-optimized bytecode; turn globals into locals
+ False=False,
+ True=True,
+ ValueError=ValueError,
+ basestring=basestring,
+ dict=dict,
+ float=float,
+ id=id,
+ int=int,
+ isinstance=isinstance,
+ list=list,
+ long=long,
+ str=str,
+ tuple=tuple,
+ ):
+
+ def _iterencode_list(lst, _current_indent_level):
+ if not lst:
+ yield '[]'
+ return
+ if markers is not None:
+ markerid = id(lst)
+ if markerid in markers:
+ raise ValueError("Circular reference detected")
+ markers[markerid] = lst
+ buf = '['
+ if _indent is not None:
+ _current_indent_level += 1
+ newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+ separator = _item_separator + newline_indent
+ buf += newline_indent
+ else:
+ newline_indent = None
+ separator = _item_separator
+ first = True
+ for value in lst:
+ if first:
+ first = False
+ else:
+ buf = separator
+ if isinstance(value, basestring):
+ yield buf + _encoder(value)
+ elif value is None:
+ yield buf + 'null'
+ elif value is True:
+ yield buf + 'true'
+ elif value is False:
+ yield buf + 'false'
+ elif isinstance(value, (int, long)):
+ yield buf + str(value)
+ elif isinstance(value, float):
+ yield buf + _floatstr(value)
+ else:
+ yield buf
+ if isinstance(value, (list, tuple)):
+ chunks = _iterencode_list(value, _current_indent_level)
+ elif isinstance(value, dict):
+ chunks = _iterencode_dict(value, _current_indent_level)
+ else:
+ chunks = _iterencode(value, _current_indent_level)
+ for chunk in chunks:
+ yield chunk
+ if newline_indent is not None:
+ _current_indent_level -= 1
+ yield '\n' + (' ' * (_indent * _current_indent_level))
+ yield ']'
+ if markers is not None:
+ del markers[markerid]
+
+ def _iterencode_dict(dct, _current_indent_level):
+ if not dct:
+ yield '{}'
+ return
+ if markers is not None:
+ markerid = id(dct)
+ if markerid in markers:
+ raise ValueError("Circular reference detected")
+ markers[markerid] = dct
+ yield '{'
+ if _indent is not None:
+ _current_indent_level += 1
+ newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+ item_separator = _item_separator + newline_indent
+ yield newline_indent
+ else:
+ newline_indent = None
+ item_separator = _item_separator
+ first = True
+ if _sort_keys:
+ items = dct.items()
+ items.sort(key=lambda kv: kv[0])
+ else:
+ items = dct.iteritems()
+ for key, value in items:
+ if isinstance(key, basestring):
+ pass
+ # JavaScript is weakly typed for these, so it makes sense to
+ # also allow them. Many encoders seem to do something like this.
+ elif isinstance(key, float):
+ key = _floatstr(key)
+ elif key is True:
+ key = 'true'
+ elif key is False:
+ key = 'false'
+ elif key is None:
+ key = 'null'
+ elif isinstance(key, (int, long)):
+ key = str(key)
+ elif _skipkeys:
+ continue
+ else:
+ raise TypeError("key " + repr(key) + " is not a string")
+ if first:
+ first = False
+ else:
+ yield item_separator
+ yield _encoder(key)
+ yield _key_separator
+ if isinstance(value, basestring):
+ yield _encoder(value)
+ elif value is None:
+ yield 'null'
+ elif value is True:
+ yield 'true'
+ elif value is False:
+ yield 'false'
+ elif isinstance(value, (int, long)):
+ yield str(value)
+ elif isinstance(value, float):
+ yield _floatstr(value)
+ else:
+ if isinstance(value, (list, tuple)):
+ chunks = _iterencode_list(value, _current_indent_level)
+ elif isinstance(value, dict):
+ chunks = _iterencode_dict(value, _current_indent_level)
+ else:
+ chunks = _iterencode(value, _current_indent_level)
+ for chunk in chunks:
+ yield chunk
+ if newline_indent is not None:
+ _current_indent_level -= 1
+ yield '\n' + (' ' * (_indent * _current_indent_level))
+ yield '}'
+ if markers is not None:
+ del markers[markerid]
+
+ def _iterencode(o, _current_indent_level):
+ if isinstance(o, basestring):
+ yield _encoder(o)
+ elif o is None:
+ yield 'null'
+ elif o is True:
+ yield 'true'
+ elif o is False:
+ yield 'false'
+ elif isinstance(o, (int, long)):
+ yield str(o)
+ elif isinstance(o, float):
+ yield _floatstr(o)
+ elif isinstance(o, (list, tuple)):
+ for chunk in _iterencode_list(o, _current_indent_level):
+ yield chunk
+ elif isinstance(o, dict):
+ for chunk in _iterencode_dict(o, _current_indent_level):
+ yield chunk
+ else:
+ if markers is not None:
+ markerid = id(o)
+ if markerid in markers:
+ raise ValueError("Circular reference detected")
+ markers[markerid] = o
+ o = _default(o)
+ for chunk in _iterencode(o, _current_indent_level):
+ yield chunk
+ if markers is not None:
+ del markers[markerid]
+
+ return _iterencode
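+
+# A minimal usage sketch of the encoder above:
+#
+#   >>> JSONEncoder(sort_keys=True).encode({'b': 1, 'a': [True, None]})
+#   '{"a": [true, null], "b": 1}'
+#   >>> list(JSONEncoder(indent=2).iterencode([1]))
+#   ['[\n  1', '\n', ']']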
diff --git a/ansible_mitogen/compat/simplejson/scanner.py b/ansible_mitogen/compat/simplejson/scanner.py
new file mode 100644
index 00000000..adbc6ec9
--- /dev/null
+++ b/ansible_mitogen/compat/simplejson/scanner.py
@@ -0,0 +1,65 @@
+"""JSON token scanner
+"""
+import re
+try:
+ from simplejson._speedups import make_scanner as c_make_scanner
+except ImportError:
+ c_make_scanner = None
+
+__all__ = ['make_scanner']
+
+NUMBER_RE = re.compile(
+ r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
+ (re.VERBOSE | re.MULTILINE | re.DOTALL))
+
+def py_make_scanner(context):
+ parse_object = context.parse_object
+ parse_array = context.parse_array
+ parse_string = context.parse_string
+ match_number = NUMBER_RE.match
+ encoding = context.encoding
+ strict = context.strict
+ parse_float = context.parse_float
+ parse_int = context.parse_int
+ parse_constant = context.parse_constant
+ object_hook = context.object_hook
+
+ def _scan_once(string, idx):
+ try:
+ nextchar = string[idx]
+ except IndexError:
+ raise StopIteration
+
+ if nextchar == '"':
+ return parse_string(string, idx + 1, encoding, strict)
+ elif nextchar == '{':
+ return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
+ elif nextchar == '[':
+ return parse_array((string, idx + 1), _scan_once)
+ elif nextchar == 'n' and string[idx:idx + 4] == 'null':
+ return None, idx + 4
+ elif nextchar == 't' and string[idx:idx + 4] == 'true':
+ return True, idx + 4
+ elif nextchar == 'f' and string[idx:idx + 5] == 'false':
+ return False, idx + 5
+
+ m = match_number(string, idx)
+ if m is not None:
+ integer, frac, exp = m.groups()
+ if frac or exp:
+ res = parse_float(integer + (frac or '') + (exp or ''))
+ else:
+ res = parse_int(integer)
+ return res, m.end()
+ elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
+ return parse_constant('NaN'), idx + 3
+ elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
+ return parse_constant('Infinity'), idx + 8
+ elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
+ return parse_constant('-Infinity'), idx + 9
+ else:
+ raise StopIteration
+
+ return _scan_once
+
+make_scanner = c_make_scanner or py_make_scanner
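+
+# A minimal usage sketch: the scanner returns a `(value, end_index)` pair for
+# whichever JSON value starts at `idx`, raising StopIteration otherwise. Using
+# the JSONDecoder from the sibling decoder module as the context:
+#
+#   >>> scan = py_make_scanner(JSONDecoder())
+#   >>> scan('[true, 1.5]', 0)
+#   ([True, 1.5], 11)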
diff --git a/ansible_mitogen/connection.py b/ansible_mitogen/connection.py
index 708b6c13..c7e70c43 100644
--- a/ansible_mitogen/connection.py
+++ b/ansible_mitogen/connection.py
@@ -29,10 +29,13 @@
from __future__ import absolute_import
from __future__ import unicode_literals
+import errno
import logging
import os
+import pprint
import random
import stat
+import sys
import time
import jinja2.runtime
@@ -41,6 +44,8 @@ import ansible.errors
import ansible.plugins.connection
import ansible.utils.shlex
+import mitogen.core
+import mitogen.fork
import mitogen.unix
import mitogen.utils
@@ -48,27 +53,42 @@ import ansible_mitogen.parsing
import ansible_mitogen.process
import ansible_mitogen.services
import ansible_mitogen.target
+import ansible_mitogen.transport_config
LOG = logging.getLogger(__name__)
-def optional_secret(value):
+def optional_int(value):
"""
- Wrap `value` in :class:`mitogen.core.Secret` if it is not :data:`None`,
- otherwise return :data:`None`.
+    Convert `value` to an integer, or return :data:`None` if `value` is
+    :data:`None` or cannot be converted.
"""
- if value is not None:
- return mitogen.core.Secret(value)
+ try:
+ return int(value)
+ except (TypeError, ValueError):
+ return None
+
+
+def convert_bool(obj):
+ if isinstance(obj, bool):
+ return obj
+ if str(obj).lower() in ('no', 'false', '0'):
+ return False
+ if str(obj).lower() not in ('yes', 'true', '1'):
+ raise ansible.errors.AnsibleConnectionFailure(
+ 'expected yes/no/true/false/0/1, got %r' % (obj,)
+ )
+ return True
-def parse_python_path(s):
+def default(value, default):
"""
- Given the string set for ansible_python_interpeter, parse it using shell
- syntax and return an appropriate argument vector.
+    Return `default` if `value` is :data:`None`, otherwise return `value`.
"""
- if s:
- return ansible.utils.shlex.shlex_split(s)
+ if value is None:
+ return default
+ return value
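+
+
+# Behaviour sketch for the three small coercion helpers above:
+#
+#   optional_int('5') == 5;   optional_int(None) is None
+#   convert_bool('Yes') is True;   convert_bool('0') is False
+#   default(None, 22) == 22;   default(10, 22) == 10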
def _connect_local(spec):
@@ -78,7 +98,7 @@ def _connect_local(spec):
return {
'method': 'local',
'kwargs': {
- 'python_path': spec['python_path'],
+ 'python_path': spec.python_path(),
}
}
@@ -92,21 +112,30 @@ def _connect_ssh(spec):
else:
check_host_keys = 'ignore'
+ # #334: tilde-expand private_key_file to avoid implementation difference
+ # between Python and OpenSSH.
+ private_key_file = spec.private_key_file()
+ if private_key_file is not None:
+ private_key_file = os.path.expanduser(private_key_file)
+
return {
'method': 'ssh',
'kwargs': {
'check_host_keys': check_host_keys,
- 'hostname': spec['remote_addr'],
- 'username': spec['remote_user'],
- 'password': optional_secret(spec['password']),
- 'port': spec['port'],
- 'python_path': spec['python_path'],
- 'identity_file': spec['private_key_file'],
+ 'hostname': spec.remote_addr(),
+ 'username': spec.remote_user(),
+ 'compression': convert_bool(
+ default(spec.mitogen_ssh_compression(), True)
+ ),
+ 'password': spec.password(),
+ 'port': spec.port(),
+ 'python_path': spec.python_path(),
+ 'identity_file': private_key_file,
'identities_only': False,
- 'ssh_path': spec['ssh_executable'],
- 'connect_timeout': spec['ansible_ssh_timeout'],
- 'ssh_args': spec['ssh_args'],
- 'ssh_debug_level': spec['mitogen_ssh_debug_level'],
+ 'ssh_path': spec.ssh_executable(),
+ 'connect_timeout': spec.ansible_ssh_timeout(),
+ 'ssh_args': spec.ssh_args(),
+ 'ssh_debug_level': spec.mitogen_ssh_debug_level(),
}
}
@@ -118,10 +147,10 @@ def _connect_docker(spec):
return {
'method': 'docker',
'kwargs': {
- 'username': spec['remote_user'],
- 'container': spec['remote_addr'],
- 'python_path': spec['python_path'],
- 'connect_timeout': spec['ansible_ssh_timeout'] or spec['timeout'],
+ 'username': spec.remote_user(),
+ 'container': spec.remote_addr(),
+ 'python_path': spec.python_path(),
+ 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
}
}
@@ -133,10 +162,11 @@ def _connect_kubectl(spec):
return {
'method': 'kubectl',
'kwargs': {
- 'pod': spec['remote_addr'],
- 'python_path': spec['python_path'],
- 'connect_timeout': spec['ansible_ssh_timeout'] or spec['timeout'],
- 'kubectl_args': spec['extra_args'],
+ 'pod': spec.remote_addr(),
+ 'python_path': spec.python_path(),
+ 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
+ 'kubectl_path': spec.mitogen_kubectl_path(),
+ 'kubectl_args': spec.extra_args(),
}
}
@@ -148,10 +178,10 @@ def _connect_jail(spec):
return {
'method': 'jail',
'kwargs': {
- 'username': spec['remote_user'],
- 'container': spec['remote_addr'],
- 'python_path': spec['python_path'],
- 'connect_timeout': spec['ansible_ssh_timeout'] or spec['timeout'],
+ 'username': spec.remote_user(),
+ 'container': spec.remote_addr(),
+ 'python_path': spec.python_path(),
+ 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
}
}
@@ -163,9 +193,10 @@ def _connect_lxc(spec):
return {
'method': 'lxc',
'kwargs': {
- 'container': spec['remote_addr'],
- 'python_path': spec['python_path'],
- 'connect_timeout': spec['ansible_ssh_timeout'] or spec['timeout'],
+ 'container': spec.remote_addr(),
+ 'python_path': spec.python_path(),
+ 'lxc_attach_path': spec.mitogen_lxc_attach_path(),
+ 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
}
}
@@ -177,9 +208,10 @@ def _connect_lxd(spec):
return {
'method': 'lxd',
'kwargs': {
- 'container': spec['remote_addr'],
- 'python_path': spec['python_path'],
- 'connect_timeout': spec['ansible_ssh_timeout'] or spec['timeout'],
+ 'container': spec.remote_addr(),
+ 'python_path': spec.python_path(),
+ 'lxc_path': spec.mitogen_lxc_path(),
+ 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
}
}
@@ -188,24 +220,24 @@ def _connect_machinectl(spec):
"""
Return ContextService arguments for a machinectl connection.
"""
- return _connect_setns(dict(spec, mitogen_kind='machinectl'))
+ return _connect_setns(spec, kind='machinectl')
-def _connect_setns(spec):
+def _connect_setns(spec, kind=None):
"""
Return ContextService arguments for a mitogen_setns connection.
"""
return {
'method': 'setns',
'kwargs': {
- 'container': spec['remote_addr'],
- 'username': spec['remote_user'],
- 'python_path': spec['python_path'],
- 'kind': spec['mitogen_kind'],
- 'docker_path': spec['mitogen_docker_path'],
- 'kubectl_path': spec['mitogen_kubectl_path'],
- 'lxc_info_path': spec['mitogen_lxc_info_path'],
- 'machinectl_path': spec['mitogen_machinectl_path'],
+ 'container': spec.remote_addr(),
+ 'username': spec.remote_user(),
+ 'python_path': spec.python_path(),
+ 'kind': kind or spec.mitogen_kind(),
+ 'docker_path': spec.mitogen_docker_path(),
+ 'lxc_path': spec.mitogen_lxc_path(),
+ 'lxc_info_path': spec.mitogen_lxc_info_path(),
+ 'machinectl_path': spec.mitogen_machinectl_path(),
}
}
@@ -218,11 +250,11 @@ def _connect_su(spec):
'method': 'su',
'enable_lru': True,
'kwargs': {
- 'username': spec['become_user'],
- 'password': optional_secret(spec['become_pass']),
- 'python_path': spec['python_path'],
- 'su_path': spec['become_exe'],
- 'connect_timeout': spec['timeout'],
+ 'username': spec.become_user(),
+ 'password': spec.become_pass(),
+ 'python_path': spec.python_path(),
+ 'su_path': spec.become_exe(),
+ 'connect_timeout': spec.timeout(),
}
}
@@ -235,12 +267,12 @@ def _connect_sudo(spec):
'method': 'sudo',
'enable_lru': True,
'kwargs': {
- 'username': spec['become_user'],
- 'password': optional_secret(spec['become_pass']),
- 'python_path': spec['python_path'],
- 'sudo_path': spec['become_exe'],
- 'connect_timeout': spec['timeout'],
- 'sudo_args': spec['sudo_args'],
+ 'username': spec.become_user(),
+ 'password': spec.become_pass(),
+ 'python_path': spec.python_path(),
+ 'sudo_path': spec.become_exe(),
+ 'connect_timeout': spec.timeout(),
+ 'sudo_args': spec.sudo_args(),
}
}
@@ -253,11 +285,11 @@ def _connect_doas(spec):
'method': 'doas',
'enable_lru': True,
'kwargs': {
- 'username': spec['become_user'],
- 'password': optional_secret(spec['become_pass']),
- 'python_path': spec['python_path'],
- 'doas_path': spec['become_exe'],
- 'connect_timeout': spec['timeout'],
+ 'username': spec.become_user(),
+ 'password': spec.become_pass(),
+ 'python_path': spec.python_path(),
+ 'doas_path': spec.become_exe(),
+ 'connect_timeout': spec.timeout(),
}
}
@@ -269,11 +301,11 @@ def _connect_mitogen_su(spec):
return {
'method': 'su',
'kwargs': {
- 'username': spec['remote_user'],
- 'password': optional_secret(spec['password']),
- 'python_path': spec['python_path'],
- 'su_path': spec['become_exe'],
- 'connect_timeout': spec['timeout'],
+ 'username': spec.remote_user(),
+ 'password': spec.password(),
+ 'python_path': spec.python_path(),
+ 'su_path': spec.become_exe(),
+ 'connect_timeout': spec.timeout(),
}
}
@@ -285,12 +317,12 @@ def _connect_mitogen_sudo(spec):
return {
'method': 'sudo',
'kwargs': {
- 'username': spec['remote_user'],
- 'password': optional_secret(spec['password']),
- 'python_path': spec['python_path'],
- 'sudo_path': spec['become_exe'],
- 'connect_timeout': spec['timeout'],
- 'sudo_args': spec['sudo_args'],
+ 'username': spec.remote_user(),
+ 'password': spec.password(),
+ 'python_path': spec.python_path(),
+ 'sudo_path': spec.become_exe(),
+ 'connect_timeout': spec.timeout(),
+ 'sudo_args': spec.sudo_args(),
}
}
@@ -302,11 +334,11 @@ def _connect_mitogen_doas(spec):
return {
'method': 'doas',
'kwargs': {
- 'username': spec['remote_user'],
- 'password': optional_secret(spec['password']),
- 'python_path': spec['python_path'],
- 'doas_path': spec['become_exe'],
- 'connect_timeout': spec['timeout'],
+ 'username': spec.remote_user(),
+ 'password': spec.password(),
+ 'python_path': spec.python_path(),
+ 'doas_path': spec.become_exe(),
+ 'connect_timeout': spec.timeout(),
}
}
@@ -333,110 +365,40 @@ CONNECTION_METHOD = {
}
-def config_from_play_context(transport, inventory_name, connection):
+class Broker(mitogen.master.Broker):
"""
- Return a dict representing all important connection configuration, allowing
- the same functions to work regardless of whether configuration came from
- play_context (direct connection) or host vars (mitogen_via=).
+ WorkerProcess maintains at most 2 file descriptors, therefore does not need
+ the exuberant syscall expense of EpollPoller, so override it and restore
+ the poll() poller.
"""
- return {
- 'transport': transport,
- 'inventory_name': inventory_name,
- 'remote_addr': connection._play_context.remote_addr,
- 'remote_user': connection._play_context.remote_user,
- 'become': connection._play_context.become,
- 'become_method': connection._play_context.become_method,
- 'become_user': connection._play_context.become_user,
- 'become_pass': connection._play_context.become_pass,
- 'password': connection._play_context.password,
- 'port': connection._play_context.port,
- 'python_path': parse_python_path(
- connection.get_task_var('ansible_python_interpreter',
- default='/usr/bin/python')
- ),
- 'private_key_file': connection._play_context.private_key_file,
- 'ssh_executable': connection._play_context.ssh_executable,
- 'timeout': connection._play_context.timeout,
- 'ansible_ssh_timeout':
- connection.get_task_var('ansible_ssh_timeout',
- default=C.DEFAULT_TIMEOUT),
- 'ssh_args': [
- mitogen.core.to_text(term)
- for s in (
- getattr(connection._play_context, 'ssh_args', ''),
- getattr(connection._play_context, 'ssh_common_args', ''),
- getattr(connection._play_context, 'ssh_extra_args', '')
- )
- for term in ansible.utils.shlex.shlex_split(s or '')
- ],
- 'become_exe': connection._play_context.become_exe,
- 'sudo_args': [
- mitogen.core.to_text(term)
- for s in (
- connection._play_context.sudo_flags,
- connection._play_context.become_flags
- )
- for term in ansible.utils.shlex.shlex_split(s or '')
- ],
- 'mitogen_via':
- connection.get_task_var('mitogen_via'),
- 'mitogen_kind':
- connection.get_task_var('mitogen_kind'),
- 'mitogen_docker_path':
- connection.get_task_var('mitogen_docker_path'),
- 'mitogen_kubectl_path':
- connection.get_task_var('mitogen_kubectl_path'),
- 'mitogen_lxc_info_path':
- connection.get_task_var('mitogen_lxc_info_path'),
- 'mitogen_machinectl_path':
- connection.get_task_var('mitogen_machinectl_path'),
- 'mitogen_ssh_debug_level':
- connection.get_task_var('mitogen_ssh_debug_level'),
- 'extra_args':
- connection.get_extra_args(),
- }
-
-
-def config_from_hostvars(transport, inventory_name, connection,
- hostvars, become_user):
- """
- Override config_from_play_context() to take equivalent information from
- host vars.
- """
- config = config_from_play_context(transport, inventory_name, connection)
- hostvars = dict(hostvars)
- return dict(config, **{
- 'remote_addr': hostvars.get('ansible_host', inventory_name),
- 'become': bool(become_user),
- 'become_user': become_user,
- 'become_pass': None,
- 'remote_user': hostvars.get('ansible_user'), # TODO
- 'password': (hostvars.get('ansible_ssh_pass') or
- hostvars.get('ansible_password')),
- 'port': hostvars.get('ansible_port'),
- 'python_path': parse_python_path(hostvars.get('ansible_python_interpreter')),
- 'private_key_file': (hostvars.get('ansible_ssh_private_key_file') or
- hostvars.get('ansible_private_key_file')),
- 'mitogen_via': hostvars.get('mitogen_via'),
- 'mitogen_kind': hostvars.get('mitogen_kind'),
- 'mitogen_docker_path': hostvars.get('mitogen_docker_path'),
- 'mitogen_kubectl_path': hostvars.get('mitogen_kubectl_path'),
- 'mitogen_lxc_info_path': hostvars.get('mitogen_lxc_info_path'),
- 'mitogen_machinectl_path': hostvars.get('mitogen_machinctl_path'),
- })
+ poller_class = mitogen.core.Poller
class CallChain(mitogen.parent.CallChain):
+ """
+ Extend :class:`mitogen.parent.CallChain` to additionally cause the
+ associated :class:`Connection` to be reset if a ChannelError occurs.
+
+    This only catches failures that occur while a call is pending; it is a
+    stop-gap until a more general method is available to notice connection
+    failure in every situation.
+ """
call_aborted_msg = (
'Mitogen was disconnected from the remote environment while a call '
'was in-progress. If you feel this is in error, please file a bug. '
'Original error was: %s'
)
+ def __init__(self, connection, context, pipelined=False):
+ super(CallChain, self).__init__(context, pipelined)
+ #: The connection to reset on CallError.
+ self._connection = connection
+
def _rethrow(self, recv):
try:
return recv.get().unpickle()
except mitogen.core.ChannelError as e:
+ self._connection.reset()
raise ansible.errors.AnsibleConnectionFailure(
self.call_aborted_msg % (e,)
)
@@ -550,11 +512,30 @@ class Connection(ansible.plugins.connection.ConnectionBase):
self.host_vars = task_vars['hostvars']
self.delegate_to_hostname = delegate_to_hostname
self.loader_basedir = loader_basedir
- self.close(new_task=True)
+ self._mitogen_reset(mode='put')
def get_task_var(self, key, default=None):
- if self._task_vars and key in self._task_vars:
- return self._task_vars[key]
+ """
+ Fetch the value of a task variable related to connection configuration,
+ or, if delegate_to is active, fetch the same variable via HostVars for
+ the delegated-to machine.
+
+ When running with delegate_to, Ansible tasks have variables associated
+        with the original machine, not the delegated-to machine; therefore it
+ does not make sense to extract connection-related configuration for the
+ delegated-to machine from them.
+ """
+ if self._task_vars:
+ if self.delegate_to_hostname is None:
+ if key in self._task_vars:
+ return self._task_vars[key]
+ else:
+ delegated_vars = self._task_vars['ansible_delegated_vars']
+ if self.delegate_to_hostname in delegated_vars:
+ task_vars = delegated_vars[self.delegate_to_hostname]
+ if key in task_vars:
+ return task_vars[key]
+
return default
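+
+    # Illustrative sketch: with `delegate_to: web1` active, get_task_var('x')
+    # consults self._task_vars['ansible_delegated_vars']['web1']['x'] rather
+    # than the original host's self._task_vars['x'].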
@property
@@ -566,12 +547,14 @@ class Connection(ansible.plugins.connection.ConnectionBase):
def connected(self):
return self.context is not None
- def _config_from_via(self, via_spec):
+ def _spec_from_via(self, via_spec):
"""
Produce a connection specification given a string `via_spec`, of
- the form `[become_user@]inventory_hostname`.
+ the form `[[become_method:]become_user@]inventory_hostname`.
"""
become_user, _, inventory_name = via_spec.rpartition('@')
+ become_method, _, become_user = become_user.rpartition(':')
+
via_vars = self.host_vars[inventory_name]
if isinstance(via_vars, jinja2.runtime.Undefined):
raise ansible.errors.AnsibleConnectionFailure(
@@ -581,41 +564,68 @@ class Connection(ansible.plugins.connection.ConnectionBase):
)
)
- return config_from_hostvars(
- transport=via_vars.get('ansible_connection', 'ssh'),
+ return ansible_mitogen.transport_config.MitogenViaSpec(
inventory_name=inventory_name,
- connection=self,
- hostvars=via_vars,
+ host_vars=dict(via_vars), # TODO: make it lazy
+ become_method=become_method or None,
become_user=become_user or None,
)
unknown_via_msg = 'mitogen_via=%s of %s specifies an unknown hostname'
via_cycle_msg = 'mitogen_via=%s of %s creates a cycle (%s)'
- def _stack_from_config(self, config, stack=(), seen_names=()):
- if config['inventory_name'] in seen_names:
+ def _stack_from_spec(self, spec, stack=(), seen_names=()):
+ """
+ Return a tuple of ContextService parameter dictionaries corresponding
+ to the connection described by `spec`, and any connection referenced by
+ its `mitogen_via` or `become` fields. Each element is a dict of the
+ form::
+
+ {
+            # Optional. If present and `True`, this hop is eligible for
+ # interpreter recycling.
+ "enable_lru": True,
+ # mitogen.master.Router method name.
+ "method": "ssh",
+ # mitogen.master.Router method kwargs.
+ "kwargs": {
+ "hostname": "..."
+ }
+ }
+
+ :param ansible_mitogen.transport_config.Spec spec:
+ Connection specification.
+ :param tuple stack:
+ Stack elements from parent call (used for recursion).
+ :param tuple seen_names:
+ Inventory hostnames from parent call (cycle detection).
+ :returns:
+ Tuple `(stack, seen_names)`.
+ """
+ if spec.inventory_name() in seen_names:
raise ansible.errors.AnsibleConnectionFailure(
self.via_cycle_msg % (
- config['mitogen_via'],
- config['inventory_name'],
+ spec.mitogen_via(),
+ spec.inventory_name(),
' -> '.join(reversed(
- seen_names + (config['inventory_name'],)
+ seen_names + (spec.inventory_name(),)
)),
)
)
- if config['mitogen_via']:
- stack, seen_names = self._stack_from_config(
- self._config_from_via(config['mitogen_via']),
+ if spec.mitogen_via():
+ stack = self._stack_from_spec(
+ self._spec_from_via(spec.mitogen_via()),
stack=stack,
- seen_names=seen_names + (config['inventory_name'],)
+ seen_names=seen_names + (spec.inventory_name(),),
)
- stack += (CONNECTION_METHOD[config['transport']](config),)
- if config['become']:
- stack += (CONNECTION_METHOD[config['become_method']](config),)
+ stack += (CONNECTION_METHOD[spec.transport()](spec),)
+ if spec.become() and ((spec.become_user() != spec.remote_user()) or
+ C.BECOME_ALLOW_SAME_USER):
+ stack += (CONNECTION_METHOD[spec.become_method()](spec),)
- return stack, seen_names
+ return stack
def _connect_broker(self):
"""
@@ -629,26 +639,6 @@ class Connection(ansible.plugins.connection.ConnectionBase):
broker=self.broker,
)
- def _config_from_direct_connection(self):
- """
- """
- return config_from_play_context(
- transport=self.transport,
- inventory_name=self.inventory_hostname,
- connection=self
- )
-
- def _config_from_delegate_to(self):
- return config_from_hostvars(
- transport=self._play_context.connection,
- inventory_name=self.delegate_to_hostname,
- connection=self,
- hostvars=self.host_vars[self.delegate_to_hostname],
- become_user=(self._play_context.become_user
- if self._play_context.become
- else None),
- )
-
def _build_stack(self):
"""
Construct a list of dictionaries representing the connection
@@ -656,25 +646,35 @@ class Connection(ansible.plugins.connection.ConnectionBase):
additionally used by the integration tests "mitogen_get_stack" action
to fetch the would-be connection configuration.
"""
- if self.delegate_to_hostname is not None:
- target_config = self._config_from_delegate_to()
- else:
- target_config = self._config_from_direct_connection()
-
- stack, _ = self._stack_from_config(target_config)
- return stack
+ return self._stack_from_spec(
+ ansible_mitogen.transport_config.PlayContextSpec(
+ connection=self,
+ play_context=self._play_context,
+ transport=self.transport,
+ inventory_name=self.inventory_hostname,
+ )
+ )
def _connect_stack(self, stack):
"""
Pass `stack` to ContextService, requesting a copy of the context object
- representing the target. If no connection exists yet, ContextService
- will establish it before returning it or throwing an error.
+ representing the last tuple element. If no connection exists yet,
+ ContextService will recursively establish it before returning it or
+ throwing an error.
+
+        See the :meth:`ansible_mitogen.services.ContextService.get` docstring
+        for a description of the returned dictionary.
"""
- dct = self.parent.call_service(
- service_name='ansible_mitogen.services.ContextService',
- method_name='get',
- stack=mitogen.utils.cast(list(stack)),
- )
+ try:
+ dct = self.parent.call_service(
+ service_name='ansible_mitogen.services.ContextService',
+ method_name='get',
+ stack=mitogen.utils.cast(list(stack)),
+ )
+ except mitogen.core.CallError:
+ LOG.warning('Connection failed; stack configuration was:\n%s',
+ pprint.pformat(stack))
+ raise
if dct['msg']:
if dct['method_name'] in self.become_methods:
@@ -682,7 +682,7 @@ class Connection(ansible.plugins.connection.ConnectionBase):
raise ansible.errors.AnsibleConnectionFailure(dct['msg'])
self.context = dct['context']
- self.chain = CallChain(self.context, pipelined=True)
+ self.chain = CallChain(self, self.context, pipelined=True)
if self._play_context.become:
self.login_context = dct['via']
else:
@@ -691,6 +691,11 @@ class Connection(ansible.plugins.connection.ConnectionBase):
self.init_child_result = dct['init_child_result']
def get_good_temp_dir(self):
+ """
+ Return the 'good temporary directory' as discovered by
+ :func:`ansible_mitogen.target.init_child` immediately after
+ ContextService constructed the target context.
+ """
self._connect()
return self.init_child_result['good_temp_dir']
@@ -709,6 +714,20 @@ class Connection(ansible.plugins.connection.ConnectionBase):
self.get_chain().call_no_reply(os.mkdir, self._shell.tmpdir)
return self._shell.tmpdir
+ def _reset_tmp_path(self):
+ """
+ Called by _mitogen_reset(); ask the remote context to delete any
+ temporary directory created for the action. CallChain is not used here
+        to ensure any exception is logged by the context on failure, since the
+ CallChain itself is about to be destructed.
+ """
+ if getattr(self._shell, 'tmpdir', None) is not None:
+ self.context.call_no_reply(
+ ansible_mitogen.target.prune_tree,
+ self._shell.tmpdir,
+ )
+ self._shell.tmpdir = None
+
def _connect(self):
"""
Establish a connection to the master process's UNIX listener socket,
@@ -727,38 +746,103 @@ class Connection(ansible.plugins.connection.ConnectionBase):
stack = self._build_stack()
self._connect_stack(stack)
- def close(self, new_task=False):
+ def _mitogen_reset(self, mode):
"""
- Arrange for the mitogen.master.Router running in the worker to
- gracefully shut down, and wait for shutdown to complete. Safe to call
- multiple times.
+ Forget everything we know about the connected context. This function
+    cannot be called _reset() since that name is used as a public API by the
+    Ansible 2.4 wait_for_connection plug-in.
+
+ :param str mode:
+ Name of ContextService method to use to discard the context, either
+ 'put' or 'reset'.
"""
- if getattr(self._shell, 'tmpdir', None) is not None:
- # Avoid CallChain to ensure exception is logged on failure.
- self.context.call_no_reply(
- ansible_mitogen.target.prune_tree,
- self._shell.tmpdir,
- )
- self._shell.tmpdir = None
+ if not self.context:
+ return
- if self.context:
- self.chain.reset()
- self.parent.call_service(
- service_name='ansible_mitogen.services.ContextService',
- method_name='put',
- context=self.context
- )
+ self._reset_tmp_path()
+ self.chain.reset()
+ self.parent.call_service(
+ service_name='ansible_mitogen.services.ContextService',
+ method_name=mode,
+ context=self.context
+ )
self.context = None
self.login_context = None
self.init_child_result = None
self.chain = None
- if self.broker and not new_task:
+
+ def _shutdown_broker(self):
+ """
+ Shutdown the broker thread during :meth:`close` or :meth:`reset`.
+ """
+ if self.broker:
self.broker.shutdown()
self.broker.join()
self.broker = None
self.router = None
+ # #420: Ansible executes "meta" actions in the top-level process,
+ # meaning "reset_connection" will cause :class:`mitogen.core.Latch`
+ # FDs to be cached and erroneously shared by children on subsequent
+ # WorkerProcess forks. To handle that, call on_fork() to ensure any
+ # shared state is discarded.
+ # #490: only attempt to clean up when it's known that some
+            # resources exist to clean up, otherwise a later __del__ double-call
+            # to close() due to GC at a random moment may obliterate an unrelated
+ # Connection's resources.
+ mitogen.fork.on_fork()
+
+ def close(self):
+ """
+ Arrange for the mitogen.master.Router running in the worker to
+ gracefully shut down, and wait for shutdown to complete. Safe to call
+ multiple times.
+ """
+ self._mitogen_reset(mode='put')
+ self._shutdown_broker()
+
+ def _reset_find_task_vars(self):
+ """
+        Monstrous hack: since "meta: reset_connection" does not run from an
+ action, we cannot capture task variables via :meth:`on_action_run`.
+ Instead walk the parent frames searching for the `all_vars` local from
+ StrategyBase._execute_meta(). If this fails, just leave task_vars
+ unset, likely causing a subtly wrong configuration to be selected.
+ """
+ frame = sys._getframe()
+ while frame and not self._task_vars:
+ self._task_vars = frame.f_locals.get('all_vars')
+ frame = frame.f_back
+
+ reset_compat_msg = (
+ 'Mitogen only supports "reset_connection" on Ansible 2.5.6 or later'
+ )
+
+ def reset(self):
+ """
+ Explicitly terminate the connection to the remote host. This discards
+ any local state we hold for the connection, returns the Connection to
+ the 'disconnected' state, and informs ContextService the connection is
+ bad somehow, and should be shut down and discarded.
+ """
+ if self._task_vars is None:
+ self._reset_find_task_vars()
+
+ if self._play_context.remote_addr is None:
+ # <2.5.6 incorrectly populate PlayContext for reset_connection
+ # https://github.com/ansible/ansible/issues/27520
+ raise ansible.errors.AnsibleConnectionFailure(
+ self.reset_compat_msg
+ )
+
+ self._connect()
+ self._mitogen_reset(mode='reset')
+ self._shutdown_broker()
+
+ # Compatibility with Ansible 2.4 wait_for_connection plug-in.
+ _reset = reset
+
def get_chain(self, use_login=False, use_fork=False):
"""
Return the :class:`mitogen.parent.CallChain` to use for executing
@@ -774,21 +858,20 @@ class Connection(ansible.plugins.connection.ConnectionBase):
self._connect()
if use_login:
return self.login_context.default_call_chain
- if use_fork:
+ # See FORK_SUPPORTED comments in target.py.
+ if use_fork and self.init_child_result['fork_context'] is not None:
return self.init_child_result['fork_context'].default_call_chain
return self.chain
- def create_fork_child(self):
+ def spawn_isolated_child(self):
"""
- Fork a new child off the target context. The actual fork occurs from
- the 'virginal fork parent', which does not any Ansible modules prior to
- fork, to avoid conflicts resulting from custom module_utils paths.
+ Fork or launch a new child off the target context.
:returns:
mitogen.core.Context of the new child.
"""
return self.get_chain(use_fork=True).call(
- ansible_mitogen.target.create_fork_child
+ ansible_mitogen.target.spawn_isolated_child
)
def get_extra_args(self):
@@ -834,9 +917,9 @@ class Connection(ansible.plugins.connection.ConnectionBase):
emulate_tty=emulate_tty,
)
- stderr += 'Shared connection to %s closed.%s' % (
- self._play_context.remote_addr,
- ('\r\n' if emulate_tty else '\n'),
+ stderr += b'Shared connection to %s closed.%s' % (
+ self._play_context.remote_addr.encode(),
+ (b'\r\n' if emulate_tty else b'\n'),
)
return rc, stdout, stderr
@@ -882,6 +965,11 @@ class Connection(ansible.plugins.connection.ConnectionBase):
#: slightly more overhead, so just randomly subtract 4KiB.
SMALL_FILE_LIMIT = mitogen.core.CHUNK_SIZE - 4096
+ def _throw_io_error(self, e, path):
+ if e.args[0] == errno.ENOENT:
+ s = 'file or module does not exist: ' + path
+ raise ansible.errors.AnsibleFileNotFound(s)
+
def put_file(self, in_path, out_path):
"""
Implement put_file() by streamily transferring the file via
@@ -892,7 +980,12 @@ class Connection(ansible.plugins.connection.ConnectionBase):
:param str out_path:
Remote filesystem path to write.
"""
- st = os.stat(in_path)
+ try:
+ st = os.stat(in_path)
+ except OSError as e:
+ self._throw_io_error(e, in_path)
+ raise
+
if not stat.S_ISREG(st.st_mode):
raise IOError('%r is not a regular file.' % (in_path,))
@@ -900,17 +993,22 @@ class Connection(ansible.plugins.connection.ConnectionBase):
# rather than introducing an extra RTT for the child to request it from
# FileService.
if st.st_size <= self.SMALL_FILE_LIMIT:
- fp = open(in_path, 'rb')
try:
- s = fp.read(self.SMALL_FILE_LIMIT + 1)
- finally:
- fp.close()
+ fp = open(in_path, 'rb')
+ try:
+ s = fp.read(self.SMALL_FILE_LIMIT + 1)
+ finally:
+ fp.close()
+            except OSError as e:
+ self._throw_io_error(e, in_path)
+ raise
# Ensure did not grow during read.
if len(s) == st.st_size:
return self.put_data(out_path, s, mode=st.st_mode,
utimes=(st.st_atime, st.st_mtime))
+ self._connect()
self.parent.call_service(
service_name='mitogen.service.FileService',
method_name='register',
diff --git a/ansible_mitogen/logging.py b/ansible_mitogen/logging.py
index 37e309e2..97832938 100644
--- a/ansible_mitogen/logging.py
+++ b/ansible_mitogen/logging.py
@@ -29,7 +29,6 @@
from __future__ import absolute_import
import logging
import os
-import sys
import mitogen.core
import mitogen.utils
diff --git a/ansible_mitogen/mixins.py b/ansible_mitogen/mixins.py
index 4c06063b..7a180952 100644
--- a/ansible_mitogen/mixins.py
+++ b/ansible_mitogen/mixins.py
@@ -30,7 +30,6 @@ from __future__ import absolute_import
import logging
import os
import pwd
-import shutil
import traceback
try:
@@ -156,7 +155,7 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase):
"""
LOG.debug('_remote_file_exists(%r)', path)
return self._connection.get_chain().call(
- os.path.exists,
+ ansible_mitogen.target.file_exists,
mitogen.utils.cast(path)
)
@@ -223,7 +222,7 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase):
"""
LOG.debug('_fixup_perms2(%r, remote_user=%r, execute=%r)',
remote_paths, remote_user, execute)
- if execute and self._load_name not in self.FIXUP_PERMS_RED_HERRING:
+ if execute and self._task.action not in self.FIXUP_PERMS_RED_HERRING:
return self._remote_chmod(remote_paths, mode='u+x')
return self.COMMAND_RESULT.copy()
diff --git a/ansible_mitogen/planner.py b/ansible_mitogen/planner.py
index caf40af3..f3e4500e 100644
--- a/ansible_mitogen/planner.py
+++ b/ansible_mitogen/planner.py
@@ -46,6 +46,7 @@ from ansible.executor import module_common
import ansible.errors
import ansible.module_utils
import mitogen.core
+import mitogen.select
import ansible_mitogen.loaders
import ansible_mitogen.parsing
@@ -172,7 +173,7 @@ class BinaryPlanner(Planner):
return module_common._is_binary(self._inv.module_source)
def get_push_files(self):
- return [self._inv.module_path]
+ return [mitogen.core.to_text(self._inv.module_path)]
def get_kwargs(self, **kwargs):
return super(BinaryPlanner, self).get_kwargs(
@@ -205,15 +206,13 @@ class ScriptPlanner(BinaryPlanner):
involved here, the vanilla implementation uses it and that use is
exploited in common playbooks.
"""
+ key = u'ansible_%s_interpreter' % os.path.basename(path).strip()
try:
- key = u'ansible_%s_interpreter' % os.path.basename(path).strip()
template = self._inv.task_vars[key]
except KeyError:
return path
- return mitogen.utils.cast(
- self._inv.templar.template(self._inv.task_vars[key])
- )
+ return mitogen.utils.cast(self._inv.templar.template(template))
def _get_interpreter(self):
path, arg = ansible_mitogen.parsing.parse_hashbang(
@@ -285,7 +284,7 @@ class NewStylePlanner(ScriptPlanner):
def get_push_files(self):
return super(NewStylePlanner, self).get_push_files() + [
- path
+ mitogen.core.to_text(path)
for fullname, path, is_pkg in self.get_module_map()['custom']
]
@@ -416,28 +415,39 @@ def _propagate_deps(invocation, planner, context):
def _invoke_async_task(invocation, planner):
job_id = '%016x' % random.randint(0, 2**64)
- context = invocation.connection.create_fork_child()
+ context = invocation.connection.spawn_isolated_child()
_propagate_deps(invocation, planner, context)
- context.call_no_reply(
- ansible_mitogen.target.run_module_async,
- job_id=job_id,
- timeout_secs=invocation.timeout_secs,
- kwargs=planner.get_kwargs(),
- )
-
- return {
- 'stdout': json.dumps({
- # modules/utilities/logic/async_wrapper.py::_run_module().
- 'changed': True,
- 'started': 1,
- 'finished': 0,
- 'ansible_job_id': job_id,
- })
- }
+ with mitogen.core.Receiver(context.router) as started_recv:
+ call_recv = context.call_async(
+ ansible_mitogen.target.run_module_async,
+ job_id=job_id,
+ timeout_secs=invocation.timeout_secs,
+ started_sender=started_recv.to_sender(),
+ kwargs=planner.get_kwargs(),
+ )
-def _invoke_forked_task(invocation, planner):
- context = invocation.connection.create_fork_child()
+ # Wait for run_module_async() to crash, or for AsyncRunner to indicate
+ # the job file has been written.
+ for msg in mitogen.select.Select([started_recv, call_recv]):
+ if msg.receiver is call_recv:
+ # It can only be an exception.
+ raise msg.unpickle()
+ break
+
+ return {
+ 'stdout': json.dumps({
+ # modules/utilities/logic/async_wrapper.py::_run_module().
+ 'changed': True,
+ 'started': 1,
+ 'finished': 0,
+ 'ansible_job_id': job_id,
+ })
+ }
+
+
+def _invoke_isolated_task(invocation, planner):
+ context = invocation.connection.spawn_isolated_child()
_propagate_deps(invocation, planner, context)
try:
return context.call(
@@ -477,7 +487,7 @@ def invoke(invocation):
if invocation.wrap_async:
response = _invoke_async_task(invocation, planner)
elif planner.should_fork():
- response = _invoke_forked_task(invocation, planner)
+ response = _invoke_isolated_task(invocation, planner)
else:
_propagate_deps(invocation, planner, invocation.connection.context)
response = invocation.connection.get_chain().call(
diff --git a/ansible_mitogen/plugins/action/__init__.py b/ansible_mitogen/plugins/action/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ansible_mitogen/plugins/action/mitogen_get_stack.py b/ansible_mitogen/plugins/action/mitogen_get_stack.py
new file mode 100644
index 00000000..ed7520cf
--- /dev/null
+++ b/ansible_mitogen/plugins/action/mitogen_get_stack.py
@@ -0,0 +1,54 @@
+# Copyright 2017, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+"""
+Fetch the connection configuration stack that would be used to connect to a
+target, without actually connecting to it.
+"""
+
+import ansible_mitogen.connection
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+ def run(self, tmp=None, task_vars=None):
+ if not isinstance(self._connection,
+ ansible_mitogen.connection.Connection):
+ return {
+ 'skipped': True,
+ }
+
+ return {
+ 'changed': True,
+ 'result': self._connection._build_stack(),
+ '_ansible_verbose_always': True,
+ }
diff --git a/ansible_mitogen/plugins/connection/mitogen_docker.py b/ansible_mitogen/plugins/connection/mitogen_docker.py
index 8af42711..5904c83e 100644
--- a/ansible_mitogen/plugins/connection/mitogen_docker.py
+++ b/ansible_mitogen/plugins/connection/mitogen_docker.py
@@ -42,3 +42,10 @@ import ansible_mitogen.connection
class Connection(ansible_mitogen.connection.Connection):
transport = 'docker'
+
+ @property
+ def docker_cmd(self):
+ """
+ Ansible 2.3 synchronize module wants to know how we run Docker.
+ """
+ return 'docker'
diff --git a/ansible_mitogen/plugins/connection/mitogen_kubectl.py b/ansible_mitogen/plugins/connection/mitogen_kubectl.py
index 5ffe3f7b..2dab131b 100644
--- a/ansible_mitogen/plugins/connection/mitogen_kubectl.py
+++ b/ansible_mitogen/plugins/connection/mitogen_kubectl.py
@@ -31,7 +31,12 @@ from __future__ import absolute_import
import os.path
import sys
-import ansible.plugins.connection.kubectl
+try:
+ from ansible.plugins.connection import kubectl
+except ImportError:
+ kubectl = None
+
+from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils.six import iteritems
try:
@@ -47,9 +52,19 @@ import ansible_mitogen.connection
class Connection(ansible_mitogen.connection.Connection):
transport = 'kubectl'
+ not_supported_msg = (
+ 'The "mitogen_kubectl" plug-in requires a version of Ansible '
+ 'that ships with the "kubectl" connection plug-in.'
+ )
+
+ def __init__(self, *args, **kwargs):
+ if kubectl is None:
+ raise AnsibleConnectionFailure(self.not_supported_msg)
+ super(Connection, self).__init__(*args, **kwargs)
+
def get_extra_args(self):
parameters = []
- for key, option in iteritems(ansible.plugins.connection.kubectl.CONNECTION_OPTIONS):
+ for key, option in iteritems(kubectl.CONNECTION_OPTIONS):
if self.get_task_var('ansible_' + key) is not None:
parameters += [ option, self.get_task_var('ansible_' + key) ]
diff --git a/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py b/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py
new file mode 100644
index 00000000..175e1f8b
--- /dev/null
+++ b/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py
@@ -0,0 +1,67 @@
+# Copyright 2017, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+#
+# This is not the real Strategy implementation module; it exists only as a
+# proxy to the real module, which is loaded using Python's regular import
+# mechanism, to prevent Ansible's PluginLoader from making up a fake name that
+# results in ansible_mitogen plugin modules being loaded twice: once by
+# PluginLoader with a name like "ansible.plugins.strategy.mitogen", which is
+# stuffed into sys.modules even though attempting to import it will trigger an
+# ImportError, and once under its canonical name, "ansible_mitogen.strategy".
+#
+# Therefore we have a proxy module that imports it under the real name, and
+# sets up the duff PluginLoader-imported module to just contain objects from
+# the real module, so duplicate types don't exist in memory, and things like
+# debuggers and isinstance() work predictably.
+#
+
+BASE_DIR = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '../../..')
+)
+
+if BASE_DIR not in sys.path:
+ sys.path.insert(0, BASE_DIR)
+
+import ansible_mitogen.loaders
+import ansible_mitogen.strategy
+
+
+Base = ansible_mitogen.loaders.strategy_loader.get('host_pinned', class_only=True)
+
+if Base is None:
+ raise ImportError(
+ 'The host_pinned strategy is only available in Ansible 2.7 or newer.'
+ )
+
+class StrategyModule(ansible_mitogen.strategy.StrategyMixin, Base):
+ pass
diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py
index 6e18a863..8137af9c 100644
--- a/ansible_mitogen/process.py
+++ b/ansible_mitogen/process.py
@@ -50,15 +50,22 @@ import mitogen.service
import mitogen.unix
import mitogen.utils
+import ansible
import ansible.constants as C
import ansible_mitogen.logging
import ansible_mitogen.services
from mitogen.core import b
+import ansible_mitogen.affinity
LOG = logging.getLogger(__name__)
+ANSIBLE_PKG_OVERRIDE = (
+ u"__version__ = %r\n"
+ u"__author__ = %r\n"
+)
+
def clean_shutdown(sock):
"""
@@ -87,25 +94,22 @@ def getenv_int(key, default=0):
return default
-def setup_gil():
- """
- Set extremely long GIL release interval to let threads naturally progress
- through CPU-heavy sequences without forcing the wake of another thread that
- may contend trying to run the same CPU-heavy code. For the new-style work,
- this drops runtime ~33% and involuntary context switches by >80%,
- essentially making threads cooperatively scheduled.
+def save_pid(name):
"""
- try:
- # Python 2.
- sys.setcheckinterval(100000)
- except AttributeError:
- pass
+ When debugging and profiling, it is very annoying to poke through the
+ process list to discover the currently running Ansible and MuxProcess IDs,
+ especially when trying to catch an issue during early startup. So here, if
+    a magic environment variable is set, stash them in hidden files in the CWD::
- try:
- # Python 3.
- sys.setswitchinterval(10)
- except AttributeError:
- pass
+ alias muxpid="cat .ansible-mux.pid"
+ alias anspid="cat .ansible-controller.pid"
+
+ gdb -p $(muxpid)
+ perf top -p $(anspid)
+ """
+ if os.environ.get('MITOGEN_SAVE_PIDS'):
+ with open('.ansible-%s.pid' % (name,), 'w') as fp:
+ fp.write(str(os.getpid()))
class MuxProcess(object):
@@ -154,13 +158,16 @@ class MuxProcess(object):
_instance = None
@classmethod
- def start(cls):
+ def start(cls, _init_logging=True):
"""
Arrange for the subprocess to be started, if it is not already running.
The parent process picks a UNIX socket path the child will use prior to
fork, creates a socketpair used essentially as a semaphore, then blocks
waiting for the child to indicate the UNIX socket is ready for use.
+
+ :param bool _init_logging:
+ For testing, if :data:`False`, don't initialize logging.
"""
if cls.worker_sock is not None:
return
@@ -168,29 +175,34 @@ class MuxProcess(object):
if faulthandler is not None:
faulthandler.enable()
- setup_gil()
+ mitogen.utils.setup_gil()
cls.unix_listener_path = mitogen.unix.make_socket_path()
cls.worker_sock, cls.child_sock = socket.socketpair()
atexit.register(lambda: clean_shutdown(cls.worker_sock))
mitogen.core.set_cloexec(cls.worker_sock.fileno())
mitogen.core.set_cloexec(cls.child_sock.fileno())
- if os.environ.get('MITOGEN_PROFILING'):
+ cls.profiling = os.environ.get('MITOGEN_PROFILING') is not None
+ if cls.profiling:
mitogen.core.enable_profiling()
cls.original_env = dict(os.environ)
cls.child_pid = os.fork()
- ansible_mitogen.logging.setup()
+ if _init_logging:
+ ansible_mitogen.logging.setup()
if cls.child_pid:
+ save_pid('controller')
+ ansible_mitogen.affinity.policy.assign_controller()
cls.child_sock.close()
cls.child_sock = None
mitogen.core.io_op(cls.worker_sock.recv, 1)
else:
+ save_pid('mux')
+ ansible_mitogen.affinity.policy.assign_muxprocess()
cls.worker_sock.close()
cls.worker_sock = None
self = cls()
self.worker_main()
- sys.exit()
def worker_main(self):
"""
@@ -201,10 +213,19 @@ class MuxProcess(object):
self._setup_master()
self._setup_services()
- # Let the parent know our listening socket is ready.
- mitogen.core.io_op(self.child_sock.send, b('1'))
- # Block until the socket is closed, which happens on parent exit.
- mitogen.core.io_op(self.child_sock.recv, 1)
+ try:
+ # Let the parent know our listening socket is ready.
+ mitogen.core.io_op(self.child_sock.send, b('1'))
+ # Block until the socket is closed, which happens on parent exit.
+ mitogen.core.io_op(self.child_sock.recv, 1)
+ finally:
+ self.broker.shutdown()
+ self.broker.join()
+
+ # Test frameworks living somewhere higher on the stack of the
+ # original parent process may try to catch sys.exit(), so do a C
+ # level exit instead.
+ os._exit(0)
def _enable_router_debug(self):
if 'MITOGEN_ROUTER_DEBUG' in os.environ:
@@ -215,15 +236,43 @@ class MuxProcess(object):
if secs:
mitogen.debug.dump_to_logger(secs=secs)
+ def _setup_responder(self, responder):
+ """
+ Configure :class:`mitogen.master.ModuleResponder` to only permit
+ certain packages, and to generate custom responses for certain modules.
+ """
+ responder.whitelist_prefix('ansible')
+ responder.whitelist_prefix('ansible_mitogen')
+ responder.whitelist_prefix('simplejson')
+ simplejson_path = os.path.join(os.path.dirname(__file__), 'compat')
+ sys.path.insert(0, simplejson_path)
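+        # The compat/ directory is assumed to carry a vendored simplejson for
+        # Python 2.4 targets; inserting it first lets the responder find it.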
+
+        # Ansible 2.3 is compatible with Python 2.4 targets; however,
+ # ansible/__init__.py is not. Instead, executor/module_common.py writes
+ # out a 2.4-compatible namespace package for unknown reasons. So we
+ # copy it here.
+ responder.add_source_override(
+ fullname='ansible',
+ path=ansible.__file__,
+ source=(ANSIBLE_PKG_OVERRIDE % (
+ ansible.__version__,
+ ansible.__author__,
+ )).encode(),
+ is_pkg=True,
+ )
+
def _setup_master(self):
"""
Construct a Router, Broker, and mitogen.unix listener
"""
- self.router = mitogen.master.Router(max_message_size=4096 * 1048576)
- self.router.responder.whitelist_prefix('ansible')
- self.router.responder.whitelist_prefix('ansible_mitogen')
- mitogen.core.listen(self.router.broker, 'shutdown', self.on_broker_shutdown)
- mitogen.core.listen(self.router.broker, 'exit', self.on_broker_exit)
+ self.broker = mitogen.master.Broker(install_watcher=False)
+ self.router = mitogen.master.Router(
+ broker=self.broker,
+ max_message_size=4096 * 1048576,
+ )
+ self._setup_responder(self.router.responder)
+ mitogen.core.listen(self.broker, 'shutdown', self.on_broker_shutdown)
+ mitogen.core.listen(self.broker, 'exit', self.on_broker_exit)
self.listener = mitogen.unix.Listener(
router=self.router,
path=self.unix_listener_path,
@@ -245,7 +294,7 @@ class MuxProcess(object):
ansible_mitogen.services.ContextService(self.router),
ansible_mitogen.services.ModuleDepService(self.router),
],
- size=getenv_int('MITOGEN_POOL_SIZE', default=16),
+ size=getenv_int('MITOGEN_POOL_SIZE', default=32),
)
LOG.debug('Service pool configured: size=%d', self.pool.size)
@@ -256,13 +305,9 @@ class MuxProcess(object):
then cannot clean up pending handlers, which is required for the
threads to exit gracefully.
"""
- self.pool.stop(join=False)
- try:
- os.unlink(self.listener.path)
- except OSError as e:
- # Prevent a shutdown race with the parent process.
- if e.args[0] != errno.ENOENT:
- raise
+ # In normal operation we presently kill the process because there is
+ # not yet any way to cancel connect().
+ self.pool.stop(join=self.profiling)
def on_broker_exit(self):
"""
@@ -270,10 +315,9 @@ class MuxProcess(object):
ourself. In future this should gracefully join the pool, but TERM is
fine for now.
"""
- if os.environ.get('MITOGEN_PROFILING'):
- # TODO: avoid killing pool threads before they have written their
- # .pstats. Really shouldn't be using kill() here at all, but hard
- # to guarantee services can always be unblocked during shutdown.
- time.sleep(1)
-
- os.kill(os.getpid(), signal.SIGTERM)
+ if not self.profiling:
+ # In normal operation we presently kill the process because there is
+ # not yet any way to cancel connect(). When profiling, threads
+ # including the broker must shut down gracefully, otherwise pstats
+ # won't be written.
+ os.kill(os.getpid(), signal.SIGTERM)
diff --git a/ansible_mitogen/runner.py b/ansible_mitogen/runner.py
index 45bb5f0b..768cc57c 100644
--- a/ansible_mitogen/runner.py
+++ b/ansible_mitogen/runner.py
@@ -26,6 +26,7 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
"""
These classes implement execution for each style of Ansible module. They are
@@ -35,23 +36,36 @@ Each class in here has a corresponding Planner class in planners.py that knows
how to build arguments for it, preseed related data, etc.
"""
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
import atexit
-import ctypes
-import errno
+import codecs
import imp
-import json
-import logging
import os
import shlex
+import shutil
import sys
import tempfile
+import traceback
import types
import mitogen.core
import ansible_mitogen.target # TODO: circular import
+from mitogen.core import b
+from mitogen.core import bytes_partition
+from mitogen.core import str_partition
+from mitogen.core import str_rpartition
+from mitogen.core import to_text
+
+try:
+ import ctypes
+except ImportError:
+ # Python 2.4
+ ctypes = None
+
+try:
+ import json
+except ImportError:
+ # Python 2.4
+ import simplejson as json
try:
# Cannot use cStringIO as it does not support Unicode.
@@ -64,6 +78,10 @@ try:
except ImportError:
from pipes import quote as shlex_quote
+# Absolute imports for <2.5.
+logging = __import__('logging')
+
+
# Prevent accidental import of an Ansible module from hanging on stdin read.
import ansible.module_utils.basic
ansible.module_utils.basic._ANSIBLE_ARGS = '{}'
@@ -72,18 +90,27 @@ ansible.module_utils.basic._ANSIBLE_ARGS = '{}'
# resolv.conf at startup and never implicitly reload it. Cope with that via an
# explicit call to res_init() on each task invocation. BSD-alikes export it
# directly, Linux #defines it as "__res_init".
-libc = ctypes.CDLL(None)
libc__res_init = None
-for symbol in 'res_init', '__res_init':
- try:
- libc__res_init = getattr(libc, symbol)
- except AttributeError:
- pass
+if ctypes:
+ libc = ctypes.CDLL(None)
+ for symbol in 'res_init', '__res_init':
+ try:
+ libc__res_init = getattr(libc, symbol)
+ except AttributeError:
+ pass
iteritems = getattr(dict, 'iteritems', dict.items)
LOG = logging.getLogger(__name__)
+if mitogen.core.PY3:
+ shlex_split = shlex.split
+else:
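+    # Py2 shlex cannot split unicode cleanly, so split a bytestring copy and
+    # decode each token back to text afterwards.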
+ def shlex_split(s, comments=False):
+ return [mitogen.core.to_text(token)
+ for token in shlex.split(str(s), comments=comments)]
+
+
class EnvironmentFileWatcher(object):
"""
Usually Ansible edits to /etc/environment and ~/.pam_environment are
@@ -118,8 +145,11 @@ class EnvironmentFileWatcher(object):
def _load(self):
try:
- with open(self.path, 'r') as fp:
+ fp = codecs.open(self.path, 'r', encoding='utf-8')
+ try:
return list(self._parse(fp))
+ finally:
+ fp.close()
except IOError:
return []
@@ -129,14 +159,14 @@ class EnvironmentFileWatcher(object):
"""
for line in fp:
# ' #export foo=some var ' -> ['#export', 'foo=some var ']
- bits = shlex.split(line, comments=True)
+ bits = shlex_split(line, comments=True)
if (not bits) or bits[0].startswith('#'):
continue
- if bits[0] == 'export':
+ if bits[0] == u'export':
bits.pop(0)
- key, sep, value = (' '.join(bits)).partition('=')
+ key, sep, value = str_partition(u' '.join(bits), u'=')
if key and sep:
yield key, value
@@ -255,7 +285,7 @@ class Runner(object):
self.service_context = service_context
self.econtext = econtext
self.detach = detach
- self.args = json.loads(json_args)
+ self.args = json.loads(mitogen.core.to_text(json_args))
self.good_temp_dir = good_temp_dir
self.extra_env = extra_env
self.env = env
@@ -354,6 +384,55 @@ class Runner(object):
self.revert()
+class AtExitWrapper(object):
+ """
+ issue #397, #454: Newer Ansibles use :func:`atexit.register` to trigger
+ tmpdir cleanup when AnsibleModule.tmpdir is responsible for creating its
+    own temporary directory. However, with Mitogen, processes are preserved
+    across tasks, so cleanup must happen earlier.
+
+ Patch :func:`atexit.register`, catching :func:`shutil.rmtree` calls so they
+ can be executed on task completion, rather than on process shutdown.
+ """
+ # Wrapped in a dict to avoid instance method decoration.
+ original = {
+ 'register': atexit.register
+ }
+
+ def __init__(self):
+ assert atexit.register == self.original['register'], \
+ "AtExitWrapper installed twice."
+ atexit.register = self._atexit__register
+ self.deferred = []
+
+ def revert(self):
+ """
+ Restore the original :func:`atexit.register`.
+ """
+ assert atexit.register == self._atexit__register, \
+ "AtExitWrapper not installed."
+ atexit.register = self.original['register']
+
+ def run_callbacks(self):
+ while self.deferred:
+ func, targs, kwargs = self.deferred.pop()
+ try:
+ func(*targs, **kwargs)
+ except Exception:
+ LOG.exception('While running atexit callbacks')
+
+ def _atexit__register(self, func, *targs, **kwargs):
+ """
+ Intercept :func:`atexit.register` calls, diverting any to
+ :func:`shutil.rmtree` into a private list.
+ """
+ if func == shutil.rmtree:
+ self.deferred.append((func, targs, kwargs))
+ return
+
+ self.original['register'](func, *targs, **kwargs)
+
+
class ModuleUtilsImporter(object):
"""
:param list module_utils:
@@ -388,7 +467,7 @@ class ModuleUtilsImporter(object):
mod.__path__ = []
mod.__package__ = str(fullname)
else:
- mod.__package__ = str(fullname.rpartition('.')[0])
+ mod.__package__ = str(str_rpartition(to_text(fullname), '.')[0])
exec(code, mod.__dict__)
self._loaded.add(fullname)
return mod
@@ -404,6 +483,8 @@ class TemporaryEnvironment(object):
self.original = dict(os.environ)
self.env = env or {}
for key, value in iteritems(self.env):
+ key = mitogen.core.to_text(key)
+ value = mitogen.core.to_text(value)
if value is None:
os.environ.pop(key, None)
else:
@@ -530,7 +611,7 @@ class ProgramRunner(Runner):
Return the final argument vector used to execute the program.
"""
return [
- self.args['_ansible_shell_executable'],
+ self.args.get('_ansible_shell_executable', '/bin/sh'),
'-c',
self._get_shell_fragment(),
]
@@ -547,18 +628,19 @@ class ProgramRunner(Runner):
args=self._get_argv(),
emulate_tty=self.emulate_tty,
)
- except Exception as e:
+ except Exception:
LOG.exception('While running %s', self._get_argv())
+ e = sys.exc_info()[1]
return {
- 'rc': 1,
- 'stdout': '',
- 'stderr': '%s: %s' % (type(e), e),
+ u'rc': 1,
+ u'stdout': u'',
+ u'stderr': u'%s: %s' % (type(e), e),
}
return {
- 'rc': rc,
- 'stdout': mitogen.core.to_text(stdout),
- 'stderr': mitogen.core.to_text(stderr),
+ u'rc': rc,
+ u'stdout': mitogen.core.to_text(stdout),
+ u'stderr': mitogen.core.to_text(stderr),
}
@@ -608,7 +690,7 @@ class ScriptRunner(ProgramRunner):
self.interpreter_fragment = interpreter_fragment
self.is_python = is_python
- b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'
+ b_ENCODING_STRING = b('# -*- coding: utf-8 -*-')
def _get_program(self):
return self._rewrite_source(
@@ -617,7 +699,7 @@ class ScriptRunner(ProgramRunner):
def _get_argv(self):
return [
- self.args['_ansible_shell_executable'],
+ self.args.get('_ansible_shell_executable', '/bin/sh'),
'-c',
self._get_shell_fragment(),
]
@@ -641,13 +723,13 @@ class ScriptRunner(ProgramRunner):
# While Ansible rewrites the #! using ansible_*_interpreter, it is
# never actually used to execute the script, instead it is a shell
# fragment consumed by shell/__init__.py::build_module_command().
- new = [b'#!' + utf8(self.interpreter_fragment)]
+ new = [b('#!') + utf8(self.interpreter_fragment)]
if self.is_python:
new.append(self.b_ENCODING_STRING)
- _, _, rest = s.partition(b'\n')
+ _, _, rest = bytes_partition(s, b('\n'))
new.append(rest)
- return b'\n'.join(new)
+ return b('\n').join(new)
class NewStyleRunner(ScriptRunner):
@@ -701,6 +783,7 @@ class NewStyleRunner(ScriptRunner):
)
self._setup_imports()
self._setup_excepthook()
+ self.atexit_wrapper = AtExitWrapper()
if libc__res_init:
libc__res_init()
@@ -708,6 +791,7 @@ class NewStyleRunner(ScriptRunner):
sys.excepthook = self.original_excepthook
def revert(self):
+ self.atexit_wrapper.revert()
self._argv.revert()
self._stdio.revert()
self._revert_excepthook()
@@ -733,16 +817,18 @@ class NewStyleRunner(ScriptRunner):
return self._code_by_path[self.path]
except KeyError:
return self._code_by_path.setdefault(self.path, compile(
- source=self.source,
- filename="master:" + self.path,
- mode='exec',
- dont_inherit=True,
+ # Py2.4 doesn't support kwargs.
+ self.source, # source
+ "master:" + self.path, # filename
+ 'exec', # mode
+ 0, # flags
+ True, # dont_inherit
))
if mitogen.core.PY3:
main_module_name = '__main__'
else:
- main_module_name = b'__main__'
+ main_module_name = b('__main__')
def _handle_magic_exception(self, mod, exc):
"""
@@ -764,18 +850,10 @@ class NewStyleRunner(ScriptRunner):
exec(code, vars(mod))
else:
exec('exec code in vars(mod)')
- except Exception as e:
- self._handle_magic_exception(mod, e)
+ except Exception:
+ self._handle_magic_exception(mod, sys.exc_info()[1])
raise
- def _run_atexit_funcs(self):
- """
- Newer Ansibles use atexit.register() to trigger tmpdir cleanup, when
- AnsibleModule.tmpdir is responsible for creating its own temporary
- directory.
- """
- atexit._run_exitfuncs()
-
def _run(self):
mod = types.ModuleType(self.main_module_name)
mod.__package__ = None
@@ -789,24 +867,30 @@ class NewStyleRunner(ScriptRunner):
)
code = self._get_code()
- exc = None
+ rc = 2
try:
try:
self._run_code(code, mod)
- finally:
- self._run_atexit_funcs()
- except SystemExit as e:
- exc = e
+ except SystemExit:
+ exc = sys.exc_info()[1]
+ rc = exc.args[0]
+ except Exception:
+ # This writes to stderr by default.
+ traceback.print_exc()
+ rc = 1
+
+ finally:
+ self.atexit_wrapper.run_callbacks()
return {
- 'rc': exc.args[0] if exc else 2,
- 'stdout': mitogen.core.to_text(sys.stdout.getvalue()),
- 'stderr': mitogen.core.to_text(sys.stderr.getvalue()),
+ u'rc': rc,
+ u'stdout': mitogen.core.to_text(sys.stdout.getvalue()),
+ u'stderr': mitogen.core.to_text(sys.stderr.getvalue()),
}
class JsonArgsRunner(ScriptRunner):
- JSON_ARGS = b'<>'
+ JSON_ARGS = b('<>')
def _get_args_contents(self):
return json.dumps(self.args).encode()
diff --git a/ansible_mitogen/services.py b/ansible_mitogen/services.py
index 199f2116..61286382 100644
--- a/ansible_mitogen/services.py
+++ b/ansible_mitogen/services.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
"""
Classes in this file define Mitogen 'services' that run (initially) within the
connection multiplexer process that is forked off the top-level controller
@@ -79,14 +81,35 @@ else:
def _get_candidate_temp_dirs():
- options = ansible.constants.config.get_plugin_options('shell', 'sh')
+ try:
+ # >=2.5
+ options = ansible.constants.config.get_plugin_options('shell', 'sh')
+ remote_tmp = options.get('remote_tmp') or ansible.constants.DEFAULT_REMOTE_TMP
+ system_tmpdirs = options.get('system_tmpdirs', ('/var/tmp', '/tmp'))
+ except AttributeError:
+ # 2.3
+ remote_tmp = ansible.constants.DEFAULT_REMOTE_TMP
+ system_tmpdirs = ('/var/tmp', '/tmp')
+
+ return mitogen.utils.cast([remote_tmp] + list(system_tmpdirs))
- # Pre 2.5 this came from ansible.constants.
- remote_tmp = (options.get('remote_tmp') or
- ansible.constants.DEFAULT_REMOTE_TMP)
- dirs = list(options.get('system_tmpdirs', ('/var/tmp', '/tmp')))
- dirs.insert(0, remote_tmp)
- return mitogen.utils.cast(dirs)
+
+def key_from_dict(**kwargs):
+ """
+ Return a unique string representation of a dict as quickly as possible.
+    Used to generate deduplication keys from a request.
+ """
+ out = []
+ stack = [kwargs]
+ while stack:
+ obj = stack.pop()
+ if isinstance(obj, dict):
+ stack.extend(sorted(obj.items()))
+ elif isinstance(obj, (list, tuple)):
+ stack.extend(obj)
+ else:
+ out.append(str(obj))
+ return ''.join(out)
class Error(Exception):
@@ -113,7 +136,7 @@ class ContextService(mitogen.service.Service):
super(ContextService, self).__init__(*args, **kwargs)
self._lock = threading.Lock()
#: Records the :meth:`get` result dict for successful calls, returned
- #: for identical subsequent calls. Keyed by :meth:`key_from_kwargs`.
+    #: for identical subsequent calls. Keyed by :func:`key_from_dict`.
self._response_by_key = {}
#: List of :class:`mitogen.core.Latch` awaiting the result for a
#: particular key.
@@ -126,8 +149,27 @@ class ContextService(mitogen.service.Service):
#: :attr:`max_interpreters` is reached, the most recently used context
#: is destroyed to make room for any additional context.
self._lru_by_via = {}
- #: :meth:`key_from_kwargs` result by Context.
+ #: :func:`key_from_dict` result by Context.
self._key_by_context = {}
+ #: Mapping of Context -> parent Context
+ self._via_by_context = {}
+
+ @mitogen.service.expose(mitogen.service.AllowParents())
+ @mitogen.service.arg_spec({
+ 'context': mitogen.core.Context
+ })
+ def reset(self, context):
+ """
+        Forget a context, forcing close and discard of the underlying
+ connection. Used for 'meta: reset_connection' or when some other error
+ is detected.
+ """
+ LOG.debug('%r.reset(%r)', self, context)
+ self._lock.acquire()
+ try:
+ self._shutdown_unlocked(context)
+ finally:
+ self._lock.release()
@mitogen.service.expose(mitogen.service.AllowParents())
@mitogen.service.arg_spec({
@@ -149,29 +191,13 @@ class ContextService(mitogen.service.Service):
finally:
self._lock.release()
- def key_from_kwargs(self, **kwargs):
- """
- Generate a deduplication key from the request.
- """
- out = []
- stack = [kwargs]
- while stack:
- obj = stack.pop()
- if isinstance(obj, dict):
- stack.extend(sorted(obj.items()))
- elif isinstance(obj, (list, tuple)):
- stack.extend(obj)
- else:
- out.append(str(obj))
- return ''.join(out)
-
def _produce_response(self, key, response):
"""
Reply to every waiting request matching a configuration key with a
response dictionary, deleting the list of waiters when done.
:param str key:
- Result of :meth:`key_from_kwargs`
+        Result of :func:`key_from_dict`
:param dict response:
Response dictionary
:returns:
@@ -187,6 +213,19 @@ class ContextService(mitogen.service.Service):
self._lock.release()
return count
+ def _forget_context_unlocked(self, context):
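+        # Caller must hold self._lock. Drops every table entry describing
+        # `context`, so a subsequent get() for the same key must build a
+        # fresh connection.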
+ key = self._key_by_context.get(context)
+ if key is None:
+ LOG.debug('%r: attempt to forget unknown %r', self, context)
+ return
+
+ self._response_by_key.pop(key, None)
+ self._latches_by_key.pop(key, None)
+ self._key_by_context.pop(context, None)
+ self._refs_by_context.pop(context, None)
+ self._via_by_context.pop(context, None)
+ self._lru_by_via.pop(context, None)
+
def _shutdown_unlocked(self, context, lru=None, new_context=None):
"""
Arrange for `context` to be shut down, and optionally add `new_context`
@@ -194,15 +233,15 @@ class ContextService(mitogen.service.Service):
"""
LOG.info('%r._shutdown_unlocked(): shutting down %r', self, context)
context.shutdown()
-
- key = self._key_by_context[context]
- del self._response_by_key[key]
- del self._refs_by_context[context]
- del self._key_by_context[context]
- if lru and context in lru:
- lru.remove(context)
- if new_context:
- lru.append(new_context)
+ via = self._via_by_context.get(context)
+ if via:
+ lru = self._lru_by_via.get(via)
+ if lru:
+ if context in lru:
+ lru.remove(context)
+ if new_context:
+ lru.append(new_context)
+ self._forget_context_unlocked(context)
def _update_lru_unlocked(self, new_context, spec, via):
"""
@@ -210,6 +249,8 @@ class ContextService(mitogen.service.Service):
by `kwargs`, destroying the most recently created context if the list
is full. Finally add `new_context` to the list.
"""
+ self._via_by_context[new_context] = via
+
lru = self._lru_by_via.setdefault(via, [])
if len(lru) < self.max_interpreters:
lru.append(new_context)
@@ -232,6 +273,23 @@ class ContextService(mitogen.service.Service):
finally:
self._lock.release()
+ @mitogen.service.expose(mitogen.service.AllowParents())
+ def dump(self):
+ """
+ For testing, return a list of dicts describing every currently
+ connected context.
+ """
+ return [
+ {
+ 'context_name': context.name,
+ 'via': getattr(self._via_by_context.get(context),
+ 'name', None),
+ 'refs': self._refs_by_context.get(context),
+ }
+ for context, key in sorted(self._key_by_context.items(),
+ key=lambda c_k: c_k[0].context_id)
+ ]
+
@mitogen.service.expose(mitogen.service.AllowParents())
def shutdown_all(self):
"""
@@ -241,30 +299,19 @@ class ContextService(mitogen.service.Service):
try:
for context in list(self._key_by_context):
self._shutdown_unlocked(context)
- self._lru_by_via = {}
finally:
self._lock.release()
- def _on_stream_disconnect(self, stream):
+ def _on_context_disconnect(self, context):
"""
- Respond to Stream disconnection by deleting any record of contexts
- reached via that stream. This method runs in the Broker thread and must
- not to block.
+    Respond to a Context disconnect event by deleting any record of the
+    no-longer-reachable context. This method runs in the Broker thread and
+    must not block.
"""
- # TODO: there is a race between creation of a context and disconnection
- # of its related stream. An error reply should be sent to any message
- # in _latches_by_key below.
self._lock.acquire()
try:
- for context, key in list(self._key_by_context.items()):
- if context.context_id in stream.routes:
- LOG.info('Dropping %r due to disconnect of %r',
- context, stream)
- self._response_by_key.pop(key, None)
- self._latches_by_key.pop(key, None)
- self._refs_by_context.pop(context, None)
- self._lru_by_via.pop(context, None)
- self._refs_by_context.pop(context, None)
+ LOG.info('%r: Forgetting %r due to stream disconnect', self, context)
+ self._forget_context_unlocked(context)
finally:
self._lock.release()
@@ -330,13 +377,10 @@ class ContextService(mitogen.service.Service):
context = method(via=via, unidirectional=True, **spec['kwargs'])
if via and spec.get('enable_lru'):
self._update_lru(context, spec, via)
- else:
- # For directly connected contexts, listen to the associated
- # Stream's disconnect event and use it to invalidate dependent
- # Contexts.
- stream = self.router.stream_by_id(context.context_id)
- mitogen.core.listen(stream, 'disconnect',
- lambda: self._on_stream_disconnect(stream))
+
+ # Forget the context when its disconnect event fires.
+ mitogen.core.listen(context, 'disconnect',
+ lambda: self._on_context_disconnect(context))
self._send_module_forwards(context)
init_child_result = context.call(
@@ -360,7 +404,7 @@ class ContextService(mitogen.service.Service):
def _wait_or_start(self, spec, via=None):
latch = mitogen.core.Latch()
- key = self.key_from_kwargs(via=via, **spec)
+ key = key_from_dict(via=via, **spec)
self._lock.acquire()
try:
response = self._response_by_key.get(key)
@@ -453,14 +497,16 @@ class ModuleDepService(mitogen.service.Service):
def _get_builtin_names(self, builtin_path, resolved):
return [
- fullname
+ mitogen.core.to_text(fullname)
for fullname, path, is_pkg in resolved
if os.path.abspath(path).startswith(builtin_path)
]
def _get_custom_tups(self, builtin_path, resolved):
return [
- (fullname, path, is_pkg)
+ (mitogen.core.to_text(fullname),
+ mitogen.core.to_text(path),
+ is_pkg)
for fullname, path, is_pkg in resolved
if not os.path.abspath(path).startswith(builtin_path)
]
diff --git a/ansible_mitogen/strategy.py b/ansible_mitogen/strategy.py
index e105984c..4d1636e2 100644
--- a/ansible_mitogen/strategy.py
+++ b/ansible_mitogen/strategy.py
@@ -28,11 +28,45 @@
from __future__ import absolute_import
import os
+import signal
+import threading
+import mitogen.core
+import ansible_mitogen.affinity
import ansible_mitogen.loaders
import ansible_mitogen.mixins
import ansible_mitogen.process
+import ansible.executor.process.worker
+
+
+def _patch_awx_callback():
+ """
+ issue #400: AWX loads a display callback that suffers from thread-safety
+ issues. Detect the presence of older AWX versions and patch the bug.
+ """
+ # AWX uses sitecustomize.py to force-load this package. If it exists, we're
+ # running under AWX.
+ try:
+ from awx_display_callback.events import EventContext
+ from awx_display_callback.events import event_context
+ except ImportError:
+ return
+
+ if hasattr(EventContext(), '_local'):
+ # Patched version.
+ return
+
+ def patch_add_local(self, **kwargs):
+ tls = vars(self._local)
+ ctx = tls.setdefault('_ctx', {})
+ ctx.update(kwargs)
+
+ EventContext._local = threading.local()
+ EventContext.add_local = patch_add_local
+
+_patch_awx_callback()
+
def wrap_action_loader__get(name, *args, **kwargs):
"""
@@ -46,7 +80,6 @@ def wrap_action_loader__get(name, *args, **kwargs):
"""
klass = action_loader__get(name, class_only=True)
if klass:
- wrapped_name = 'MitogenActionModule_' + name
bases = (ansible_mitogen.mixins.ActionModuleMixin, klass)
adorned_klass = type(str(name), bases, {})
if kwargs.get('class_only'):
@@ -65,6 +98,22 @@ def wrap_connection_loader__get(name, *args, **kwargs):
return connection_loader__get(name, *args, **kwargs)
+def wrap_worker__run(*args, **kwargs):
+ """
+    When profiling is active, ignore the parent's attempt to TERM the worker
+    so pending output is written; in all cases, pin the worker to a CPU and
+    run the real WorkerProcess.run() inside the profiling hook.
+ """
+ # Ignore parent's attempts to murder us when we still need to write
+ # profiling output.
+ if mitogen.core._profile_hook.__name__ != '_profile_hook':
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+
+ ansible_mitogen.affinity.policy.assign_worker()
+ return mitogen.core._profile_hook('WorkerProcess',
+ lambda: worker__run(*args, **kwargs)
+ )
+
+
class StrategyMixin(object):
"""
This mix-in enhances any built-in strategy by arranging for various Mitogen
@@ -139,22 +188,56 @@ class StrategyMixin(object):
connection_loader__get = ansible_mitogen.loaders.connection_loader.get
ansible_mitogen.loaders.connection_loader.get = wrap_connection_loader__get
+ global worker__run
+ worker__run = ansible.executor.process.worker.WorkerProcess.run
+ ansible.executor.process.worker.WorkerProcess.run = wrap_worker__run
+
def _remove_wrappers(self):
"""
Uninstall the PluginLoader monkey patches.
"""
ansible_mitogen.loaders.action_loader.get = action_loader__get
ansible_mitogen.loaders.connection_loader.get = connection_loader__get
+ ansible.executor.process.worker.WorkerProcess.run = worker__run
- def _add_connection_plugin_path(self):
+ def _add_plugin_paths(self):
"""
- Add the mitogen connection plug-in directory to the ModuleLoader path,
- avoiding the need for manual configuration.
+        Add the Mitogen plug-in directories to the PluginLoader search paths,
+        avoiding the need for manual configuration.
"""
base_dir = os.path.join(os.path.dirname(__file__), 'plugins')
ansible_mitogen.loaders.connection_loader.add_directory(
os.path.join(base_dir, 'connection')
)
+ ansible_mitogen.loaders.action_loader.add_directory(
+ os.path.join(base_dir, 'action')
+ )
+
+ def _queue_task(self, host, task, task_vars, play_context):
+ """
+ Many PluginLoader caches are defective as they are only populated in
+ the ephemeral WorkerProcess. Touch each plug-in path before forking to
+ ensure all workers receive a hot cache.
+ """
+ ansible_mitogen.loaders.module_loader.find_plugin(
+ name=task.action,
+ mod_type='',
+ )
+ ansible_mitogen.loaders.connection_loader.get(
+ name=play_context.connection,
+ class_only=True,
+ )
+ ansible_mitogen.loaders.action_loader.get(
+ name=task.action,
+ class_only=True,
+ )
+
+ return super(StrategyMixin, self)._queue_task(
+ host=host,
+ task=task,
+ task_vars=task_vars,
+ play_context=play_context,
+ )
def run(self, iterator, play_context, result=0):
"""
@@ -162,9 +245,12 @@ class StrategyMixin(object):
the strategy's real run() method.
"""
ansible_mitogen.process.MuxProcess.start()
- self._add_connection_plugin_path()
+ run = super(StrategyMixin, self).run
+ self._add_plugin_paths()
self._install_wrappers()
try:
- return super(StrategyMixin, self).run(iterator, play_context)
+ return mitogen.core._profile_hook('Strategy',
+ lambda: run(iterator, play_context)
+ )
finally:
self._remove_wrappers()
diff --git a/ansible_mitogen/target.py b/ansible_mitogen/target.py
index ff6ed083..01877e34 100644
--- a/ansible_mitogen/target.py
+++ b/ansible_mitogen/target.py
@@ -26,24 +26,19 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
"""
Helper functions intended to be executed on the target. These are entrypoints
for file transfer, module execution and sundry bits like changing file modes.
"""
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
import errno
-import functools
import grp
-import json
-import logging
import operator
import os
import pwd
import re
-import resource
import signal
import stat
import subprocess
@@ -52,10 +47,32 @@ import tempfile
import traceback
import types
+# Absolute imports for <2.5.
+logging = __import__('logging')
+
import mitogen.core
import mitogen.fork
import mitogen.parent
import mitogen.service
+from mitogen.core import b
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+try:
+ reduce
+except NameError:
+ # Python 3.x.
+ from functools import reduce
+
+try:
+ BaseException
+except NameError:
+ # Python 2.4
+ BaseException = Exception
+
# Ansible since PR #41749 inserts "import __main__" into
# ansible.module_utils.basic. Mitogen's importer will refuse such an import, so
@@ -71,16 +88,23 @@ import ansible_mitogen.runner
LOG = logging.getLogger(__name__)
MAKE_TEMP_FAILED_MSG = (
- "Unable to find a useable temporary directory. This likely means no\n"
- "system-supplied TMP directory can be written to, or all directories\n"
- "were mounted on 'noexec' filesystems.\n"
- "\n"
- "The following paths were tried:\n"
- " %(namelist)s\n"
- "\n"
- "Please check '-vvv' output for a log of individual path errors."
+ u"Unable to find a useable temporary directory. This likely means no\n"
+ u"system-supplied TMP directory can be written to, or all directories\n"
+ u"were mounted on 'noexec' filesystems.\n"
+ u"\n"
+ u"The following paths were tried:\n"
+ u" %(namelist)s\n"
+ u"\n"
+ u"Please check '-vvv' output for a log of individual path errors."
)
+# Python 2.4/2.5 cannot support fork+threads whatsoever; fork doesn't even fix
+# up interpreter state. So 2.4/2.5 interpreters start .local() contexts for
+# isolation instead. Since we don't have any crazy memory sharing problems to
+# avoid, there is no virginal fork parent either. The child is started directly
+# from the login/become process. In future this will be default everywhere,
+# fork is brainwrong from the stone age.
+FORK_SUPPORTED = sys.version_info >= (2, 6)
#: Initialized to an econtext.parent.Context pointing at a pristine fork of
#: the target Python interpreter before it executes any code or imports.
@@ -91,20 +115,46 @@ _fork_parent = None
good_temp_dir = None
-# issue #362: subprocess.Popen(close_fds=True) aka. AnsibleModule.run_command()
-# loops the entire SC_OPEN_MAX space. CentOS>5 ships with 1,048,576 FDs by
-# default, resulting in huge (>500ms) runtime waste running many commands.
-# Therefore if we are a child, cap the range to something reasonable.
-rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
-if (rlimit[0] > 512 or rlimit[1] > 512) and not mitogen.is_master:
- resource.setrlimit(resource.RLIMIT_NOFILE, (512, 512))
- subprocess.MAXFD = 512 # Python <3.x
-del rlimit
+def subprocess__Popen__close_fds(self, but):
+ """
+ issue #362, #435: subprocess.Popen(close_fds=True) aka.
+ AnsibleModule.run_command() loops the entire FD space on Python<3.2.
+ CentOS>5 ships with 1,048,576 FDs by default, resulting in huge (>500ms)
+ latency starting children. Therefore replace Popen._close_fds on Linux with
+ a version that is O(fds) rather than O(_SC_OPEN_MAX).
+ """
+ try:
+ names = os.listdir(u'/proc/self/fd')
+ except OSError:
+ # May fail if acting on a container that does not have /proc mounted.
+ self._original_close_fds(but)
+ return
+
+ for name in names:
+ if not name.isdigit():
+ continue
+
+ fd = int(name, 10)
+ if fd > 2 and fd != but:
+ try:
+ os.close(fd)
+ except OSError:
+ pass
+
+
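+# Only patch where it is needed and safe: on Linux (so /proc/self/fd exists),
+# on Python 2 (the guard leaves 3.x alone), when the private Popen._close_fds
+# hook exists, and never in the master process.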
+if (
+ sys.platform.startswith(u'linux') and
+ sys.version < u'3.0' and
+ hasattr(subprocess.Popen, u'_close_fds') and
+ not mitogen.is_master
+):
+ subprocess.Popen._original_close_fds = subprocess.Popen._close_fds
+ subprocess.Popen._close_fds = subprocess__Popen__close_fds
def get_small_file(context, path):
"""
- Basic in-memory caching module fetcher. This generates an one roundtrip for
+ Basic in-memory caching module fetcher. This generates one roundtrip for
every previously unseen file, so it is only a temporary solution.
:param context:
@@ -117,7 +167,7 @@ def get_small_file(context, path):
Bytestring file data.
"""
pool = mitogen.service.get_or_create_pool(router=context.router)
- service = pool.get_service('mitogen.service.PushFileService')
+ service = pool.get_service(u'mitogen.service.PushFileService')
return service.get(path)
@@ -127,8 +177,8 @@ def transfer_file(context, in_path, out_path, sync=False, set_owner=False):
controller.
:param mitogen.core.Context context:
- Reference to the context hosting the FileService that will be used to
- fetch the file.
+ Reference to the context hosting the FileService that will transmit the
+ file.
:param bytes in_path:
FileService registered name of the input file.
:param bytes out_path:
@@ -159,9 +209,10 @@ def transfer_file(context, in_path, out_path, sync=False, set_owner=False):
if not ok:
raise IOError('transfer of %r was interrupted.' % (in_path,))
- os.fchmod(fp.fileno(), metadata['mode'])
+ set_file_mode(tmp_path, metadata['mode'], fd=fp.fileno())
if set_owner:
- set_fd_owner(fp.fileno(), metadata['owner'], metadata['group'])
+ set_file_owner(tmp_path, metadata['owner'], metadata['group'],
+ fd=fp.fileno())
finally:
fp.close()
@@ -184,7 +235,8 @@ def prune_tree(path):
try:
os.unlink(path)
return
- except OSError as e:
+ except OSError:
+ e = sys.exc_info()[1]
if not (os.path.isdir(path) and
e.args[0] in (errno.EPERM, errno.EISDIR)):
LOG.error('prune_tree(%r): %s', path, e)
@@ -194,7 +246,8 @@ def prune_tree(path):
# Ensure write access for readonly directories. Ignore error in case
# path is on a weird filesystem (e.g. vfat).
os.chmod(path, int('0700', 8))
- except OSError as e:
+ except OSError:
+ e = sys.exc_info()[1]
LOG.warning('prune_tree(%r): %s', path, e)
try:
@@ -202,7 +255,8 @@ def prune_tree(path):
if name not in ('.', '..'):
prune_tree(os.path.join(path, name))
os.rmdir(path)
- except OSError as e:
+ except OSError:
+ e = sys.exc_info()[1]
LOG.error('prune_tree(%r): %s', path, e)
@@ -223,7 +277,8 @@ def is_good_temp_dir(path):
if not os.path.exists(path):
try:
os.makedirs(path, mode=int('0700', 8))
- except OSError as e:
+ except OSError:
+ e = sys.exc_info()[1]
LOG.debug('temp dir %r unusable: did not exist and attempting '
'to create it failed: %s', path, e)
return False
@@ -233,24 +288,26 @@ def is_good_temp_dir(path):
prefix='ansible_mitogen_is_good_temp_dir',
dir=path,
)
- except (OSError, IOError) as e:
+ except (OSError, IOError):
+ e = sys.exc_info()[1]
LOG.debug('temp dir %r unusable: %s', path, e)
return False
try:
try:
os.chmod(tmp.name, int('0700', 8))
- except OSError as e:
- LOG.debug('temp dir %r unusable: %s: chmod failed: %s',
- path, e)
+ except OSError:
+ e = sys.exc_info()[1]
+ LOG.debug('temp dir %r unusable: chmod failed: %s', path, e)
return False
try:
# access(.., X_OK) is sufficient to detect noexec.
if not os.access(tmp.name, os.X_OK):
raise OSError('filesystem appears to be mounted noexec')
- except OSError as e:
- LOG.debug('temp dir %r unusable: %s: %s', path, e)
+ except OSError:
+ e = sys.exc_info()[1]
+ LOG.debug('temp dir %r unusable: %s', path, e)
return False
finally:
tmp.close()
@@ -305,8 +362,9 @@ def init_child(econtext, log_level, candidate_temp_dirs):
Dict like::
{
- 'fork_context': mitogen.core.Context.
- 'home_dir': str.
+ 'fork_context': mitogen.core.Context or None,
+ 'good_temp_dir': ...
+ 'home_dir': str
}
Where `fork_context` refers to the newly forked 'fork parent' context
@@ -320,28 +378,36 @@ def init_child(econtext, log_level, candidate_temp_dirs):
logging.getLogger('ansible_mitogen').setLevel(log_level)
global _fork_parent
- mitogen.parent.upgrade_router(econtext)
- _fork_parent = econtext.router.fork()
+ if FORK_SUPPORTED:
+ mitogen.parent.upgrade_router(econtext)
+ _fork_parent = econtext.router.fork()
global good_temp_dir
good_temp_dir = find_good_temp_dir(candidate_temp_dirs)
return {
- 'fork_context': _fork_parent,
- 'home_dir': mitogen.core.to_text(os.path.expanduser('~')),
- 'good_temp_dir': good_temp_dir,
+ u'fork_context': _fork_parent,
+ u'home_dir': mitogen.core.to_text(os.path.expanduser('~')),
+ u'good_temp_dir': good_temp_dir,
}
@mitogen.core.takes_econtext
-def create_fork_child(econtext):
+def spawn_isolated_child(econtext):
"""
For helper functions executed in the fork parent context, arrange for
the context's router to be upgraded as necessary and for a new child to be
prepared.
+
+ The actual fork occurs from the 'virginal fork parent', which does not have
+ any Ansible modules loaded prior to fork, to avoid conflicts resulting from
+ custom module_utils paths.
"""
mitogen.parent.upgrade_router(econtext)
- context = econtext.router.fork()
+ if FORK_SUPPORTED:
+ context = econtext.router.fork()
+ else:
+ context = econtext.router.local()
LOG.debug('create_fork_child() -> %r', context)
return context
@@ -355,7 +421,7 @@ def run_module(kwargs):
"""
runner_name = kwargs.pop('runner_name')
klass = getattr(ansible_mitogen.runner, runner_name)
- impl = klass(**kwargs)
+ impl = klass(**mitogen.core.Kwargs(kwargs))
return impl.run()
@@ -366,9 +432,10 @@ def _get_async_dir():
class AsyncRunner(object):
- def __init__(self, job_id, timeout_secs, econtext, kwargs):
+ def __init__(self, job_id, timeout_secs, started_sender, econtext, kwargs):
self.job_id = job_id
self.timeout_secs = timeout_secs
+ self.started_sender = started_sender
self.econtext = econtext
self.kwargs = kwargs
self._timed_out = False
@@ -388,8 +455,11 @@ class AsyncRunner(object):
dct.setdefault('ansible_job_id', self.job_id)
dct.setdefault('data', '')
- with open(self.path + '.tmp', 'w') as fp:
+ fp = open(self.path + '.tmp', 'w')
+ try:
fp.write(json.dumps(dct))
+ finally:
+ fp.close()
os.rename(self.path + '.tmp', self.path)
def _on_sigalrm(self, signum, frame):
@@ -448,6 +518,7 @@ class AsyncRunner(object):
'finished': 0,
'pid': os.getpid()
})
+ self.started_sender.send(True)
if self.timeout_secs > 0:
self._install_alarm()
@@ -483,13 +554,26 @@ class AsyncRunner(object):
@mitogen.core.takes_econtext
-def run_module_async(kwargs, job_id, timeout_secs, econtext):
+def run_module_async(kwargs, job_id, timeout_secs, started_sender, econtext):
"""
Execute a module with its run status and result written to a file,
terminating on the process on completion. This function must run in a child
forked using :func:`create_fork_child`.
- """
- arunner = AsyncRunner(job_id, timeout_secs, econtext, kwargs)
+
+    :param mitogen.core.Sender started_sender:
+ A sender that will receive :data:`True` once the job has reached a
+ point where its initial job file has been written. This is required to
+ avoid a race where an overly eager controller can check for a task
+ before it has reached that point in execution, which is possible at
+ least on Python 2.4, where forking is not available for async tasks.
+ """
+ arunner = AsyncRunner(
+ job_id,
+ timeout_secs,
+ started_sender,
+ econtext,
+ kwargs
+ )
arunner.run()
@@ -541,8 +625,8 @@ def exec_args(args, in_data='', chdir=None, shell=None, emulate_tty=False):
stdout, stderr = proc.communicate(in_data)
if emulate_tty:
- stdout = stdout.replace(b'\n', b'\r\n')
- return proc.returncode, stdout, stderr or ''
+ stdout = stdout.replace(b('\n'), b('\r\n'))
+ return proc.returncode, stdout, stderr or b('')
def exec_command(cmd, in_data='', chdir=None, shell=None, emulate_tty=False):
@@ -574,7 +658,7 @@ def read_path(path):
return open(path, 'rb').read()
-def set_fd_owner(fd, owner, group=None):
+def set_file_owner(path, owner, group=None, fd=None):
if owner:
uid = pwd.getpwnam(owner).pw_uid
else:
@@ -585,7 +669,11 @@ def set_fd_owner(fd, owner, group=None):
else:
gid = os.getegid()
- os.fchown(fd, (uid, gid))
+ if fd is not None and hasattr(os, 'fchown'):
+        # os.fchown() takes separate uid/gid arguments, not a tuple.
+        os.fchown(fd, uid, gid)
+    else:
+        # Python<2.6 lacks os.fchown(); fall back to path-based chown.
+        os.chown(path, uid, gid)
def write_path(path, s, owner=None, group=None, mode=None,
@@ -603,9 +691,9 @@ def write_path(path, s, owner=None, group=None, mode=None,
try:
try:
if mode:
- os.fchmod(fp.fileno(), mode)
+ set_file_mode(tmp_path, mode, fd=fp.fileno())
if owner or group:
- set_fd_owner(fp.fileno(), owner, group)
+ set_file_owner(tmp_path, owner, group, fd=fp.fileno())
fp.write(s)
finally:
fp.close()
@@ -645,14 +733,14 @@ def apply_mode_spec(spec, mode):
Given a symbolic file mode change specification in the style of chmod(1)
`spec`, apply changes in the specification to the numeric file mode `mode`.
"""
- for clause in spec.split(','):
+ for clause in mitogen.core.to_text(spec).split(','):
match = CHMOD_CLAUSE_PAT.match(clause)
who, op, perms = match.groups()
for ch in who or 'a':
mask = CHMOD_MASKS[ch]
bits = CHMOD_BITS[ch]
cur_perm_bits = mode & mask
- new_perm_bits = functools.reduce(operator.or_, (bits[p] for p in perms), 0)
+ new_perm_bits = reduce(operator.or_, (bits[p] for p in perms), 0)
mode &= ~mask
if op == '=':
mode |= new_perm_bits
@@ -663,15 +751,30 @@ def apply_mode_spec(spec, mode):
return mode
-def set_file_mode(path, spec):
+def set_file_mode(path, spec, fd=None):
"""
Update the permissions of a file using the same syntax as chmod(1).
"""
- mode = os.stat(path).st_mode
-
- if spec.isdigit():
+ if isinstance(spec, int):
+ new_mode = spec
+ elif not mitogen.core.PY3 and isinstance(spec, long):
+ new_mode = spec
+ elif spec.isdigit():
new_mode = int(spec, 8)
else:
+ mode = os.stat(path).st_mode
new_mode = apply_mode_spec(spec, mode)
- os.chmod(path, new_mode)
+ if fd is not None and hasattr(os, 'fchmod'):
+ os.fchmod(fd, new_mode)
+ else:
+ os.chmod(path, new_mode)
+
+
+def file_exists(path):
+ """
+ Return :data:`True` if `path` exists. This is a wrapper function over
+ :func:`os.path.exists`, since its implementation module varies across
+ Python versions.
+ """
+ return os.path.exists(path)
diff --git a/ansible_mitogen/transport_config.py b/ansible_mitogen/transport_config.py
new file mode 100644
index 00000000..290c12d5
--- /dev/null
+++ b/ansible_mitogen/transport_config.py
@@ -0,0 +1,593 @@
+# Copyright 2017, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+"""
+Mitogen extends Ansible's target configuration mechanism in several ways that
+require some care:
+
+* Per-task configurables in Ansible like ansible_python_interpreter are
+ connection-layer configurables in Mitogen. They must be extracted during each
+ task execution to form the complete connection-layer configuration.
+
+* Mitogen has extra configurables not supported by Ansible at all, such as
+ mitogen_ssh_debug_level. These are extracted the same way as
+ ansible_python_interpreter.
+
+* Mitogen allows connections to be delegated to other machines. Ansible has no
+ internal framework for this, and so Mitogen must figure out a delegated
+ connection configuration all on its own. It cannot reuse much of the Ansible
+ machinery for building a connection configuration, as that machinery is
+ deeply spread out and hard-wired to expect Ansible's usual mode of operation.
+
+For normal and delegate_to connections, Ansible's PlayContext is reused where
+possible to maximize compatibility, but for proxy hops, configurations are
+built up using the HostVars magic class to call VariableManager.get_vars()
+behind the scenes on our behalf. Where Ansible has multiple sources of a
+configuration item, for example, ansible_ssh_extra_args, Mitogen must (ideally
+perfectly) reproduce how Ansible arrives at its value, without using mechanisms
+that are hard-wired or change across Ansible versions.
+
+That is what this file is for. It exports two spec classes, one that takes all
+information from PlayContext, and another that takes (almost) all information
+from HostVars.
+"""
+
+import abc
+import os
+import ansible.utils.shlex
+import ansible.constants as C
+
+from ansible.module_utils.six import with_metaclass
+
+
+import mitogen.core
+
+
+def parse_python_path(s):
+ """
+    Given the string set for ansible_python_interpreter, parse it using shell
+ syntax and return an appropriate argument vector.
+ """
+ if s:
+ return ansible.utils.shlex.shlex_split(s)
+
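+# e.g. parse_python_path('/usr/bin/env python') -> ['/usr/bin/env', 'python'];
+# an empty or None value returns None, leaving the connection default in use.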
+
+def optional_secret(value):
+ """
+ Wrap `value` in :class:`mitogen.core.Secret` if it is not :data:`None`,
+ otherwise return :data:`None`.
+ """
+ if value is not None:
+ return mitogen.core.Secret(value)
+
+
+def first_true(it, default=None):
+ """
+ Return the first truthy element from `it`.
+ """
+ for elem in it:
+ if elem:
+ return elem
+ return default
+
+
+class Spec(with_metaclass(abc.ABCMeta, object)):
+ """
+ A source for variables that comprise a connection configuration.
+ """
+
+ @abc.abstractmethod
+ def transport(self):
+ """
+ The name of the Ansible plug-in implementing the connection.
+ """
+
+ @abc.abstractmethod
+ def inventory_name(self):
+ """
+ The name of the target being connected to as it appears in Ansible's
+ inventory.
+ """
+
+ @abc.abstractmethod
+ def remote_addr(self):
+ """
+ The network address of the target, or for container and other special
+ targets, some other unique identifier.
+ """
+
+ @abc.abstractmethod
+ def remote_user(self):
+ """
+ The username of the login account on the target.
+ """
+
+ @abc.abstractmethod
+ def password(self):
+ """
+ The password of the login account on the target.
+ """
+
+ @abc.abstractmethod
+ def become(self):
+ """
+ :data:`True` if privilege escalation should be active.
+ """
+
+ @abc.abstractmethod
+ def become_method(self):
+ """
+ The name of the Ansible become method to use.
+ """
+
+ @abc.abstractmethod
+ def become_user(self):
+ """
+ The username of the target account for become.
+ """
+
+ @abc.abstractmethod
+ def become_pass(self):
+ """
+ The password of the target account for become.
+ """
+
+ @abc.abstractmethod
+ def port(self):
+ """
+ The port of the login service on the target machine.
+ """
+
+ @abc.abstractmethod
+ def python_path(self):
+ """
+ Path to the Python interpreter on the target machine.
+ """
+
+ @abc.abstractmethod
+ def private_key_file(self):
+ """
+ Path to the SSH private key file to use to login.
+ """
+
+ @abc.abstractmethod
+ def ssh_executable(self):
+ """
+ Path to the SSH executable.
+ """
+
+ @abc.abstractmethod
+ def timeout(self):
+ """
+ The generic timeout for all connections.
+ """
+
+ @abc.abstractmethod
+ def ansible_ssh_timeout(self):
+ """
+ The SSH-specific timeout for a connection.
+ """
+
+ @abc.abstractmethod
+ def ssh_args(self):
+ """
+ The list of additional arguments that should be included in an SSH
+ invocation.
+ """
+
+ @abc.abstractmethod
+ def become_exe(self):
+ """
+ The path to the executable implementing the become method on the remote
+ machine.
+ """
+
+ @abc.abstractmethod
+ def sudo_args(self):
+ """
+ The list of additional arguments that should be included in a become
+ invocation.
+ """
+ # TODO: split out into sudo_args/become_args.
+
+ @abc.abstractmethod
+ def mitogen_via(self):
+ """
+ The value of the mitogen_via= variable for this connection. Indicates
+ the connection should be established via an intermediary.
+ """
+
+ @abc.abstractmethod
+ def mitogen_kind(self):
+ """
+ The type of container to use with the "setns" transport.
+ """
+
+ @abc.abstractmethod
+ def mitogen_docker_path(self):
+ """
+ The path to the "docker" program for the 'docker' transport.
+ """
+
+ @abc.abstractmethod
+ def mitogen_kubectl_path(self):
+ """
+ The path to the "kubectl" program for the 'docker' transport.
+ """
+
+ @abc.abstractmethod
+ def mitogen_lxc_path(self):
+ """
+ The path to the "lxc" program for the 'lxd' transport.
+ """
+
+ @abc.abstractmethod
+ def mitogen_lxc_attach_path(self):
+ """
+ The path to the "lxc-attach" program for the 'lxc' transport.
+ """
+
+ @abc.abstractmethod
+ def mitogen_lxc_info_path(self):
+ """
+ The path to the "lxc-info" program for the 'lxc' transport.
+ """
+
+ @abc.abstractmethod
+ def mitogen_machinectl_path(self):
+ """
+ The path to the "machinectl" program for the 'setns' transport.
+ """
+
+ @abc.abstractmethod
+ def mitogen_ssh_debug_level(self):
+ """
+ The SSH debug level.
+ """
+
+ @abc.abstractmethod
+ def mitogen_ssh_compression(self):
+ """
+ Whether SSH compression is enabled.
+ """
+
+ @abc.abstractmethod
+ def extra_args(self):
+ """
+ Connection-specific arguments.
+ """
+
+
+class PlayContextSpec(Spec):
+ """
+ PlayContextSpec takes almost all its information as-is from Ansible's
+ PlayContext. It is used for normal connections and delegate_to connections,
+ and should always be accurate.
+ """
+ def __init__(self, connection, play_context, transport, inventory_name):
+ self._connection = connection
+ self._play_context = play_context
+ self._transport = transport
+ self._inventory_name = inventory_name
+
+ def transport(self):
+ return self._transport
+
+ def inventory_name(self):
+ return self._inventory_name
+
+ def remote_addr(self):
+ return self._play_context.remote_addr
+
+ def remote_user(self):
+ return self._play_context.remote_user
+
+ def become(self):
+ return self._play_context.become
+
+ def become_method(self):
+ return self._play_context.become_method
+
+ def become_user(self):
+ return self._play_context.become_user
+
+ def become_pass(self):
+ return optional_secret(self._play_context.become_pass)
+
+ def password(self):
+ return optional_secret(self._play_context.password)
+
+ def port(self):
+ return self._play_context.port
+
+ def python_path(self):
+ return parse_python_path(
+ self._connection.get_task_var('ansible_python_interpreter')
+ )
+
+ def private_key_file(self):
+ return self._play_context.private_key_file
+
+ def ssh_executable(self):
+ return self._play_context.ssh_executable
+
+ def timeout(self):
+ return self._play_context.timeout
+
+ def ansible_ssh_timeout(self):
+ return (
+ self._connection.get_task_var('ansible_timeout') or
+ self._connection.get_task_var('ansible_ssh_timeout') or
+ self.timeout()
+ )
+
+ def ssh_args(self):
+ return [
+ mitogen.core.to_text(term)
+ for s in (
+ getattr(self._play_context, 'ssh_args', ''),
+ getattr(self._play_context, 'ssh_common_args', ''),
+ getattr(self._play_context, 'ssh_extra_args', '')
+ )
+ for term in ansible.utils.shlex.shlex_split(s or '')
+ ]
+
+ def become_exe(self):
+ return self._play_context.become_exe
+
+ def sudo_args(self):
+ return [
+ mitogen.core.to_text(term)
+ for term in ansible.utils.shlex.shlex_split(
+ first_true((
+ self._play_context.become_flags,
+ self._play_context.sudo_flags,
+ # Ansible 2.3.
+ getattr(C, 'DEFAULT_BECOME_FLAGS', ''),
+ getattr(C, 'DEFAULT_SUDO_FLAGS', '')
+ ), default='')
+ )
+ ]
+
+ def mitogen_via(self):
+ return self._connection.get_task_var('mitogen_via')
+
+ def mitogen_kind(self):
+ return self._connection.get_task_var('mitogen_kind')
+
+ def mitogen_docker_path(self):
+ return self._connection.get_task_var('mitogen_docker_path')
+
+ def mitogen_kubectl_path(self):
+ return self._connection.get_task_var('mitogen_kubectl_path')
+
+ def mitogen_lxc_path(self):
+ return self._connection.get_task_var('mitogen_lxc_path')
+
+ def mitogen_lxc_attach_path(self):
+ return self._connection.get_task_var('mitogen_lxc_attach_path')
+
+ def mitogen_lxc_info_path(self):
+ return self._connection.get_task_var('mitogen_lxc_info_path')
+
+ def mitogen_machinectl_path(self):
+ return self._connection.get_task_var('mitogen_machinectl_path')
+
+ def mitogen_ssh_debug_level(self):
+ return self._connection.get_task_var('mitogen_ssh_debug_level')
+
+ def mitogen_ssh_compression(self):
+ return self._connection.get_task_var('mitogen_ssh_compression')
+
+ def extra_args(self):
+ return self._connection.get_extra_args()
+
+
+class MitogenViaSpec(Spec):
+ """
+ MitogenViaSpec takes most of its information from the HostVars of the
+ running task. HostVars is a lightweight wrapper around VariableManager, so
+ it is better to say that VariableManager.get_vars() is the ultimate source
+ of MitogenViaSpec's information.
+
+ Due to this, mitogen_via= hosts must have all their configuration
+ information represented as host and group variables. We cannot use any
+ per-task configuration, as all that data belongs to the real target host.
+
+ Ansible uses all kinds of strange historical logic for calculating
+ variables, including making their precedence configurable. MitogenViaSpec
+ must ultimately reimplement all of that logic. It is likely that if you are
+ having a configuration problem with connection delegation, the answer to
+ your problem lies in the method implementations below!
+ """
+ def __init__(self, inventory_name, host_vars,
+ become_method, become_user):
+ self._inventory_name = inventory_name
+ self._host_vars = host_vars
+ self._become_method = become_method
+ self._become_user = become_user
+
+ def transport(self):
+ return (
+ self._host_vars.get('ansible_connection') or
+ C.DEFAULT_TRANSPORT
+ )
+
+ def inventory_name(self):
+ return self._inventory_name
+
+ def remote_addr(self):
+ return (
+ self._host_vars.get('ansible_host') or
+ self._inventory_name
+ )
+
+ def remote_user(self):
+ return (
+ self._host_vars.get('ansible_user') or
+ self._host_vars.get('ansible_ssh_user') or
+ C.DEFAULT_REMOTE_USER
+ )
+
+ def become(self):
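+ # Per-task become settings belong to the real target host, so only an
+ # explicitly supplied become_user activates become for the intermediary.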
+ return bool(self._become_user)
+
+ def become_method(self):
+ return self._become_method or C.DEFAULT_BECOME_METHOD
+
+ def become_user(self):
+ return self._become_user
+
+ def become_pass(self):
+ return optional_secret(
+ # TODO: Might have to come from PlayContext.
+ self._host_vars.get('ansible_become_password') or
+ self._host_vars.get('ansible_become_pass')
+ )
+
+ def password(self):
+ return optional_secret(
+ # TODO: Might have to come from PlayContext.
+ self._host_vars.get('ansible_ssh_pass') or
+ self._host_vars.get('ansible_password')
+ )
+
+ def port(self):
+ return (
+ self._host_vars.get('ansible_port') or
+ C.DEFAULT_REMOTE_PORT
+ )
+
+ def python_path(self):
+ return parse_python_path(
+ self._host_vars.get('ansible_python_interpreter')
+ # This variable has no default for remote hosts. For local hosts it
+ # is sys.executable.
+ )
+
+ def private_key_file(self):
+ # TODO: must come from PlayContext too.
+ return (
+ self._host_vars.get('ansible_ssh_private_key_file') or
+ self._host_vars.get('ansible_private_key_file') or
+ C.DEFAULT_PRIVATE_KEY_FILE
+ )
+
+ def ssh_executable(self):
+ return (
+ self._host_vars.get('ansible_ssh_executable') or
+ C.ANSIBLE_SSH_EXECUTABLE
+ )
+
+ def timeout(self):
+ # TODO: must come from PlayContext too.
+ return C.DEFAULT_TIMEOUT
+
+ def ansible_ssh_timeout(self):
+ return (
+ self._host_vars.get('ansible_timeout') or
+ self._host_vars.get('ansible_ssh_timeout') or
+ self.timeout()
+ )
+
+ def ssh_args(self):
+ return [
+ mitogen.core.to_text(term)
+ for s in (
+ (
+ self._host_vars.get('ansible_ssh_args') or
+ getattr(C, 'ANSIBLE_SSH_ARGS', None) or
+ os.environ.get('ANSIBLE_SSH_ARGS')
+ # TODO: ini entry. older versions.
+ ),
+ (
+ self._host_vars.get('ansible_ssh_common_args') or
+ os.environ.get('ANSIBLE_SSH_COMMON_ARGS')
+ # TODO: ini entry.
+ ),
+ (
+ self._host_vars.get('ansible_ssh_extra_args') or
+ os.environ.get('ANSIBLE_SSH_EXTRA_ARGS')
+ # TODO: ini entry.
+ ),
+ )
+ for term in ansible.utils.shlex.shlex_split(s)
+ if s
+ ]
+
+ def become_exe(self):
+ return (
+ self._host_vars.get('ansible_become_exe') or
+ C.DEFAULT_BECOME_EXE
+ )
+
+ def sudo_args(self):
+ return [
+ mitogen.core.to_text(term)
+ for s in (
+ self._host_vars.get('ansible_sudo_flags') or '',
+ self._host_vars.get('ansible_become_flags') or '',
+ )
+ for term in ansible.utils.shlex.shlex_split(s)
+ ]
+
+ def mitogen_via(self):
+ return self._host_vars.get('mitogen_via')
+
+ def mitogen_kind(self):
+ return self._host_vars.get('mitogen_kind')
+
+ def mitogen_docker_path(self):
+ return self._host_vars.get('mitogen_docker_path')
+
+ def mitogen_kubectl_path(self):
+ return self._host_vars.get('mitogen_kubectl_path')
+
+ def mitogen_lxc_path(self):
+ return self._host_vars.get('mitogen_lxc_path')
+
+ def mitogen_lxc_attach_path(self):
+ return self._host_vars.get('mitogen_lxc_attach_path')
+
+ def mitogen_lxc_info_path(self):
+ return self._host_vars.get('mitogen_lxc_info_path')
+
+ def mitogen_machinectl_path(self):
+ return self._host_vars.get('mitogen_machinectl_path')
+
+ def mitogen_ssh_debug_level(self):
+ return self._host_vars.get('mitogen_ssh_debug_level')
+
+ def mitogen_ssh_compression(self):
+ return self._host_vars.get('mitogen_ssh_compression')
+
+ def extra_args(self):
+ return [] # TODO
diff --git a/dev_requirements.txt b/dev_requirements.txt
index 68f0422a..2dc0d171 100644
--- a/dev_requirements.txt
+++ b/dev_requirements.txt
@@ -1,17 +1,11 @@
--r docs/docs-requirements.txt
-ansible==2.6.1
-coverage==4.5.1
-Django==1.6.11 # Last version supporting 2.6.
-mock==2.0.0
-pytz==2018.5
-paramiko==2.3.2 # Last 2.6-compat version.
-pytest-catchlog==1.2.2
-pytest==3.1.2
-PyYAML==3.11; python_version < '2.7'
-PyYAML==3.12; python_version >= '2.7'
-timeoutcontext==1.2.0
-unittest2==1.1.0
-# Fix InsecurePlatformWarning while creating py26 tox environment
-# https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
-urllib3[secure]; python_version < '2.7.9'
-google-api-python-client==1.6.5
+# This file is no longer used by CI jobs; it's mostly for interactive use.
+# CI jobs instead install the relevant sub-requirement directly.
+
+# mitogen_tests
+-r tests/requirements.txt
+
+# ansible_tests
+-r tests/ansible/requirements.txt
+
+# readthedocs
+-r docs/requirements.txt
diff --git a/docs/Makefile b/docs/Makefile
index bc394d34..18703fd7 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -2,7 +2,7 @@
#
default:
- sphinx-autobuild -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ sphinx-build . build/html/
# You can set these variables from the command line.
SPHINXOPTS =
diff --git a/docs/_static/style.css b/docs/_static/style.css
index 2ca15e1d..ec25901f 100644
--- a/docs/_static/style.css
+++ b/docs/_static/style.css
@@ -12,6 +12,17 @@ div.body li {
}
+/*
+ * Undo the hyphens: auto in Sphinx basic.css.
+ */
+div.body p, div.body dd, div.body li, div.body blockquote {
+ -moz-hyphens: inherit;
+ -ms-hyphens: inherit;
+ -webkit-hyphens: inherit;
+ hyphens: inherit;
+}
+
+
/*
* Setting :width; on an image causes Sphinx to turn the image into a link, so
@@ -27,6 +38,12 @@ div.body li {
width: 150px;
}
+.mitogen-right-200 {
+ float: right;
+ padding-left: 8px;
+ width: 200px;
+}
+
.mitogen-right-225 {
float: right;
padding-left: 8px;
@@ -50,3 +67,10 @@ div.body li {
padding-left: 8px;
width: 350px;
}
+
+.mitogen-logo-wrap {
+ shape-margin: 8px;
+ shape-outside: polygon(
+ 100% 0, 50% 10%, 24% 24%, 0% 50%, 24% 75%, 50% 90%, 100% 100%
+ );
+}
diff --git a/docs/ansible.rst b/docs/ansible.rst
index af17c19b..17354755 100644
--- a/docs/ansible.rst
+++ b/docs/ansible.rst
@@ -1,19 +1,17 @@
-.. image:: images/ansible/ansible_mitogen.svg
- :class: mitogen-right-225
-
-
Mitogen for Ansible
===================
+.. image:: images/ansible/ansible_mitogen.svg
+ :class: mitogen-right-200 mitogen-logo-wrap
+
An extension to `Ansible`_ is included that implements connections over
Mitogen, replacing embedded shell invocations with pure-Python equivalents
invoked via highly efficient remote procedure calls to persistent interpreters
tunnelled over SSH. No changes are required to target hosts.
-The extension is approaching stability and real-world usage is encouraged. `Bug
-reports`_ are welcome: Ansible is huge, and only wide testing will ensure
-soundness.
+The extension is stable and real-world use is encouraged. `Bug reports`_ are
+welcome: Ansible is huge, and only wide testing will ensure soundness.
.. _Ansible: https://www.ansible.com/
@@ -58,7 +56,7 @@ write files.
Installation
------------
-1. Thoroughly review :ref:`noteworthy_differences` and :ref:`changelog`.
+1. Thoroughly review :ref:`noteworthy_differences` and :ref:`known_issues`.
2. Download and extract |mitogen_url|.
3. Modify ``ansible.cfg``:
@@ -70,8 +68,9 @@ Installation
The ``strategy`` key is optional. If omitted, the
``ANSIBLE_STRATEGY=mitogen_linear`` environment variable can be set on a
- per-run basis. Like ``mitogen_linear``, the ``mitogen_free`` strategy exists
- to mimic the ``free`` strategy.
+ per-run basis. Like ``mitogen_linear``, the ``mitogen_free`` and
+ ``mitogen_host_pinned`` strategies exist to mimic the ``free`` and
+ ``host_pinned`` strategies.
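+
+ For example, to try the extension for a single run without editing
+ ``ansible.cfg`` (the playbook name is illustrative)::
+
+ $ ANSIBLE_STRATEGY=mitogen_linear ansible-playbook site.yml
+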
4. If targets have a restrictive ``sudoers`` file, add a rule like:
@@ -179,7 +178,7 @@ Noteworthy Differences
practice, and light web searches failed to reveal many examples of them.
* Ansible permits up to ``forks`` connections to be setup in parallel, whereas
- in Mitogen this is handled by a fixed-size thread pool. Up to 16 connections
+ in Mitogen this is handled by a fixed-size thread pool. Up to 32 connections
may be established in parallel by default, this can be modified by setting
the ``MITOGEN_POOL_SIZE`` environment variable.
@@ -430,7 +429,7 @@ Temporary Files
Temporary file handling in Ansible is tricky, and the precise behaviour varies
across major versions. A variety of temporary files and directories are
-created, depending on the operating mode:
+created, depending on the operating mode.
In the best case when pipelining is enabled and no temporary uploads are
required, for each task Ansible will create one directory below a
@@ -769,10 +768,10 @@ Connect to classic LXC containers, like `lxc
connection delegation is supported, and ``lxc-attach`` is always used rather
than the LXC Python bindings, as is usual with ``lxc``.
-The ``lxc-attach`` command must be available on the host machine.
-
* ``ansible_python_interpreter``
* ``ansible_host``: Name of LXC container (default: inventory hostname).
+* ``mitogen_lxc_attach_path``: path to ``lxc-attach`` command if not available
+ on the system path.
.. _method-lxd:
@@ -787,6 +786,8 @@ the host machine.
* ``ansible_python_interpreter``
* ``ansible_host``: Name of LXC container (default: inventory hostname).
+* ``mitogen_lxc_path``: path to ``lxc`` command if not available on the system
+ path.
.. _machinectl:
@@ -899,6 +900,10 @@ except connection delegation is supported.
* ``ssh_args``, ``ssh_common_args``, ``ssh_extra_args``
* ``mitogen_ssh_debug_level``: integer between `0..3` indicating the SSH client
debug level. Ansible must also be run with '-vvv' to view the output.
+* ``mitogen_ssh_compression``: :data:`True` to enable SSH compression,
+ otherwise :data:`False`. This will change to off by default in a future
+ release. If you are targeting many hosts on a fast network, please consider
+ disabling SSH compression.
Debugging
@@ -919,6 +924,194 @@ logging is necessary. File-based logging can be enabled by setting
enabled, one file per context will be created on the local machine and every
target machine, as ``/tmp/mitogen.<pid>.log``.
+
+Common Problems
+~~~~~~~~~~~~~~~
+
+The most common bug reports fall into the following categories, so it is worth
+checking whether you can categorize a problem using the tools provided before
+reporting it:
+
+**Missed/Incorrect Configuration Variables**
+ In some cases Ansible may support a configuration variable that Mitogen
+ does not yet support, or that Mitogen supports but handles incorrectly. For
+ example, Mitogen may pick the wrong username or SSH parameters.
+
+ To detect this, use the special ``mitogen_get_stack`` action described
+ below to verify that the settings Mitogen has chosen for the connection
+ make sense.
+
+**Process Environment Differences**
+ Mitogen's process model differs significantly from Ansible's in many places.
+ In the past, bugs have been reported because Ansible plug-ins modify an
+ environment variable after Mitogen processes are started.
+
+ If your task's failure may relate to the process environment in some way,
+ for example, ``SSH_AUTH_SOCK``, ``LC_ALL`` or ``PATH``, then an environment
+ difference may explain it. Environment differences are always considered
+ bugs in the extension, and are very easy to repair, so even if you find a
+ workaround, please report them to avoid someone else encountering the same
+ problem.
+
+**Variable Expansion Differences**
+ To avoid many classes of bugs, Mitogen avoids shell wherever possible.
+ Ansible, however, is traditionally built on shell, and it is often difficult
+ to tell just how many times a configuration parameter will pass through
+ shell expansion and quoting, and in what context before it is used.
+
+ Due to this, in some circumstances Mitogen may expand some variables
+ differently, for example in the context of the wrong user account. Careful
+ review of ``-vvv`` and ``mitogen_ssh_debug_level`` logs can reveal this.
+ For example, in the past Mitogen used a different method of expanding
+ ``~/.ssh/id_rsa``, causing authentication to fail when ``ansible-playbook``
+ was run via ``sudo -E``.
+
+**External Tool Integration Differences**
+ Mitogen reimplements any aspect of Ansible that involves integrating with
+ SSH, sudo, Docker, or related tools. For this reason, sometimes its support
+ for those tools differs or is less mature than in Ansible.
+
+ In the past Mitogen has had bug reports due to failing to recognize a
+ particular variation of a login or password prompt on an exotic or
+ non-English operating system, or confusing a login banner for a password
+ prompt. Careful review of ``-vvv`` logs helps identify these cases, as
+ Mitogen logs all strings it receives during connection, and how it
+ interprets them.
+
+
+.. _mitogen-get-stack:
+
+The `mitogen_get_stack` Action
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When a Mitogen strategy is loaded, a special ``mitogen_get_stack`` action is
+available that returns a concise description of the connection configuration as
+extracted from Ansible and passed to the core library. Using it, you can learn
+whether a problem lies in the Ansible extension or deeper in library code.
+
+The action may be used in a playbook as ``mitogen_get_stack:`` just like a
+regular module, or directly from the command-line::
+
+ $ ANSIBLE_STRATEGY=mitogen_linear ansible -m mitogen_get_stack -b -k k3
+ SSH password:
+ k3 | SUCCESS => {
+ "changed": true,
+ "result": [
+ {
+ "kwargs": {
+ "check_host_keys": "enforce",
+ "connect_timeout": 10,
+ "hostname": "k3",
+ "identities_only": false,
+ "identity_file": null,
+ "password": "mysecretpassword",
+ "port": null,
+ "python_path": null,
+ "ssh_args": [
+ "-C",
+ "-o",
+ "ControlMaster=auto",
+ "-o",
+ "ControlPersist=60s"
+ ],
+ "ssh_debug_level": null,
+ "ssh_path": "ssh",
+ "username": null
+ },
+ "method": "ssh"
+ },
+ {
+ "enable_lru": true,
+ "kwargs": {
+ "connect_timeout": 10,
+ "password": null,
+ "python_path": null,
+ "sudo_args": [
+ "-H",
+ "-S",
+ "-n"
+ ],
+ "sudo_path": null,
+ "username": "root"
+ },
+ "method": "sudo"
+ }
+ ]
+ }
+
+Each object in the list represents a single 'hop' in the connection, from
+nearest to furthest. Unlike in Ansible, the core library treats ``become``
+steps and SSH steps identically, so each appears as a distinct hop in the
+output.
+
+The presence of ``null`` means no explicit value was extracted from Ansible,
+and either the Mitogen library or SSH will choose a value for the parameter. In
+the example above, Mitogen will choose ``/usr/bin/python`` for ``python_path``,
+and SSH will choose ``22`` for ``port``, or whatever ``Port`` it parses from
+``~/.ssh/config``. Note that ``null`` may also indicate the extension failed
+to extract the correct value.
+
+When using ``mitogen_get_stack`` to diagnose a problem, pay special attention
+to ensuring the invocation exactly matches the problematic task. For example,
+if the failing task has ``delegate_to:`` or ``become:`` enabled, the
+``mitogen_get_stack`` invocation must include those statements in order for the
+output to be accurate.
+
+If a playbook cannot start at all, you may need to temporarily use
+``gather_facts: no`` to allow the first task to proceed. This action does not
+create connections, so if it is the first task, it is still possible to review
+its output.
+
+
+The `mitogen_ssh_debug_level` Variable
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Mitogen has support for capturing SSH diagnostic logs and integrating them
+into the regular debug log output produced when ``-vvv`` is active. This
+provides a single audit trail of every component active during SSH
+authentication.
+
+Particularly for authentication failures, setting this variable to 3, in
+combination with ``-vvv``, allows review of every parameter passed to SSH and
+every action SSH attempted during authentication.
+
+For example, this method can be used to ascertain whether SSH attempted agent
+authentication, which private key files it was able to access, and which it
+tried.
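+
+A typical invocation, with the variable supplied per-run as an extra variable
+(the playbook name is illustrative)::
+
+ $ ANSIBLE_STRATEGY=mitogen_linear ansible-playbook -vvv \
+ -e mitogen_ssh_debug_level=3 site.yml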
+
+
+Post-authentication Bootstrap Failure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If logging indicates Mitogen was able to authenticate, but some error occurred
+after authentication preventing the Python bootstrap from completing, it can be
+immensely useful to temporarily replace ``ansible_python_interpreter`` with a
+wrapper that runs Python under ``strace``::
+
+ $ ssh badbox
+
+ badbox$ cat > strace-python.sh
+ #!/bin/sh
+ strace -o /tmp/strace-python.$$ -ff -s 100 python "$@"
+ ^D
+
+ badbox$ chmod +x strace-python.sh
+ badbox$ logout
+
+ $ ansible-playbook site.yml \
+ -e ansible_python_interpreter=./strace-python.sh \
+ -l badbox
+
+This will produce a potentially large number of log files under ``/tmp/``. The
+lowest-numbered traced PID is generally the main Python interpreter. The most
+intricate bootstrap steps happen there, and any error should be visible near
+the end of the trace.
+
+It is also possible the first-stage bootstrap failed. Its trace is usually the
+next lowest-numbered PID and tends to be the smallest file. Even if you can't
+ascertain the problem with your configuration from these logs, including them
+in a bug report can save days of detective effort.
+
+
.. _diagnosing-hangs:
Diagnosing Hangs
@@ -944,6 +1137,25 @@ cases `faulthandler `_ may be used:
of the stacks, along with a description of the last task executing prior to
the hang.
+It is possible the hang occurred in a process on a target. If ``strace`` is
+available, look for the host not yet listed in Ansible's output as reporting
+a result for the most recent task, log into it, and use
+``strace -ff -p <pid>`` on each process whose name begins with ``mitogen:``::
+
+ $ strace -ff -p 29858
+ strace: Process 29858 attached with 3 threads
+ [pid 29864] futex(0x55ea9be52f60, FUTEX_WAIT_BITSET_PRIVATE|FUTEX_CLOCK_REALTIME, 0, NULL, 0xffffffff
+ [pid 29860] restart_syscall(<... resuming interrupted poll ...>
+ [pid 29858] futex(0x55ea9be52f60, FUTEX_WAIT_BITSET_PRIVATE|FUTEX_CLOCK_REALTIME, 0, NULL, 0xffffffff
+ ^C
+
+ $
+
+This shows one thread waiting on IO (``poll``) and two more waiting on the
+same lock. It is taken from a real example of a deadlock due to a forking bug.
+Please include any such output you are able to collect for each process in any
+bug report.
+
Getting Help
~~~~~~~~~~~~
@@ -955,35 +1167,103 @@ FreeNode IRC network.
Sample Profiles
---------------
-Local VM connection
-~~~~~~~~~~~~~~~~~~~
+The summaries below may be reproduced using data and scripts maintained in the
+`pcaps branch `_. Traces were
+recorded using Ansible 2.5.14.
+
+
+Trivial Loop: Local Host
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+This demonstrates Mitogen vs. SSH pipelining to the local machine running
+`bench/loop-100-items.yml
+`_,
+executing a simple command 100 times. Most Ansible controller overhead is
+isolated, characterizing just module executor and connection layer performance.
+Mitogen requires **63x less bandwidth and 5.9x less time**.
+
+.. image:: images/ansible/pcaps/loop-100-items-local.svg
+
+Unlike in SSH pipelining where payloads are sent as a single compressed block,
+by default Mitogen enables SSH compression for its uncompressed RPC data. In
+many-host scenarios it may be desirable to disable compression. This has
+negligible impact on footprint, since program code is separately compressed and
+sent only once. Compression also benefits SSH pipelining, but the presence of
+large precompressed per-task payloads may present a more significant CPU burden
+during many-host runs.
+
+.. image:: images/ansible/pcaps/loop-100-items-local-detail.svg
-This demonstrates Mitogen vs. connection pipelining to a local VM executing
-``bench/loop-100-items.yml``, which simply executes ``hostname`` 100 times.
-Mitogen requires **43x less bandwidth and 6.5x less time**.
+In a detailed trace, improved interaction with the host machine is visible. In
+this playbook, because no forks were required to start SSH clients from the
+worker process executing the loop, the worker's memory was never marked
+read-only, thus avoiding a major hidden performance problem: the page fault
+rate is more than halved.
+
+
+File Transfer: UK to France
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+`This playbook
+`_
+was used to compare file transfer performance over a ~26 ms link. It uses the
+``with_filetree`` loop syntax to copy a directory of 1,000 0-byte files to the
+target.
+
+.. csv-table::
+ :header: , Secs, CPU Secs, Sent, Received, Roundtrips
+ :class: nojunk
+ :align: right
+
+ Mitogen, 98.54, 43.04, "815 KiB", "447 KiB", 3.79
+ SSH Pipelining, "1,483.54", 329.37, "99,539 KiB", "6,870 KiB", 57.01
+
+*Roundtrips* is the approximate number of network roundtrips required to
+describe the runtime that was consumed. Due to Mitogen's built-in file transfer
+support, continuous reinitialization of an external `scp`/`sftp` client is
+avoided, permitting large ``with_filetree`` copies to become practical without
+any special casing within the playbook or the Ansible implementation.
+
+
+DebOps: UK to India
+~~~~~~~~~~~~~~~~~~~
-.. image:: images/ansible/run_hostname_100_times_mito.svg
-.. image:: images/ansible/run_hostname_100_times_plain.svg
+This is an all-green run of 246 tasks from the `DebOps
+`_ 0.7.2 `common.yml
+`_
+playbook over a ~370 ms link between the UK and India. The playbook touches a
+wide variety of modules, many featuring unavoidable waits for slow computation
+on the target.
+More tasks of a wider variety are featured than previously, placing strain on
+Mitogen's module loading and in-memory caching. By running over a long-distance
+connection, it highlights behaviour of the connection layer in the presence of
+high latency.
-Kathmandu to Paris
-~~~~~~~~~~~~~~~~~~
+Mitogen requires **14.5x less bandwidth and 4x less time**.
-This is a full Django application playbook over a ~180ms link between Kathmandu
-and Paris. Aside from large pauses where the host performs useful work, the
-high latency of this link means Mitogen only manages a 1.7x speedup.
+.. image:: images/ansible/pcaps/debops-uk-india.svg
-Many early roundtrips are due to inefficiencies in Mitogen's importer that will
-be fixed over time, however the majority, comprising at least 10 seconds, are
-due to idling while the host's previous result and next command are in-flight
-on the network.
-The initial extension lays groundwork for exciting structural changes to the
-execution model: a future version will tackle latency head-on by delegating
-some control flow to the target host, melding the performance and scalability
-benefits of pull-based operation with the management simplicity of push-based
-operation.
+Django App: UK to India
+~~~~~~~~~~~~~~~~~~~~~~~
-.. image:: images/ansible/costapp.png
+This short playbook features only 23 steps executed over the same ~370 ms link
+as previously, with many steps running unavoidably expensive tasks like
+building C++ code and compiling static web site assets.
+Despite the small margin for optimization, Mitogen still manages **6.2x less
+bandwidth and 1.8x less time**.
+.. image:: images/ansible/pcaps/costapp-uk-india.svg
diff --git a/docs/api.rst b/docs/api.rst
index 965aa0c7..3fd70bea 100644
--- a/docs/api.rst
+++ b/docs/api.rst
@@ -84,768 +84,469 @@ Message Class
=============
.. currentmodule:: mitogen.core
-
-.. class:: Message
-
- Messages are the fundamental unit of communication, comprising fields from
- the :ref:`stream-protocol` header, an optional reference to the receiving
- :class:`mitogen.core.Router` for ingress messages, and helper methods for
- deserialization and generating replies.
-
- .. attribute:: router
-
- The :class:`mitogen.core.Router` responsible for routing the
- message. This is :data:`None` for locally originated messages.
-
- .. attribute:: receiver
-
- The :class:`mitogen.core.Receiver` over which the message was last
- received. Part of the :class:`mitogen.select.Select` interface.
- Defaults to :data:`None`.
-
- .. attribute:: dst_id
-
- Integer target context ID. :class:`mitogen.core.Router` delivers
- messages locally when their :attr:`dst_id` matches
- :data:`mitogen.context_id`, otherwise they are routed up or downstream.
-
- .. attribute:: src_id
-
- Integer source context ID. Used as the target of replies if any are
- generated.
-
- .. attribute:: auth_id
-
- The context ID under whose authority the message is acting. See
- :ref:`source-verification`.
-
- .. attribute:: handle
-
- Integer target handle in the destination context. This is one of the
- :ref:`standard-handles`, or a dynamically generated handle used to
- receive a one-time reply, such as the return value of a function call.
-
- .. attribute:: reply_to
-
- Integer target handle to direct any reply to this message. Used to
- receive a one-time reply, such as the return value of a function call.
- :data:`IS_DEAD` has a special meaning when it appears in this field.
-
- .. attribute:: data
-
- Message data, which may be raw or pickled.
-
- .. attribute:: is_dead
-
- :data:`True` if :attr:`reply_to` is set to the magic value
- :data:`mitogen.core.IS_DEAD`, indicating the sender considers the
- channel dead.
-
- .. py:method:: __init__ (\**kwargs)
-
- Construct a message from from the supplied `kwargs`. :attr:`src_id`
- and :attr:`auth_id` are always set to :data:`mitogen.context_id`.
-
- .. py:classmethod:: pickled (obj, \**kwargs)
-
- Construct a pickled message, setting :attr:`data` to the
- serialization of `obj`, and setting remaining fields using `kwargs`.
-
- :returns:
- The new message.
-
- .. method:: unpickle (throw=True)
-
- Unpickle :attr:`data`, optionally raising any exceptions present.
-
- :param bool throw:
- If :data:`True`, raise exceptions, otherwise it is the caller's
- responsibility.
-
- :raises mitogen.core.CallError:
- The serialized data contained CallError exception.
- :raises mitogen.core.ChannelError:
- The `is_dead` field was set.
-
- .. method:: reply (obj, router=None, \**kwargs)
-
- Compose a reply to this message and send it using :attr:`router`, or
- `router` is :attr:`router` is :data:`None`.
-
- :param obj:
- Either a :class:`Message`, or an object to be serialized in order
- to construct a new message.
- :param router:
- Optional router to use if :attr:`router` is :data:`None`.
- :param kwargs:
- Optional keyword parameters overriding message fields in the reply.
-
+.. autoclass:: Message
+ :members:
Router Class
============
.. currentmodule:: mitogen.core
-
-.. class:: Router
-
- Route messages between parent and child contexts, and invoke handlers
- defined on our parent context. :meth:`Router.route() ` straddles
- the :class:`Broker ` and user threads, it is safe
- to call anywhere.
-
- **Note:** This is the somewhat limited core version of the Router class
- used by child contexts. The master subclass is documented below this one.
-
- .. attribute:: unidirectional
-
- When :data:`True`, permit children to only communicate with the current
- context or a parent of the current context. Routing between siblings or
- children of parents is prohibited, ensuring no communication is
- possible between intentionally partitioned networks, such as when a
- program simultaneously manipulates hosts spread across a corporate and
- a production network, or production networks that are otherwise
- air-gapped.
-
- Sending a prohibited message causes an error to be logged and a dead
- message to be sent in reply to the errant message, if that message has
- ``reply_to`` set.
-
- The value of :data:`unidirectional` becomes the default for the
- :meth:`local() ` `unidirectional`
- parameter.
-
- .. method:: stream_by_id (dst_id)
-
- Return the :class:`mitogen.core.Stream` that should be used to
- communicate with `dst_id`. If a specific route for `dst_id` is not
- known, a reference to the parent context's stream is returned.
-
- .. method:: add_route (target_id, via_id)
-
- Arrange for messages whose `dst_id` is `target_id` to be forwarded on
- the directly connected stream for `via_id`. This method is called
- automatically in response to ``ADD_ROUTE`` messages, but remains public
- for now while the design has not yet settled, and situations may arise
- where routing is not fully automatic.
-
- .. method:: register (context, stream)
-
- Register a new context and its associated stream, and add the stream's
- receive side to the I/O multiplexer. This This method remains public
- for now while hte design has not yet settled.
-
- .. method:: add_handler (fn, handle=None, persist=True, respondent=None, policy=None)
-
- Invoke `fn(msg)` for each Message sent to `handle` from this context.
- Unregister after one invocation if `persist` is :data:`False`. If
- `handle` is :data:`None`, a new handle is allocated and returned.
-
- :param int handle:
- If not :data:`None`, an explicit handle to register, usually one of
- the ``mitogen.core.*`` constants. If unspecified, a new unused
- handle will be allocated.
-
- :param bool persist:
- If :data:`False`, the handler will be unregistered after a single
- message has been received.
-
- :param mitogen.core.Context respondent:
- Context that messages to this handle are expected to be sent from.
- If specified, arranges for a dead message to be delivered to `fn`
- when disconnection of the context is detected.
-
- In future `respondent` will likely also be used to prevent other
- contexts from sending messages to the handle.
-
- :param function policy:
- Function invoked as `policy(msg, stream)` where `msg` is a
- :class:`mitogen.core.Message` about to be delivered, and
- `stream` is the :class:`mitogen.core.Stream` on which it was
- received. The function must return :data:`True`, otherwise an
- error is logged and delivery is refused.
-
- Two built-in policy functions exist:
-
- * :func:`mitogen.core.has_parent_authority`: requires the
- message arrived from a parent context, or a context acting with a
- parent context's authority (``auth_id``).
-
- * :func:`mitogen.parent.is_immediate_child`: requires the
- message arrived from an immediately connected child, for use in
- messaging patterns where either something becomes buggy or
- insecure by permitting indirect upstream communication.
-
- In case of refusal, and the message's ``reply_to`` field is
- nonzero, a :class:`mitogen.core.CallError` is delivered to the
- sender indicating refusal occurred.
-
- :return:
- `handle`, or if `handle` was :data:`None`, the newly allocated
- handle.
-
- .. method:: del_handler (handle)
-
- Remove the handle registered for `handle`
-
- :raises KeyError:
- The handle wasn't registered.
-
- .. method:: _async_route(msg, stream=None)
-
- Arrange for `msg` to be forwarded towards its destination. If its
- destination is the local context, then arrange for it to be dispatched
- using the local handlers.
-
- This is a lower overhead version of :meth:`route` that may only be
- called from the I/O multiplexer thread.
-
- :param mitogen.core.Stream stream:
- If not :data:`None`, a reference to the stream the message arrived
- on. Used for performing source route verification, to ensure
- sensitive messages such as ``CALL_FUNCTION`` arrive only from
- trusted contexts.
-
- .. method:: route(msg)
-
- Arrange for the :class:`Message` `msg` to be delivered to its
- destination using any relevant downstream context, or if none is found,
- by forwarding the message upstream towards the master context. If `msg`
- is destined for the local context, it is dispatched using the handles
- registered with :meth:`add_handler`.
-
- This may be called from any thread.
+.. autoclass:: Router
+ :members:
.. currentmodule:: mitogen.master
-.. class:: Router (broker=None)
-
- Extend :class:`mitogen.core.Router` with functionality useful to
- masters, and child contexts who later become masters. Currently when this
- class is required, the target context's router is upgraded at runtime.
-
- .. note::
-
- You may construct as many routers as desired, and use the same broker
- for multiple routers, however usually only one broker and router need
- exist. Multiple routers may be useful when dealing with separate trust
- domains, for example, manipulating infrastructure belonging to separate
- customers or projects.
-
- :param mitogen.master.Broker broker:
- :class:`Broker` instance to use. If not specified, a private
- :class:`Broker` is created.
-
- .. attribute:: profiling
-
- When :data:`True`, cause the broker thread and any subsequent broker
- and main threads existing in any child to write
- ``/tmp/mitogen.stats...log`` containing a
- :mod:`cProfile` dump on graceful exit. Must be set prior to
- construction of any :class:`Broker`, e.g. via:
-
- .. code::
-
- mitogen.master.Router.profiling = True
-
- .. method:: enable_debug
-
- Cause this context and any descendant child contexts to write debug
- logs to /tmp/mitogen..log.
-
- .. method:: allocate_id
-
- Arrange for a unique context ID to be allocated and associated with a
- route leading to the active context. In masters, the ID is generated
- directly, in children it is forwarded to the master via an
- ``ALLOCATE_ID`` message that causes the master to emit matching
- ``ADD_ROUTE`` messages prior to replying.
-
- .. method:: context_by_id (context_id, via_id=None)
+.. autoclass:: Router (broker=None)
+ :members:
- Messy factory/lookup function to find a context by its ID, or construct
- it. In future this will be replaced by a much more sensible interface.
- .. _context-factories:
+.. _context-factories:
- **Context Factories**
+Connection Methods
+==================
- .. method:: fork (on_fork=None, on_start=None, debug=False, profiling=False, via=None)
-
- Construct a context on the local machine by forking the current
- process. The forked child receives a new identity, sets up a new broker
- and router, and responds to function calls identically to children
- created using other methods.
-
- For long-lived processes, :meth:`local` is always better as it
- guarantees a pristine interpreter state that inherited little from the
- parent. Forking should only be used in performance-sensitive scenarios
- where short-lived children must be spawned to isolate potentially buggy
- code, and only after accounting for all the bad things possible as a
- result of, at a minimum:
-
- * Files open in the parent remaining open in the child,
- causing the lifetime of the underlying object to be extended
- indefinitely.
-
- * From the perspective of external components, this is observable
- in the form of pipes and sockets that are never closed, which may
- break anything relying on closure to signal protocol termination.
-
- * Descriptors that reference temporary files will not have their disk
- space reclaimed until the child exits.
-
- * Third party package state, such as urllib3's HTTP connection pool,
- attempting to write to file descriptors shared with the parent,
- causing random failures in both parent and child.
-
- * UNIX signal handlers installed in the parent process remaining active
- in the child, despite associated resources, such as service threads,
- child processes, resource usage counters or process timers becoming
- absent or reset in the child.
-
- * Library code that makes assumptions about the process ID remaining
- unchanged, for example to implement inter-process locking, or to
- generate file names.
-
- * Anonymous ``MAP_PRIVATE`` memory mappings whose storage requirement
- doubles as either parent or child dirties their pages.
-
- * File-backed memory mappings that cannot have their space freed on
- disk due to the mapping living on in the child.
-
- * Difficult to diagnose memory usage and latency spikes due to object
- graphs becoming unreferenced in either parent or child, causing
- immediate copy-on-write to large portions of the process heap.
+.. currentmodule:: mitogen.parent
+.. method:: Router.fork (on_fork=None, on_start=None, debug=False, profiling=False, via=None)
+
+ Construct a context on the local machine by forking the current
+ process. The forked child receives a new identity, sets up a new broker
+ and router, and responds to function calls identically to children
+ created using other methods.
+
+ The use of this method is strongly discouraged. It requires Python 2.6 or
+ newer, as older Pythons made no effort to reset threading state upon fork.
+
+ For long-lived processes, :meth:`local` is always better as it
+ guarantees a pristine interpreter state that inherited little from the
+ parent. Forking should only be used in performance-sensitive scenarios
+ where short-lived children must be spawned to isolate potentially buggy
+ code, and only after accounting for all the bad things possible as a
+ result of, at a minimum:
+
+ * Files open in the parent remaining open in the child,
+ causing the lifetime of the underlying object to be extended
+ indefinitely.
+
+ * From the perspective of external components, this is observable
+ in the form of pipes and sockets that are never closed, which may
+ break anything relying on closure to signal protocol termination.
+
+ * Descriptors that reference temporary files will not have their disk
+ space reclaimed until the child exits.
+
+ * Third party package state, such as urllib3's HTTP connection pool,
+ attempting to write to file descriptors shared with the parent,
+ causing random failures in both parent and child.
+
+ * UNIX signal handlers installed in the parent process remaining active
+ in the child, despite associated resources, such as service threads,
+ child processes, resource usage counters or process timers becoming
+ absent or reset in the child.
+
+ * Library code that makes assumptions about the process ID remaining
+ unchanged, for example to implement inter-process locking, or to
+ generate file names.
+
+ * Anonymous ``MAP_PRIVATE`` memory mappings whose storage requirement
+ doubles as either parent or child dirties their pages.
+
+ * File-backed memory mappings that cannot have their space freed on
+ disk due to the mapping living on in the child.
+
+ * Difficult to diagnose memory usage and latency spikes due to object
+ graphs becoming unreferenced in either parent or child, causing
+ immediate copy-on-write to large portions of the process heap.
+
+ * Locks held in the parent causing random deadlocks in the child, such
+ as when another thread emits a log entry via the :mod:`logging`
+ package concurrent to another thread calling :meth:`fork`, or when a C
+ extension module calls the C library allocator, or when a thread is using
+ the C library DNS resolver, for example via :func:`socket.gethostbyname`.
+
+ * Objects existing in Thread-Local Storage of every non-:meth:`fork`
+ thread becoming permanently inaccessible, and never having their
+ object destructors called, including TLS usage by native extension
+ code, triggering many new variants of all the issues above.
+
+ * Pseudo-Random Number Generator state that is easily observable by
+ network peers to be duplicate, violating requirements of
+ cryptographic protocols through one-time state reuse. In the worst
+ case, children continually reuse the same state due to repeatedly
+ forking from a static parent.
+
+ :meth:`fork` cleans up Mitogen-internal objects and locks held by the
+ :mod:`logging` package, and reseeds :func:`random.random` and the OpenSSL
+ PRNG via :func:`ssl.RAND_add` (the latter only if the :mod:`ssl` module is
+ already loaded). You must arrange for your program's state, including any
+ third party packages in use, to be cleaned up by specifying an `on_fork`
+ function.
+
+ The associated stream implementation is
+ :class:`mitogen.fork.Stream`.
+
+ :param function on_fork:
+ Function invoked as `on_fork()` from within the child process. This
+ permits supplying a program-specific cleanup function to break
+ locks and close file descriptors belonging to the parent from
+ within the child.
+
+ :param function on_start:
+ Invoked as `on_start(econtext)` from within the child process after
+ it has been set up, but before the function dispatch loop starts.
+ This permits supplying a custom child main function that inherits
+ rich data structures that cannot normally be passed via a
+ serialization.
+
+ :param mitogen.core.Context via:
+ Same as the `via` parameter for :meth:`local`.
+
+ :param bool debug:
+ Same as the `debug` parameter for :meth:`local`.
+
+ :param bool profiling:
+ Same as the `profiling` parameter for :meth:`local`.
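+
+ A minimal sketch of `on_fork` in use; ``reset_my_pools()`` stands in for
+ whatever program-specific cleanup the calling application requires:
+
+ .. code-block:: python
+
+ import os
+ import mitogen.master
+
+ def my_on_fork():
+ # Runs inside the forked child: break locks and close
+ # descriptors inherited from the parent process.
+ reset_my_pools()
+
+ router = mitogen.master.Router()
+ try:
+ child = router.fork(on_fork=my_on_fork)
+ # Prove the child is a separate process.
+ print(child.call(os.getpid))
+ finally:
+ router.broker.shutdown()
+ router.broker.join()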
+
+.. method:: Router.local (remote_name=None, python_path=None, debug=False, connect_timeout=None, profiling=False, via=None)
+
+ Construct a context on the local machine as a subprocess of the current
+ process. The associated stream implementation is
+ :class:`mitogen.master.Stream`.
+
+ :param str remote_name:
+ The ``argv[0]`` suffix for the new process. If `remote_name` is
+ ``test``, the new process ``argv[0]`` will be ``mitogen:test``.
+
+ If unspecified, defaults to ``<username>@<hostname>:<pid>``.
+
+ This variable cannot contain slash characters, as the resulting
+ ``argv[0]`` must be presented in such a way as to allow Python to
+ determine its installation prefix. This is required to support
+ virtualenv.
+
+ :param str|list python_path:
+ String or list path to the Python interpreter to use for bootstrap.
+ Defaults to :data:`sys.executable` for local connections, and
+ ``python`` for remote connections.
+
+ It is possible to pass a list to invoke Python wrapped using
+ another tool, such as ``["/usr/bin/env", "python"]``.
+
+ :param bool debug:
+ If :data:`True`, arrange for debug logging (:meth:`enable_debug`) to
+ be enabled in the new context. Automatically :data:`True` when
+ :meth:`enable_debug` has been called, but may be used
+ selectively otherwise.
+
+ :param bool unidirectional:
+ If :data:`True`, arrange for the child's router to be constructed
+ with :attr:`unidirectional routing
+ ` enabled. Automatically
+ :data:`True` when it was enabled for this router, but may still be
+ explicitly set to :data:`False`.
+
+ :param float connect_timeout:
+ Fractional seconds to wait for the subprocess to indicate it is
+ healthy. Defaults to 30 seconds.
+
+ :param bool profiling:
+ If :data:`True`, arrange for profiling (:data:`profiling`) to be
+ enabled in the new context. Automatically :data:`True` when
+ :data:`profiling` is :data:`True`, but may be used selectively
+ otherwise.
+
+ :param mitogen.core.Context via:
+ If not :data:`None`, arrange for construction to occur via RPCs
+ made to the context `via`, and for :data:`ADD_ROUTE
+ <mitogen.core.ADD_ROUTE>` messages to be generated as appropriate.
- * Locks held in the parent causing random deadlocks in the child, such
- as when another thread emits a log entry via the :mod:`logging`
- package concurrent to another thread calling :meth:`fork`.
+ .. code-block:: python
- * Objects existing in Thread-Local Storage of every non-:meth:`fork`
- thread becoming permanently inaccessible, and never having their
- object destructors called, including TLS usage by native extension
- code, triggering many new variants of all the issues above.
+ # SSH to the remote machine.
+ remote_machine = router.ssh(hostname='mybox.com')
- * Pseudo-Random Number Generator state that is easily observable by
- network peers to be duplicate, violating requirements of
- cryptographic protocols through one-time state reuse. In the worst
- case, children continually reuse the same state due to repeatedly
- forking from a static parent.
+ # Use the SSH connection to create a sudo connection.
+ remote_root = router.sudo(username='root', via=remote_machine)
- :meth:`fork` cleans up Mitogen-internal objects, in addition to
- locks held by the :mod:`logging` package, reseeds
- :func:`random.random`, and the OpenSSL PRNG via
- :func:`ssl.RAND_add`, but only if the :mod:`ssl` module is
- already loaded. You must arrange for your program's state, including
- any third party packages in use, to be cleaned up by specifying an
- `on_fork` function.
+.. method:: Router.doas (username=None, password=None, doas_path=None, password_prompt=None, incorrect_prompts=None, \**kwargs)
+
+ Construct a context on the local machine over a ``doas`` invocation.
+ The ``doas`` process is started in a newly allocated pseudo-terminal,
+ and supports typing interactive passwords.
- The associated stream implementation is
- :class:`mitogen.fork.Stream`.
+ Accepts all parameters accepted by :meth:`local`, in addition to:
- :param function on_fork:
- Function invoked as `on_fork()` from within the child process. This
- permits supplying a program-specific cleanup function to break
- locks and close file descriptors belonging to the parent from
- within the child.
-
- :param function on_start:
- Invoked as `on_start(econtext)` from within the child process after
- it has been set up, but before the function dispatch loop starts.
- This permits supplying a custom child main function that inherits
- rich data structures that cannot normally be passed via a
- serialization.
-
- :param mitogen.core.Context via:
- Same as the `via` parameter for :meth:`local`.
-
- :param bool debug:
- Same as the `debug` parameter for :meth:`local`.
-
- :param bool profiling:
- Same as the `profiling` parameter for :meth:`local`.
-
- .. method:: local (remote_name=None, python_path=None, debug=False, connect_timeout=None, profiling=False, via=None)
-
- Construct a context on the local machine as a subprocess of the current
- process. The associated stream implementation is
- :class:`mitogen.master.Stream`.
-
- :param str remote_name:
- The ``argv[0]`` suffix for the new process. If `remote_name` is
- ``test``, the new process ``argv[0]`` will be ``mitogen:test``.
-
- If unspecified, defaults to ``@:``.
-
- This variable cannot contain slash characters, as the resulting
- ``argv[0]`` must be presented in such a way as to allow Python to
- determine its installation prefix. This is required to support
- virtualenv.
-
- :param str|list python_path:
- String or list path to the Python interpreter to use for bootstrap.
- Defaults to :data:`sys.executable` for local connections, and
- ``python`` for remote connections.
-
- It is possible to pass a list to invoke Python wrapped using
- another tool, such as ``["/usr/bin/env", "python"]``.
-
- :param bool debug:
- If :data:`True`, arrange for debug logging (:meth:`enable_debug`) to
- be enabled in the new context. Automatically :data:`True` when
- :meth:`enable_debug` has been called, but may be used
- selectively otherwise.
-
- :param bool unidirectional:
- If :data:`True`, arrange for the child's router to be constructed
- with :attr:`unidirectional routing
- ` enabled. Automatically
- :data:`True` when it was enabled for this router, but may still be
- explicitly set to :data:`False`.
-
- :param float connect_timeout:
- Fractional seconds to wait for the subprocess to indicate it is
- healthy. Defaults to 30 seconds.
-
- :param bool profiling:
- If :data:`True`, arrange for profiling (:data:`profiling`) to be
- enabled in the new context. Automatically :data:`True` when
- :data:`profiling` is :data:`True`, but may be used selectively
- otherwise.
-
- :param mitogen.core.Context via:
- If not :data:`None`, arrange for construction to occur via RPCs
- made to the context `via`, and for :data:`ADD_ROUTE
- ` messages to be generated as appropriate.
-
- .. code-block:: python
-
- # SSH to the remote machine.
- remote_machine = router.ssh(hostname='mybox.com')
-
- # Use the SSH connection to create a sudo connection.
- remote_root = router.sudo(username='root', via=remote_machine)
-
- .. method:: doas (username=None, password=None, doas_path=None, password_prompt=None, incorrect_prompts=None, \**kwargs)
-
- Construct a context on the local machine over a ``doas`` invocation.
- The ``doas`` process is started in a newly allocated pseudo-terminal,
- and supports typing interactive passwords.
-
- Accepts all parameters accepted by :meth:`local`, in addition to:
-
- :param str username:
- Username to use, defaults to ``root``.
- :param str password:
- The account password to use if requested.
- :param str doas_path:
- Filename or complete path to the ``doas`` binary. ``PATH`` will be
- searched if given as a filename. Defaults to ``doas``.
- :param bytes password_prompt:
- A string that indicates ``doas`` is requesting a password. Defaults
- to ``Password:``.
- :param list incorrect_prompts:
- List of bytestrings indicating the password is incorrect. Defaults
- to `(b"doas: authentication failed")`.
- :raises mitogen.doas.PasswordError:
- A password was requested but none was provided, the supplied
- password was incorrect, or the target account did not exist.
-
- .. method:: docker (container=None, image=None, docker_path=None, \**kwargs)
-
- Construct a context on the local machine within an existing or
- temporary new Docker container using the ``docker`` program. One of
- `container` or `image` must be specified.
-
- Accepts all parameters accepted by :meth:`local`, in addition to:
-
- :param str container:
- Existing container to connect to. Defaults to :data:`None`.
- :param str username:
- Username within the container to :func:`setuid` to. Defaults to
- :data:`None`, which Docker interprets as ``root``.
- :param str image:
- Image tag to use to construct a temporary container. Defaults to
- :data:`None`.
- :param str docker_path:
- Filename or complete path to the Docker binary. ``PATH`` will be
- searched if given as a filename. Defaults to ``docker``.
-
- .. method:: jail (container, jexec_path=None, \**kwargs)
-
- Construct a context on the local machine within a FreeBSD jail using
- the ``jexec`` program.
-
- Accepts all parameters accepted by :meth:`local`, in addition to:
-
- :param str container:
- Existing container to connect to. Defaults to :data:`None`.
- :param str username:
- Username within the container to :func:`setuid` to. Defaults to
- :data:`None`, which ``jexec`` interprets as ``root``.
- :param str jexec_path:
- Filename or complete path to the ``jexec`` binary. ``PATH`` will be
- searched if given as a filename. Defaults to ``/usr/sbin/jexec``.
-
- .. method:: kubectl (pod, kubectl_path=None, kubectl_args=None, \**kwargs)
-
- Construct a context in a container via the Kubernetes ``kubectl``
- program.
-
- Accepts all parameters accepted by :meth:`local`, in addition to:
-
- :param str pod:
- Kubernetes pod to connect to.
- :param str kubectl_path:
- Filename or complete path to the ``kubectl`` binary. ``PATH`` will
- be searched if given as a filename. Defaults to ``kubectl``.
- :param list kubectl_args:
- Additional arguments to pass to the ``kubectl`` command.
-
- .. method:: lxc (container, lxc_attach_path=None, \**kwargs)
-
- Construct a context on the local machine within an LXC classic
- container using the ``lxc-attach`` program.
-
- Accepts all parameters accepted by :meth:`local`, in addition to:
-
- :param str container:
- Existing container to connect to. Defaults to :data:`None`.
- :param str lxc_attach_path:
- Filename or complete path to the ``lxc-attach`` binary. ``PATH``
- will be searched if given as a filename. Defaults to
- ``lxc-attach``.
-
- .. method:: lxc (container, lxc_attach_path=None, \**kwargs)
-
- Construct a context on the local machine within a LXD container using
- the ``lxc`` program.
-
- Accepts all parameters accepted by :meth:`local`, in addition to:
-
- :param str container:
- Existing container to connect to. Defaults to :data:`None`.
- :param str lxc_path:
- Filename or complete path to the ``lxc`` binary. ``PATH`` will be
- searched if given as a filename. Defaults to ``lxc``.
-
- .. method:: setns (container, kind, username=None, docker_path=None, lxc_info_path=None, machinectl_path=None, \**kwargs)
-
- Construct a context in the style of :meth:`local`, but change the
- active Linux process namespaces via calls to `setns(1)` before
- executing Python.
-
- The namespaces to use, and the active root file system are taken from
- the root PID of a running Docker, LXC, LXD, or systemd-nspawn
- container.
-
- A program is required only to find the root PID, after which management
- of the child Python interpreter is handled directly.
-
- :param str container:
- Container to connect to.
- :param str kind:
- One of ``docker``, ``lxc``, ``lxd`` or ``machinectl``.
- :param str username:
- Username within the container to :func:`setuid` to. Defaults to
- ``root``.
- :param str docker_path:
- Filename or complete path to the Docker binary. ``PATH`` will be
- searched if given as a filename. Defaults to ``docker``.
- :param str lxc_path:
- Filename or complete path to the LXD ``lxc`` binary. ``PATH`` will
- be searched if given as a filename. Defaults to ``lxc``.
- :param str lxc_info_path:
- Filename or complete path to the LXC ``lxc-info`` binary. ``PATH``
- will be searched if given as a filename. Defaults to ``lxc-info``.
- :param str machinectl_path:
- Filename or complete path to the ``machinectl`` binary. ``PATH``
- will be searched if given as a filename. Defaults to
- ``machinectl``.
-
- .. method:: su (username=None, password=None, su_path=None, password_prompt=None, incorrect_prompts=None, \**kwargs)
-
- Construct a context on the local machine over a ``su`` invocation. The
- ``su`` process is started in a newly allocated pseudo-terminal, and
- supports typing interactive passwords.
-
- Accepts all parameters accepted by :meth:`local`, in addition to:
-
- :param str username:
- Username to pass to ``su``, defaults to ``root``.
- :param str password:
- The account password to use if requested.
- :param str su_path:
- Filename or complete path to the ``su`` binary. ``PATH`` will be
- searched if given as a filename. Defaults to ``su``.
- :param bytes password_prompt:
- The string that indicates ``su`` is requesting a password. Defaults
- to ``Password:``.
- :param str incorrect_prompts:
- Strings that signal the password is incorrect. Defaults to `("su:
- sorry", "su: authentication failure")`.
-
- :raises mitogen.su.PasswordError:
- A password was requested but none was provided, the supplied
- password was incorrect, or (on BSD) the target account did not
- exist.
-
- .. method:: sudo (username=None, sudo_path=None, password=None, \**kwargs)
-
- Construct a context on the local machine over a ``sudo`` invocation.
- The ``sudo`` process is started in a newly allocated pseudo-terminal,
- and supports typing interactive passwords.
-
- Accepts all parameters accepted by :meth:`local`, in addition to:
-
- :param str username:
- Username to pass to sudo as the ``-u`` parameter, defaults to
- ``root``.
- :param str sudo_path:
- Filename or complete path to the sudo binary. ``PATH`` will be
- searched if given as a filename. Defaults to ``sudo``.
- :param str password:
- The password to use if/when sudo requests it. Depending on the sudo
- configuration, this is either the current account password or the
- target account password. :class:`mitogen.sudo.PasswordError`
- will be raised if sudo requests a password but none is provided.
- :param bool set_home:
- If :data:`True`, request ``sudo`` set the ``HOME`` environment
- variable to match the target UNIX account.
- :param bool preserve_env:
- If :data:`True`, request ``sudo`` to preserve the environment of
- the parent process.
- :param list sudo_args:
- Arguments in the style of :data:`sys.argv` that would normally
- be passed to ``sudo``. The arguments are parsed in-process to set
- equivalent parameters. Re-parsing ensures unsupported options cause
- :class:`mitogen.core.StreamError` to be raised, and that
- attributes of the stream match the actual behaviour of ``sudo``.
-
- .. method:: ssh (hostname, username=None, ssh_path=None, ssh_args=None, port=None, check_host_keys='enforce', password=None, identity_file=None, identities_only=True, compression=True, \**kwargs)
-
- Construct a remote context over an OpenSSH ``ssh`` invocation.
-
- The ``ssh`` process is started in a newly allocated pseudo-terminal to
- support typing interactive passwords and responding to prompts, if a
- password is specified, or `check_host_keys=accept`. In other scenarios,
- ``BatchMode`` is enabled and no PTY is allocated. For many-target
- configurations, both options should be avoided as most systems have a
- conservative limit on the number of pseudo-terminals that may exist.
-
- Accepts all parameters accepted by :meth:`local`, in addition to:
-
- :param str username:
- The SSH username; default is unspecified, which causes SSH to pick
- the username to use.
- :param str ssh_path:
- Absolute or relative path to ``ssh``. Defaults to ``ssh``.
- :param list ssh_args:
- Additional arguments to pass to the SSH command.
- :param int port:
- Port number to connect to; default is unspecified, which causes SSH
- to pick the port number.
- :param str check_host_keys:
- Specifies the SSH host key checking mode. Defaults to ``enforce``.
-
- * ``ignore``: no host key checking is performed. Connections never
- fail due to an unknown or changed host key.
- * ``accept``: known hosts keys are checked to ensure they match,
- new host keys are automatically accepted and verified in future
- connections.
- * ``enforce``: known host keys are checked to ensure they match,
- unknown hosts cause a connection failure.
- :param str password:
- Password to type if/when ``ssh`` requests it. If not specified and
- a password is requested, :class:`mitogen.ssh.PasswordError` is
- raised.
- :param str identity_file:
- Path to an SSH private key file to use for authentication. Default
- is unspecified, which causes SSH to pick the identity file.
-
- When this option is specified, only `identity_file` will be used by
- the SSH client to perform authenticaion; agent authentication is
- automatically disabled, as is reading the default private key from
- ``~/.ssh/id_rsa``, or ``~/.ssh/id_dsa``.
- :param bool identities_only:
- If :data:`True` and a password or explicit identity file is
- specified, instruct the SSH client to disable any authentication
- identities inherited from the surrounding environment, such as
- those loaded in any running ``ssh-agent``, or default key files
- present in ``~/.ssh``. This ensures authentication attempts only
- occur using the supplied password or SSH key.
- :param bool compression:
- If :data:`True`, enable ``ssh`` compression support. Compression
- has a minimal effect on the size of modules transmitted, as they
- are already compressed, however it has a large effect on every
- remaining message in the otherwise uncompressed stream protocol,
- such as function call arguments and return values.
- :param int ssh_debug_level:
- Optional integer `0..3` indicating the SSH client debug level.
- :raises mitogen.ssh.PasswordError:
- A password was requested but none was specified, or the specified
- password was incorrect.
-
- :raises mitogen.ssh.HostKeyError:
- When `check_host_keys` is set to either ``accept``, indicates a
- previously recorded key no longer matches the remote machine. When
- set to ``enforce``, as above, but additionally indicates no
- previously recorded key exists for the remote machine.
+ :param str username:
+ Username to use, defaults to ``root``.
+ :param str password:
+ The account password to use if requested.
+ :param str doas_path:
+ Filename or complete path to the ``doas`` binary. ``PATH`` will be
+ searched if given as a filename. Defaults to ``doas``.
+ :param bytes password_prompt:
+ A string that indicates ``doas`` is requesting a password. Defaults
+ to ``Password:``.
+ :param list incorrect_prompts:
+        List of bytestrings indicating the password is incorrect. Defaults
+        to `(b"doas: authentication failed",)`.
+ :raises mitogen.doas.PasswordError:
+ A password was requested but none was provided, the supplied
+ password was incorrect, or the target account did not exist.
+
+.. method:: Router.docker (container=None, image=None, docker_path=None, \**kwargs)
+
+ Construct a context on the local machine within an existing or
+ temporary new Docker container using the ``docker`` program. One of
+ `container` or `image` must be specified.
+
+ Accepts all parameters accepted by :meth:`local`, in addition to:
+
+ :param str container:
+ Existing container to connect to. Defaults to :data:`None`.
+ :param str username:
+ Username within the container to :func:`setuid` to. Defaults to
+ :data:`None`, which Docker interprets as ``root``.
+ :param str image:
+ Image tag to use to construct a temporary container. Defaults to
+ :data:`None`.
+ :param str docker_path:
+ Filename or complete path to the Docker binary. ``PATH`` will be
+ searched if given as a filename. Defaults to ``docker``.
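+
+    As a minimal usage sketch, assuming an already-running container named
+    ``billing0`` (the name is illustrative):
+
+    .. code-block:: python
+
+        import os
+        import mitogen.utils
+
+        def main(router):
+            # Attach to the hypothetical 'billing0' container and run a
+            # function inside it.
+            ctx = router.docker(container='billing0')
+            print(ctx.call(os.getuid))
+
+        mitogen.utils.run_with_router(main)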
+
+.. method:: Router.jail (container, jexec_path=None, \**kwargs)
+
+ Construct a context on the local machine within a FreeBSD jail using
+ the ``jexec`` program.
+
+ Accepts all parameters accepted by :meth:`local`, in addition to:
+
+ :param str container:
+ Existing container to connect to. Defaults to :data:`None`.
+ :param str username:
+ Username within the container to :func:`setuid` to. Defaults to
+ :data:`None`, which ``jexec`` interprets as ``root``.
+ :param str jexec_path:
+ Filename or complete path to the ``jexec`` binary. ``PATH`` will be
+ searched if given as a filename. Defaults to ``/usr/sbin/jexec``.
+
+.. method:: Router.kubectl (pod, kubectl_path=None, kubectl_args=None, \**kwargs)
+
+ Construct a context in a container via the Kubernetes ``kubectl``
+ program.
+
+ Accepts all parameters accepted by :meth:`local`, in addition to:
+
+ :param str pod:
+ Kubernetes pod to connect to.
+ :param str kubectl_path:
+ Filename or complete path to the ``kubectl`` binary. ``PATH`` will
+ be searched if given as a filename. Defaults to ``kubectl``.
+ :param list kubectl_args:
+ Additional arguments to pass to the ``kubectl`` command.
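+
+    For example, a sketch assuming ``kubectl`` is already configured to reach
+    the cluster, and that the pod name and namespace below exist:
+
+    .. code-block:: python
+
+        import os
+        import mitogen.utils
+
+        def main(router):
+            # Pod name and namespace are illustrative.
+            ctx = router.kubectl(
+                pod='web-0',
+                kubectl_args=['--namespace', 'prod'],
+            )
+            print(ctx.call(os.getpid))
+
+        mitogen.utils.run_with_router(main)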
+
+.. method:: Router.lxc (container, lxc_attach_path=None, \**kwargs)
+
+ Construct a context on the local machine within an LXC classic
+ container using the ``lxc-attach`` program.
+
+ Accepts all parameters accepted by :meth:`local`, in addition to:
+
+ :param str container:
+ Existing container to connect to. Defaults to :data:`None`.
+ :param str lxc_attach_path:
+ Filename or complete path to the ``lxc-attach`` binary. ``PATH``
+ will be searched if given as a filename. Defaults to
+ ``lxc-attach``.
+
+.. method:: Router.lxd (container, lxc_path=None, \**kwargs)
+
+    Construct a context on the local machine within an LXD container using
+ the ``lxc`` program.
+
+ Accepts all parameters accepted by :meth:`local`, in addition to:
+
+ :param str container:
+ Existing container to connect to. Defaults to :data:`None`.
+ :param str lxc_path:
+ Filename or complete path to the ``lxc`` binary. ``PATH`` will be
+ searched if given as a filename. Defaults to ``lxc``.
+
+.. method:: Router.setns (container, kind, username=None, docker_path=None, lxc_info_path=None, machinectl_path=None, \**kwargs)
+
+ Construct a context in the style of :meth:`local`, but change the
+ active Linux process namespaces via calls to `setns(2)` before
+ executing Python.
+
+ The namespaces to use, and the active root file system are taken from
+ the root PID of a running Docker, LXC, LXD, or systemd-nspawn
+ container.
+
+ A program is required only to find the root PID, after which management
+ of the child Python interpreter is handled directly.
+
+ :param str container:
+ Container to connect to.
+ :param str kind:
+ One of ``docker``, ``lxc``, ``lxd`` or ``machinectl``.
+ :param str username:
+ Username within the container to :func:`setuid` to. Defaults to
+ ``root``.
+ :param str docker_path:
+ Filename or complete path to the Docker binary. ``PATH`` will be
+ searched if given as a filename. Defaults to ``docker``.
+ :param str lxc_path:
+ Filename or complete path to the LXD ``lxc`` binary. ``PATH`` will
+ be searched if given as a filename. Defaults to ``lxc``.
+ :param str lxc_info_path:
+ Filename or complete path to the LXC ``lxc-info`` binary. ``PATH``
+ will be searched if given as a filename. Defaults to ``lxc-info``.
+ :param str machinectl_path:
+ Filename or complete path to the ``machinectl`` binary. ``PATH``
+ will be searched if given as a filename. Defaults to
+ ``machinectl``.
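+
+    A sketch of attaching to a running LXC container; the container name is
+    illustrative, and the calling process must have sufficient privilege to
+    call `setns(2)`, typically root:
+
+    .. code-block:: python
+
+        import os
+        import mitogen.utils
+
+        def main(router):
+            # Enter the namespaces of the hypothetical 'db0' container.
+            ctx = router.setns(container='db0', kind='lxc')
+            print(ctx.call(os.listdir, '/'))
+
+        mitogen.utils.run_with_router(main)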
+
+.. method:: Router.su (username=None, password=None, su_path=None, password_prompt=None, incorrect_prompts=None, \**kwargs)
+
+ Construct a context on the local machine over a ``su`` invocation. The
+ ``su`` process is started in a newly allocated pseudo-terminal, and
+ supports typing interactive passwords.
+
+ Accepts all parameters accepted by :meth:`local`, in addition to:
+
+ :param str username:
+ Username to pass to ``su``, defaults to ``root``.
+ :param str password:
+ The account password to use if requested.
+ :param str su_path:
+ Filename or complete path to the ``su`` binary. ``PATH`` will be
+ searched if given as a filename. Defaults to ``su``.
+ :param bytes password_prompt:
+ The string that indicates ``su`` is requesting a password. Defaults
+ to ``Password:``.
+    :param list incorrect_prompts:
+ Strings that signal the password is incorrect. Defaults to `("su:
+ sorry", "su: authentication failure")`.
+
+ :raises mitogen.su.PasswordError:
+ A password was requested but none was provided, the supplied
+ password was incorrect, or (on BSD) the target account did not
+ exist.
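+
+    A sketch of becoming ``root`` on the local machine, prompting for the
+    password rather than embedding it in source:
+
+    .. code-block:: python
+
+        import getpass
+        import os
+        import mitogen.utils
+
+        def main(router):
+            root = router.su(password=getpass.getpass('su password: '))
+            print(root.call(os.getuid))  # prints 0
+
+        mitogen.utils.run_with_router(main)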
+
+.. method:: Router.sudo (username=None, sudo_path=None, password=None, \**kwargs)
+
+ Construct a context on the local machine over a ``sudo`` invocation.
+ The ``sudo`` process is started in a newly allocated pseudo-terminal,
+ and supports typing interactive passwords.
+
+ Accepts all parameters accepted by :meth:`local`, in addition to:
+
+ :param str username:
+ Username to pass to sudo as the ``-u`` parameter, defaults to
+ ``root``.
+ :param str sudo_path:
+ Filename or complete path to the sudo binary. ``PATH`` will be
+ searched if given as a filename. Defaults to ``sudo``.
+ :param str password:
+ The password to use if/when sudo requests it. Depending on the sudo
+ configuration, this is either the current account password or the
+ target account password. :class:`mitogen.sudo.PasswordError`
+ will be raised if sudo requests a password but none is provided.
+ :param bool set_home:
+ If :data:`True`, request ``sudo`` set the ``HOME`` environment
+ variable to match the target UNIX account.
+ :param bool preserve_env:
+ If :data:`True`, request ``sudo`` to preserve the environment of
+ the parent process.
+ :param str selinux_type:
+ If not :data:`None`, the SELinux security context to use.
+ :param str selinux_role:
+ If not :data:`None`, the SELinux role to use.
+ :param list sudo_args:
+ Arguments in the style of :data:`sys.argv` that would normally
+ be passed to ``sudo``. The arguments are parsed in-process to set
+ equivalent parameters. Re-parsing ensures unsupported options cause
+ :class:`mitogen.core.StreamError` to be raised, and that
+ attributes of the stream match the actual behaviour of ``sudo``.
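+
+    A sketch of stacking ``sudo`` on top of an SSH connection using the `via`
+    parameter accepted by the connection methods; the hostname is
+    illustrative:
+
+    .. code-block:: python
+
+        import os
+        import mitogen.utils
+
+        def main(router):
+            host = router.ssh(hostname='box.example.com')
+            # Become root on the remote machine, requesting HOME be set to
+            # match the target account.
+            root = router.sudo(via=host, set_home=True)
+            print(root.call(os.path.expanduser, '~'))
+
+        mitogen.utils.run_with_router(main)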
+
+.. method:: Router.ssh (hostname, username=None, ssh_path=None, ssh_args=None, port=None, check_host_keys='enforce', password=None, identity_file=None, identities_only=True, compression=True, \**kwargs)
+
+ Construct a remote context over an OpenSSH ``ssh`` invocation.
+
+    The ``ssh`` process is started in a newly allocated pseudo-terminal when
+    a password is specified or `check_host_keys=accept` is in use, to support
+    typing interactive passwords and responding to prompts. In other
+    scenarios, ``BatchMode`` is enabled and no PTY is allocated. For
+    many-target configurations, both options should be avoided, as most
+    systems have a conservative limit on the number of pseudo-terminals that
+    may exist.
+
+ Accepts all parameters accepted by :meth:`local`, in addition to:
+
+ :param str username:
+ The SSH username; default is unspecified, which causes SSH to pick
+ the username to use.
+ :param str ssh_path:
+ Absolute or relative path to ``ssh``. Defaults to ``ssh``.
+ :param list ssh_args:
+ Additional arguments to pass to the SSH command.
+ :param int port:
+ Port number to connect to; default is unspecified, which causes SSH
+ to pick the port number.
+ :param str check_host_keys:
+ Specifies the SSH host key checking mode. Defaults to ``enforce``.
+
+ * ``ignore``: no host key checking is performed. Connections never
+ fail due to an unknown or changed host key.
+        * ``accept``: known host keys are checked to ensure they match,
+ new host keys are automatically accepted and verified in future
+ connections.
+ * ``enforce``: known host keys are checked to ensure they match,
+ unknown hosts cause a connection failure.
+ :param str password:
+ Password to type if/when ``ssh`` requests it. If not specified and
+ a password is requested, :class:`mitogen.ssh.PasswordError` is
+ raised.
+ :param str identity_file:
+ Path to an SSH private key file to use for authentication. Default
+ is unspecified, which causes SSH to pick the identity file.
+
+ When this option is specified, only `identity_file` will be used by
+        the SSH client to perform authentication; agent authentication is
+ automatically disabled, as is reading the default private key from
+ ``~/.ssh/id_rsa``, or ``~/.ssh/id_dsa``.
+ :param bool identities_only:
+ If :data:`True` and a password or explicit identity file is
+ specified, instruct the SSH client to disable any authentication
+ identities inherited from the surrounding environment, such as
+ those loaded in any running ``ssh-agent``, or default key files
+ present in ``~/.ssh``. This ensures authentication attempts only
+ occur using the supplied password or SSH key.
+ :param bool compression:
+ If :data:`True`, enable ``ssh`` compression support. Compression
+ has a minimal effect on the size of modules transmitted, as they
+ are already compressed, however it has a large effect on every
+ remaining message in the otherwise uncompressed stream protocol,
+ such as function call arguments and return values.
+ :param int ssh_debug_level:
+ Optional integer `0..3` indicating the SSH client debug level.
+ :raises mitogen.ssh.PasswordError:
+ A password was requested but none was specified, or the specified
+ password was incorrect.
+
+ :raises mitogen.ssh.HostKeyError:
+        When `check_host_keys` is set to ``accept``, indicates a previously
+        recorded key no longer matches the remote machine. When set to
+        ``enforce``, as above, but additionally raised when no previously
+        recorded key exists for the remote machine.
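+
+    A sketch of connecting as a non-default user on a non-default port,
+    recording the host key on first use; the hostname is illustrative:
+
+    .. code-block:: python
+
+        import socket
+        import mitogen.utils
+
+        def main(router):
+            ctx = router.ssh(
+                hostname='web1.example.com',
+                username='deploy',
+                port=2222,
+                check_host_keys='accept',
+            )
+            print(ctx.call(socket.gethostname))
+
+        mitogen.utils.run_with_router(main)
+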
Context Class
=============
.. currentmodule:: mitogen.core
-
-.. class:: Context
-
- Represent a remote context regardless of connection method.
-
- **Note:** This is the somewhat limited core version of the Context class
- used by child contexts. The master subclass is documented below this one.
-
- .. method:: send (msg)
-
- Arrange for `msg` to be delivered to this context.
- :attr:`dst_id ` is set to the target context ID.
-
- :param mitogen.core.Message msg:
- The message.
-
- .. method:: send_async (msg, persist=False)
-
- Arrange for `msg` to be delivered to this context, with replies
- directed to a newly constructed receiver. :attr:`dst_id
- ` is set to the target context ID, and :attr:`reply_to
- ` is set to the newly constructed receiver's handle.
-
- :param bool persist:
- If :data:`False`, the handler will be unregistered after a single
- message has been received.
-
- :param mitogen.core.Message msg:
- The message.
-
- :returns:
- :class:`mitogen.core.Receiver` configured to receive any replies
- sent to the message's `reply_to` handle.
-
- .. method:: send_await (msg, deadline=None)
-
- Like :meth:`send_async`, but expect a single reply (`persist=False`)
- delivered within `deadline` seconds.
-
- :param mitogen.core.Message msg:
- The message.
- :param float deadline:
- If not :data:`None`, seconds before timing out waiting for a reply.
- :returns:
- The deserialized reply.
- :raises mitogen.core.TimeoutError:
- No message was received and `deadline` passed.
+.. autoclass:: Context
+ :members:
.. currentmodule:: mitogen.parent
@@ -853,458 +554,55 @@ Context Class
.. autoclass:: CallChain
:members:
-.. class:: Context
-
- Extend :class:`mitogen.core.Context` with functionality useful to masters,
- and child contexts who later become parents. Currently when this class is
- required, the target context's router is upgraded at runtime.
-
- .. attribute:: default_call_chain
-
- A :class:`CallChain` instance constructed by default, with pipelining
- disabled. :meth:`call`, :meth:`call_async` and :meth:`call_no_reply`
- use this instance.
-
- .. method:: shutdown (wait=False)
-
- Arrange for the context to receive a ``SHUTDOWN`` message, triggering
- graceful shutdown.
-
- Due to a lack of support for timers, no attempt is made yet to force
- terminate a hung context using this method. This will be fixed shortly.
-
- :param bool wait:
- If :data:`True`, block the calling thread until the context has
- completely terminated.
- :returns:
- If `wait` is :data:`False`, returns a :class:`mitogen.core.Latch`
- whose :meth:`get() ` method returns
- :data:`None` when shutdown completes. The `timeout` parameter may
- be used to implement graceful timeouts.
-
- .. method:: call_async (fn, \*args, \*\*kwargs)
-
- See :meth:`CallChain.call_async`.
-
- .. method:: call (fn, \*args, \*\*kwargs)
-
- See :meth:`CallChain.call`.
-
- .. method:: call_no_reply (fn, \*args, \*\*kwargs)
-
- See :meth:`CallChain.call_no_reply`.
+.. autoclass:: Context
+ :members:
Receiver Class
==============
.. currentmodule:: mitogen.core
-
-.. class:: Receiver (router, handle=None, persist=True, respondent=None)
-
- Receivers are used to wait for pickled responses from another context to be
- sent to a handle registered in this context. A receiver may be single-use
- (as in the case of :meth:`mitogen.parent.Context.call_async`) or
- multiple use.
-
- :param mitogen.core.Router router:
- Router to register the handler on.
-
- :param int handle:
- If not :data:`None`, an explicit handle to register, otherwise an
- unused handle is chosen.
-
- :param bool persist:
- If :data:`True`, do not unregister the receiver's handler after the
- first message.
-
- :param mitogen.core.Context respondent:
- Reference to the context this receiver is receiving from. If not
- :data:`None`, arranges for the receiver to receive a dead message if
- messages can no longer be routed to the context, due to disconnection
- or exit.
-
- .. attribute:: notify = None
-
- If not :data:`None`, a reference to a function invoked as
- `notify(receiver)` when a new message is delivered to this receiver.
- Used by :class:`mitogen.select.Select` to implement waiting on
- multiple receivers.
-
- .. py:method:: to_sender ()
-
- Return a :class:`mitogen.core.Sender` configured to deliver messages
- to this receiver. Since a Sender can be serialized, this makes it
- convenient to pass `(context_id, handle)` pairs around::
-
- def deliver_monthly_report(sender):
- for line in open('monthly_report.txt'):
- sender.send(line)
- sender.close()
-
- remote = router.ssh(hostname='mainframe')
- recv = mitogen.core.Receiver(router)
- remote.call(deliver_monthly_report, recv.to_sender())
- for msg in recv:
- print(msg)
-
- .. py:method:: empty ()
-
- Return :data:`True` if calling :meth:`get` would block.
-
- As with :class:`Queue.Queue`, :data:`True` may be returned even
- though a subsequent call to :meth:`get` will succeed, since a
- message may be posted at any moment between :meth:`empty` and
- :meth:`get`.
-
- :meth:`empty` is only useful to avoid a race while installing
- :attr:`notify`:
-
- .. code-block:: python
-
- recv.notify = _my_notify_function
- if not recv.empty():
- _my_notify_function(recv)
-
- # It is guaranteed the receiver was empty after the notification
- # function was installed, or that it was non-empty and the
- # notification function was invoked at least once.
-
- .. py:method:: close ()
-
- Cause :class:`mitogen.core.ChannelError` to be raised in any thread
- waiting in :meth:`get` on this receiver.
-
- .. py:method:: get (timeout=None)
-
- Sleep waiting for a message to arrive on this receiver.
-
- :param float timeout:
- If not :data:`None`, specifies a timeout in seconds.
-
- :raises mitogen.core.ChannelError:
- The remote end indicated the channel should be closed, or
- communication with its parent context was lost.
-
- :raises mitogen.core.TimeoutError:
- Timeout was reached.
-
- :returns:
- `(msg, data)` tuple, where `msg` is the
- :class:`mitogen.core.Message` that was received, and `data` is
- its unpickled data part.
-
- .. py:method:: get_data (timeout=None)
-
- Like :meth:`get`, except only return the data part.
-
- .. py:method:: __iter__ ()
-
- Block and yield `(msg, data)` pairs delivered to this receiver until
- :class:`mitogen.core.ChannelError` is raised.
+.. autoclass:: Receiver
+ :members:
Sender Class
============
.. currentmodule:: mitogen.core
-
-.. class:: Sender (context, dst_handle)
-
- Senders are used to send pickled messages to a handle in another context,
- it is the inverse of :class:`mitogen.core.Sender`.
-
- Senders may be serialized, making them convenient to wire up data flows.
- See :meth:`mitogen.core.Receiver.to_sender` for more information.
-
- :param mitogen.core.Context context:
- Context to send messages to.
- :param int dst_handle:
- Destination handle to send messages to.
-
- .. py:method:: close ()
-
- Send a dead message to the remote end, causing :meth:`ChannelError`
- to be raised in any waiting thread.
-
- .. py:method:: send (data)
-
- Send `data` to the remote end.
+.. autoclass:: Sender
+ :members:
Select Class
============
.. module:: mitogen.select
-
.. currentmodule:: mitogen.select
-
-.. class:: Select (receivers=(), oneshot=True)
-
- Support scatter/gather asynchronous calls and waiting on multiple
- receivers, channels, and sub-Selects. Accepts a sequence of
- :class:`mitogen.core.Receiver` or :class:`mitogen.select.Select`
- instances and returns the first value posted to any receiver or select.
-
- If `oneshot` is :data:`True`, then remove each receiver as it yields a
- result; since :meth:`__iter__` terminates once the final receiver is
- removed, this makes it convenient to respond to calls made in parallel:
-
- .. code-block:: python
-
- total = 0
- recvs = [c.call_async(long_running_operation) for c in contexts]
-
- for msg in mitogen.select.Select(recvs):
- print('Got %s from %s' % (msg, msg.receiver))
- total += msg.unpickle()
-
- # Iteration ends when last Receiver yields a result.
- print('Received total %s from %s receivers' % (total, len(recvs)))
-
- :class:`Select` may drive a long-running scheduler:
-
- .. code-block:: python
-
- with mitogen.select.Select(oneshot=False) as select:
- while running():
- for msg in select:
- process_result(msg.receiver.context, msg.unpickle())
- for context, workfunc in get_new_work():
- select.add(context.call_async(workfunc))
-
- :class:`Select` may be nested:
-
- .. code-block:: python
-
- subselects = [
- mitogen.select.Select(get_some_work()),
- mitogen.select.Select(get_some_work()),
- mitogen.select.Select([
- mitogen.select.Select(get_some_work()),
- mitogen.select.Select(get_some_work())
- ])
- ]
-
- for msg in mitogen.select.Select(selects):
- print(msg.unpickle())
-
- .. py:classmethod:: all (it)
-
- Take an iterable of receivers and retrieve a :class:`Message` from
- each, returning the result of calling `msg.unpickle()` on each in turn.
- Results are returned in the order they arrived.
-
- This is sugar for handling batch
- :meth:`Context.call_async `
- invocations:
-
- .. code-block:: python
-
- print('Total disk usage: %.02fMiB' % (sum(
- mitogen.select.Select.all(
- context.call_async(get_disk_usage)
- for context in contexts
- ) / 1048576.0
- ),))
-
- However, unlike in a naive comprehension such as:
-
- .. code-block:: python
-
- recvs = [c.call_async(get_disk_usage) for c in contexts]
- sum(recv.get().unpickle() for recv in recvs)
-
- Result processing happens in the order results arrive, rather than the
- order requests were issued, so :meth:`all` should always be faster.
-
- .. py:method:: get (timeout=None, block=True)
-
- Fetch the next available value from any receiver, or raise
- :class:`mitogen.core.TimeoutError` if no value is available within
- `timeout` seconds.
-
- On success, the message's :attr:`receiver
- ` attribute is set to the receiver.
-
- :param float timeout:
- Timeout in seconds.
- :param bool block:
- If :data:`False`, immediately raise
- :class:`mitogen.core.TimeoutError` if the select is empty.
- :return:
- :class:`mitogen.core.Message`
- :raises mitogen.core.TimeoutError:
- Timeout was reached.
- :raises mitogen.core.LatchError:
- :meth:`close` has been called, and the underlying latch is no
- longer valid.
-
- .. py:method:: __bool__ ()
-
- Return :data:`True` if any receivers are registered with this select.
-
- .. py:method:: close ()
-
- Remove the select's notifier function from each registered receiver,
- mark the associated latch as closed, and cause any thread currently
- sleeping in :meth:`get` to be woken with
- :class:`mitogen.core.LatchError`.
-
- This is necessary to prevent memory leaks in long-running receivers. It
- is called automatically when the Python :keyword:`with` statement is
- used.
-
- .. py:method:: empty ()
-
- Return :data:`True` if calling :meth:`get` would block.
-
- As with :class:`Queue.Queue`, :data:`True` may be returned even
- though a subsequent call to :meth:`get` will succeed, since a
- message may be posted at any moment between :meth:`empty` and
- :meth:`get`.
-
- :meth:`empty` may return :data:`False` even when :meth:`get`
- would block if another thread has drained a receiver added to this
- select. This can be avoided by only consuming each receiver from a
- single thread.
-
- .. py:method:: __iter__ (self)
-
- Yield the result of :meth:`get` until no receivers remain in the
- select, either because `oneshot` is :data:`True`, or each receiver was
- explicitly removed via :meth:`remove`.
-
- .. py:method:: add (recv)
-
- Add the :class:`mitogen.core.Receiver` or
- :class:`mitogen.core.Channel` `recv` to the select.
-
- .. py:method:: remove (recv)
-
- Remove the :class:`mitogen.core.Receiver` or
- :class:`mitogen.core.Channel` `recv` from the select. Note that if
- the receiver has notified prior to :meth:`remove`, then it will
- still be returned by a subsequent :meth:`get`. This may change in a
- future version.
+.. autoclass:: Select
+ :members:
Channel Class
=============
.. currentmodule:: mitogen.core
+.. autoclass:: Channel
+ :members:
-.. class:: Channel (router, context, dst_handle, handle=None)
-
- A channel inherits from :class:`mitogen.core.Sender` and
- `mitogen.core.Receiver` to provide bidirectional functionality.
-
- Since all handles aren't known until after both ends are constructed, for
- both ends to communicate through a channel, it is necessary for one end to
- retrieve the handle allocated to the other and reconfigure its own channel
- to match. Currently this is a manual task.
Broker Class
============
.. currentmodule:: mitogen.core
-.. class:: Broker
-
- Responsible for handling I/O multiplexing in a private thread.
-
- **Note:** This is the somewhat limited core version of the Broker class
- used by child contexts. The master subclass is documented below.
-
- .. attribute:: shutdown_timeout = 3.0
-
- Seconds grace to allow :class:`streams ` to shutdown
- gracefully before force-disconnecting them during :meth:`shutdown`.
-
- .. method:: defer (func, \*args, \*kwargs)
-
- Arrange for `func(\*args, \**kwargs)` to be executed on the broker
- thread, or immediately if the current thread is the broker thread. Safe
- to call from any thread.
-
- .. method:: start_receive (stream)
-
- Mark the :attr:`receive_side ` on `stream` as
- ready for reading. Safe to call from any thread. When the associated
- file descriptor becomes ready for reading,
- :meth:`BasicStream.on_receive` will be called.
-
- .. method:: stop_receive (stream)
-
- Mark the :attr:`receive_side ` on `stream` as
- not ready for reading. Safe to call from any thread.
-
- .. method:: _start_transmit (stream)
-
- Mark the :attr:`transmit_side ` on `stream` as
- ready for writing. Must only be called from the Broker thread. When the
- associated file descriptor becomes ready for writing,
- :meth:`BasicStream.on_transmit` will be called.
-
- .. method:: stop_receive (stream)
-
- Mark the :attr:`transmit_side ` on `stream` as
- not ready for writing. Safe to call from any thread.
-
- .. method:: shutdown
-
- Request broker gracefully disconnect streams and stop.
-
- .. method:: join
-
- Wait for the broker to stop, expected to be called after
- :meth:`shutdown`.
-
- .. method:: keep_alive
-
- Return :data:`True` if any reader's :attr:`Side.keep_alive`
- attribute is :data:`True`, or any
- :class:`Context ` is still
- registered that is not the master. Used to delay shutdown while some
- important work is in progress (e.g. log draining).
-
- **Internal Methods**
-
- .. method:: _broker_main
-
- Handle events until :meth:`shutdown`. On shutdown, invoke
- :meth:`Stream.on_shutdown` for every active stream, then allow up to
- :attr:`shutdown_timeout` seconds for the streams to unregister
- themselves before forcefully calling
- :meth:`Stream.on_disconnect`.
+.. autoclass:: Broker
+ :members:
.. currentmodule:: mitogen.master
-.. class:: Broker (install_watcher=True)
-
- .. note::
-
- You may construct as many brokers as desired, and use the same broker
- for multiple routers, however usually only one broker need exist.
- Multiple brokers may be useful when dealing with sets of children with
- differing lifetimes. For example, a subscription service where
- non-payment results in termination for one customer.
-
- :param bool install_watcher:
- If :data:`True`, an additional thread is started to monitor the
- lifetime of the main thread, triggering :meth:`shutdown`
- automatically in case the user forgets to call it, or their code
- crashed.
-
- You should not rely on this functionality in your program, it is only
- intended as a fail-safe and to simplify the API for new users. In
- particular, alternative Python implementations may not be able to
- support watching the main thread.
-
- .. attribute:: shutdown_timeout = 5.0
-
- Seconds grace to allow :class:`streams ` to shutdown
- gracefully before force-disconnecting them during :meth:`shutdown`.
+.. autoclass:: Broker
+ :members:
Utility Functions
@@ -1315,64 +613,14 @@ Utility Functions
A random assortment of utility functions useful on masters and children.
.. currentmodule:: mitogen.utils
-.. function:: cast (obj)
-
- Many tools love to subclass built-in types in order to implement useful
- functionality, such as annotating the safety of a Unicode string, or adding
- additional methods to a dict. However, cPickle loves to preserve those
- subtypes during serialization, resulting in CallError during :meth:`call
- ` in the target when it tries to deserialize
- the data.
-
- This function walks the object graph `obj`, producing a copy with any
- custom sub-types removed. The functionality is not default since the
- resulting walk may be computationally expensive given a large enough graph.
-
- See :ref:`serialization-rules` for a list of supported types.
-
- :param obj:
- Object to undecorate.
- :returns:
- Undecorated object.
-
-.. currentmodule:: mitogen.utils
-.. function:: disable_site_packages
-
- Remove all entries mentioning ``site-packages`` or ``Extras`` from the
- system path. Used primarily for testing on OS X within a virtualenv, where
- OS X bundles some ancient version of the :mod:`six` module.
-
-.. currentmodule:: mitogen.utils
-.. function:: log_to_file (path=None, io=False, level='INFO')
-
- Install a new :class:`logging.Handler` writing applications logs to the
- filesystem. Useful when debugging slave IO problems.
-
- Parameters to this function may be overridden at runtime using environment
- variables. See :ref:`logging-env-vars`.
+.. autofunction:: cast
- :param str path:
- If not :data:`None`, a filesystem path to write logs to. Otherwise,
- logs are written to :data:`sys.stderr`.
-
- :param bool io:
- If :data:`True`, include extremely verbose IO logs in the output.
- Useful for debugging hangs, less useful for debugging application code.
-
- :param str level:
- Name of the :mod:`logging` package constant that is the minimum
- level to log at. Useful levels are ``DEBUG``, ``INFO``, ``WARNING``,
- and ``ERROR``.
.. currentmodule:: mitogen.utils
-.. function:: run_with_router(func, \*args, \**kwargs)
-
- Arrange for `func(router, \*args, \**kwargs)` to run with a temporary
- :class:`mitogen.master.Router`, ensuring the Router and Broker are
- correctly shut down during normal or exceptional return.
-
- :returns:
- `func`'s return value.
+.. autofunction:: setup_gil
+.. autofunction:: disable_site_packages
+.. autofunction:: log_to_file
+.. autofunction:: run_with_router(func, \*args, \**kwargs)
.. currentmodule:: mitogen.utils
.. decorator:: with_router
diff --git a/docs/changelog.rst b/docs/changelog.rst
index 099b253b..bcca6088 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -15,6 +15,528 @@ Release Notes
+.. _known_issues:
+
+Known Issues
+------------
+
+Mitogen For Ansible
+~~~~~~~~~~~~~~~~~~~
+
+* The Ansible 2.7 `reboot
+ `_ module
+ may require a ``pre_reboot_delay`` on systemd hosts, as insufficient time
+ exists for the reboot command's exit status to be reported before necessary
+ processes are torn down.
+
+* On OS X when a SSH password is specified and the default connection type of
+ ``smart`` is used, Ansible may select the Paramiko plug-in rather than
+ Mitogen. If you specify a password on OS X, ensure ``connection: ssh``
+ appears in your playbook, ``ansible.cfg``, or as ``-c ssh`` on the
+ command-line.
+
+* The ``raw`` action executes as a regular Mitogen connection, which requires
+ Python on the target, precluding its use for installing Python. This will be
+ addressed in a future 0.2 release. For now, simply mix Mitogen and vanilla
+ Ansible strategies in your playbook:
+
+ .. code-block:: yaml
+
+ - hosts: web-servers
+ strategy: linear
+ tasks:
+ - name: Install Python if necessary.
+ raw: test -e /usr/bin/python || apt install -y python-minimal
+
+ - hosts: web-servers
+ strategy: mitogen_linear
+ roles:
+ - nginx
+ - initech_app
+ - y2k_fix
+
+.. * When running with ``-vvv``, log messages will be printed to the console
+ *after* the Ansible run completes, as connection multiplexer shutdown only
+ begins after Ansible exits. This is due to a lack of suitable shutdown hook
+ in Ansible, and is fairly harmless, albeit cosmetically annoying. A future
+ release may include a solution.
+
+.. * Configurations will break that rely on the `hashbang argument splitting
+ behaviour `_ of the
+ ``ansible_python_interpreter`` setting, contrary to the Ansible
+ documentation. This will be addressed in a future 0.2 release.
+
+* Performance does not scale linearly with target count. This requires
+ significant additional work, as major bottlenecks exist in the surrounding
+ Ansible code. Performance-related bug reports for any scenario remain
+ welcome with open arms.
+
+* Performance on Python 3 is significantly worse than on Python 2. While this
+ has not yet been investigated, at least some of the regression appears to be
+ part of the core library, and should therefore be straightforward to fix as
+ part of 0.2.x.
+
+* *Module Replacer* style Ansible modules are not supported.
+
+* Actions are single-threaded for each `(host, user account)` combination,
+ including actions that execute on the local machine. Playbooks may experience
+ slowdown compared to vanilla Ansible if they employ long-running
+ ``local_action`` or ``delegate_to`` tasks delegating many target hosts to a
+ single machine and user account.
+
+* Connection Delegation remains in preview and has bugs around how it infers
+ connections. Connection establishment will remain single-threaded for the 0.2
+ series, however connection inference bugs will be addressed in a future 0.2
+ release.
+
+* Connection Delegation does not support automatic tunnelling of SSH-dependent
+ actions, such as the ``synchronize`` module. This will be addressed in the
+ 0.3 series.
+
+
+Core Library
+~~~~~~~~~~~~
+
+* Serialization is still based on :mod:`pickle`. While there is high confidence
+ remote code execution is impossible in Mitogen's configuration, an untrusted
+  context may at least trigger disproportionately high memory usage by
+  injecting small messages (*"billion laughs attack"*). Replacement is an
+  important future priority, but not critical for an initial release.
+
+* Child processes are not reliably reaped, leading to a pileup of zombie
+ processes when a program makes many short-lived connections in a single
+  invocation. This does not impact Mitogen for Ansible; however, it limits the
+ usefulness of the core library. A future 0.2 release will address it.
+
+* Some races remain around :class:`mitogen.core.Broker` destruction,
+ disconnection and corresponding file descriptor closure. These are only
+ problematic in situations where child process reaping is also problematic.
+
+* The `fakessh` component does not shut down correctly and requires flow
+ control added to the design. While minimal fixes are possible, due to the
+ absence of flow control the original design is functionally incomplete.
+
+* The multi-threaded :ref:`service` remains in a state of design flux and
+ should be considered obsolete, despite heavy use in Mitogen for Ansible. A
+  future replacement may be integrated more tightly with, or entirely
+  replace, the RPC dispatcher on the main thread.
+
+* Documentation is in a state of disrepair. This will be improved over the 0.2
+ series.
+
+
+v0.2.4 (2018-??-??)
+-------------------
+
+Mitogen for Ansible
+~~~~~~~~~~~~~~~~~~~
+
+This release includes a huge variety of important fixes and new optimizations.
+It is 35% faster than 0.2.3 on a synthetic 64-target run that places heavy load
+on the connection multiplexer.
+
+Enhancements
+^^^^^^^^^^^^
+
+* `#76 `_,
+ `#351 `_,
+ `#352 `_: disconnect propagation
+ has improved, allowing Ansible to cancel waits for responses from abruptly
+ disconnected targets. This ensures a task will reliably fail rather than
+ hang, for example on network failure or EC2 instance maintenance.
+
+* `#369 `_,
+ `#407 `_: :meth:`Connection.reset`
+ is implemented, allowing `meta: reset_connection
+ `_ to shut
+ down the remote interpreter as documented, and improving support for the
+ `reboot
+ `_
+ module.
+
+* `09aa27a6 `_: the
+ ``mitogen_host_pinned`` strategy wraps the ``host_pinned`` strategy
+ introduced in Ansible 2.7.
+
+* `#477 `_: Python 2.4 is fully
+ supported by the core library and tested automatically, in any parent/child
+ combination of 2.4, 2.6, 2.7 and 3.6 interpreters.
+
+* `#477 `_: Ansible 2.3 is fully
+ supported and tested automatically. In combination with the core library
+ Python 2.4 support, this allows Red Hat Enterprise Linux 5 targets to be
+ managed with Mitogen. The ``simplejson`` package need not be installed on
+ such targets, as is usually required by Ansible.
+
+* `#412 `_: to simplify diagnosing
+ connection configuration problems, Mitogen ships a ``mitogen_get_stack``
+ action that is automatically added to the action plug-in path. See
+ :ref:`mitogen-get-stack` for more information.
+
+* `152effc2 `_,
+ `bd4b04ae `_: a CPU affinity
+ policy was added for Linux controllers, reducing latency and SMP overhead on
+ hot paths exercised for every task. This yielded a 19% speedup in a 64-target
+ job composed of many short tasks, and should easily be visible as a runtime
+ improvement in many-host runs.
+
+* `2b44d598 `_: work around a
+ defective caching mechanism by pre-heating it before spawning workers. This
+ saves 40% runtime on a synthetic repetitive task.
+
+* `0979422a `_: an expensive
+ dependency scanning step was redundantly invoked for every task,
+ bottlenecking the connection multiplexer.
+
+* `eaa990a97 `_: a new
+ ``mitogen_ssh_compression`` variable is supported, allowing Mitogen's default
+ SSH compression to be disabled. SSH compression is a large contributor to CPU
+ usage in many-target runs, and severely limits file transfer. On a `"shell:
+ hostname"` task repeated 500 times, Mitogen requires around 800 bytes per
+ task with compression, rising to 3 KiB without. File transfer throughput
+ rises from ~25MiB/s when enabled to ~200MiB/s when disabled.
+
+* `#260 `_,
+ `a18a083c `_: brokers no
+ longer wait for readiness indication to transmit, and instead assume
+ transmission will succeed. As this is usually true, one loop iteration and
+ two poller reconfigurations are avoided, yielding a significant reduction in
+ interprocess round-trip latency.
+
+* `#415 `_,
+ `#491 `_,
+ `#493 `_: the interface employed
+ for in-process queues changed from `kqueue
+ `_ / `epoll
+ `_ to `poll()
+ `_, which requires no setup
+ or teardown, yielding a 38% latency reduction for inter-thread communication.
+
+
+Fixes
+^^^^^
+
+* `#251 `_,
+ `#359 `_,
+ `#396 `_,
+ `#401 `_,
+ `#404 `_,
+ `#412 `_,
+ `#434 `_,
+ `#436 `_,
+ `#465 `_: connection delegation and
+ ``delegate_to:`` handling suffered a major regression in 0.2.3. The 0.2.2
+  behaviour has been restored, and further work has been done to improve the
+ compatibility of connection delegation's configuration building methods.
+
+* `#323 `_,
+ `#333 `_: work around a Windows
+ Subsystem for Linux bug that caused tracebacks to appear during shutdown.
+
+* `#334 `_: the SSH method
+ tilde-expands private key paths using Ansible's logic. Previously the path
+ was passed unmodified to SSH, which expanded it using :func:`pwd.getpwnam`.
+ This differs from :func:`os.path.expanduser`, which uses the ``HOME``
+ environment variable if it is set, causing behaviour to diverge when Ansible
+ was invoked across user accounts via ``sudo``.
+
+* `#364 `_: file transfers from
+ controllers running Python 2.7.2 or earlier could be interrupted due to a
+ forking bug in the :mod:`tempfile` module.
+
+* `#370 `_: the Ansible
+ `reboot `_
+ module is supported.
+
+* `#373 `_: the LXC and LXD methods
+ print a useful hint on failure, as no useful error is normally logged to the
+ console by these tools.
+
+* `#374 `_,
+ `#391 `_: file transfer and module
+ execution from 2.x controllers to 3.x targets was broken due to a regression
+ caused by refactoring, and compounded by `#426
+ `_.
+
+* `#400 `_: work around a threading
+  bug in the AWX display callback when running with a high verbosity setting.
+
+* `#409 `_: the setns method was
+ silently broken due to missing tests. Basic coverage was added to prevent a
+ recurrence.
+
+* `#409 `_: the LXC and LXD methods
+ support ``mitogen_lxc_path`` and ``mitogen_lxc_attach_path`` variables to
+  control the location of third-party utilities.
+
+* `#410 `_: the sudo method supports
+ the SELinux ``--type`` and ``--role`` options.
+
+* `#420 `_: if a :class:`Connection`
+ was constructed in the Ansible top-level process, for example while executing
+ ``meta: reset_connection``, resources could become undesirably shared in
+ subsequent children.
+
+* `#426 `_: an oversight while
+ porting to Python 3 meant no automated 2->3 tests were running. A significant
+ number of 2->3 bugs were fixed, mostly in the form of Unicode/bytes
+ mismatches.
+
+* `#429 `_: the ``sudo`` method can
+ now recognize internationalized password prompts.
+
+* `#362 `_,
+ `#435 `_: the previous fix for slow
+ Python 2.x subprocess creation on Red Hat caused newly spawned children to
+ have a reduced open files limit. A more intrusive fix has been added to
+ directly address the problem without modifying the subprocess environment.
+
+* `#397 `_,
+ `#454 `_: the previous approach to
+ handling modern Ansible temporary file cleanup was too aggressive, and could
+ trigger early finalization of Cython-based extension modules, leading to
+ segmentation faults.
+
+* `#499 `_: the ``allow_same_user``
+ Ansible configuration setting is respected.
+
+* `#527 `_: crashes in modules are
+ trapped and reported in a manner that matches Ansible. In particular, a
+ module crash no longer leads to an exception that may crash the corresponding
+ action plug-in.
+
+* `dc1d4251 `_: the
+ ``synchronize`` module could fail with the Docker transport due to a missing
+ attribute.
+
+* `599da068 `_: fix a race
+ when starting async tasks, where it was possible for the controller to
+ observe no status file on disk before the task had a chance to write one.
+
+* `2c7af9f04 `_: Ansible
+  modules were repeatedly re-transferred. The bug was masked by SSH
+  compression, which was previously enabled unconditionally.
+
+
+Core Library
+~~~~~~~~~~~~
+
+* `#76 `_: routing records the
+ destination context IDs ever received on each stream, and when disconnection
+ occurs, propagates :data:`mitogen.core.DEL_ROUTE` messages towards every
+ stream that ever communicated with the disappearing peer, rather than simply
+ towards parents. Conversations between nodes anywhere in the tree receive
+ :data:`mitogen.core.DEL_ROUTE` when either participant disconnects, allowing
+ receivers to wake with :class:`mitogen.core.ChannelError`, even when one
+ participant is not a parent of the other.
+
+* `#109 `_,
+ `57504ba6 `_: newer Python 3
+ releases explicitly populate :data:`sys.meta_path` with importer internals,
+ causing Mitogen to install itself at the end of the importer chain rather
+ than the front.
+
+* `#310 `_: support has returned for
+  determining the real source of non-module objects installed in
+ :data:`sys.modules`, so they can be imported. This is needed to handle syntax
+ sugar used by packages like :mod:`plumbum`.
+
+* `#349 `_: an incorrect format
+ string could cause large stack traces when attempting to import built-in
+ modules on Python 3.
+
+* `#387 `_,
+ `#413 `_: dead messages include an
+ optional reason in their body. This is used to cause
+ :class:`mitogen.core.ChannelError` to report far more useful diagnostics at
+ the point the error occurs that previously would have been buried in debug
+ log output from an unrelated context.
+
+* `#408 `_: a variety of fixes were
+ made to restore Python 2.4 compatibility.
+
+* `#399 `_,
+ `#437 `_: ignore a
+ :class:`DeprecationWarning` to avoid failure of the ``su`` method on Python
+ 3.7.
+
+* `#405 `_: if an oversized message
+ is rejected, and it has a ``reply_to`` set, a dead message is returned to the
+ sender. This ensures function calls exceeding the configured maximum size
+ crash rather than hang.
+
+* `#406 `_:
+ :class:`mitogen.core.Broker` did not call :meth:`mitogen.core.Poller.close`
+ during shutdown, leaking the underlying poller FD in masters and parents.
+
+* `#406 `_: connections could leak
+ FDs when a child process failed to start.
+
+* `#288 `_,
+ `#406 `_,
+ `#417 `_: connections could leave
+ FD wrapper objects that had not been closed lying around to be closed during
+ garbage collection, causing reused FD numbers to be closed at random moments.
+
+* `#411 `_: the SSH method typed
+ "``y``" rather than the requisite "``yes``" when `check_host_keys="accept"`
+ was configured. This would lead to connection timeouts due to the hung
+ response.
+
+* `#414 `_,
+ `#425 `_: avoid deadlock of forked
+ children by reinitializing the :mod:`mitogen.service` pool lock.
+
+* `#416 `_: around 1.4KiB of memory
+ was leaked on every RPC, due to a list of strong references keeping alive any
+ handler ever registered for disconnect notification.
+
+* `#418 `_: the
+ :func:`mitogen.parent.iter_read` helper would leak poller FDs, because
+ execution of its :keyword:`finally` block was delayed on Python 3. Now
+ callers explicitly close the generator when finished.
+
+* `#422 `_: the fork method could
+ fail to start if :data:`sys.stdout` was opened in block buffered mode, and
+ buffered data was pending in the parent prior to fork.
+
+* `#438 `_: a descriptive error is
+ logged when stream corruption is detected.
+
+* `#439 `_: descriptive errors are
+ raised when attempting to invoke unsupported function types.
+
+* `#444 `_: messages regarding
+  unforwardable extension modules are no longer logged as errors.
+
+* `#445 `_: service pools unregister
+ the :data:`mitogen.core.CALL_SERVICE` handle at shutdown, ensuring any
+ outstanding messages are either processed by the pool as it shuts down, or
+ have dead messages sent in reply to them, preventing peer contexts from
+ hanging due to a forgotten buffered message.
+
+* `#446 `_: given thread A calling
+ :meth:`mitogen.core.Receiver.close`, and thread B, C, and D sleeping in
+ :meth:`mitogen.core.Receiver.get`, previously only one sleeping thread would
+ be woken with :class:`mitogen.core.ChannelError` when the receiver was
+ closed. Now all threads are woken per the docstring.
+
+* `#447 `_: duplicate attempts to
+ invoke :meth:`mitogen.core.Router.add_handler` cause an error to be raised,
+  ensuring accidental re-registration of service pools is reported correctly.
+
+* `#448 `_: the import hook
+ implementation now raises :class:`ModuleNotFoundError` instead of
+ :class:`ImportError` in Python 3.6 and above, to cope with an upcoming
+ version of the :mod:`subprocess` module requiring this new subclass to be
+ raised.
+
+* `#453 `_: the loggers used in
+ children for standard IO redirection have propagation disabled, preventing
+ accidental reconfiguration of the :mod:`logging` package in a child from
+ setting up a feedback loop.
+
+* `#456 `_: a descriptive error is
+ logged when :meth:`mitogen.core.Broker.defer` is called after the broker has
+ shut down, preventing new messages being enqueued that will never be sent,
+ and subsequently producing a program hang.
+
+* `#459 `_: the beginnings of a
+ :meth:`mitogen.master.Router.get_stats` call has been added. The initial
+ statistics cover the module loader only.
+
+* `#462 `_: Mitogen could fail to
+ open a PTY on broken Linux systems due to a bad interaction between the glibc
+ :func:`grantpt` function and an incorrectly mounted ``/dev/pts`` filesystem.
+ Since correct group ownership is not required in most scenarios, when this
+ problem is detected, the PTY is allocated and opened directly by the library.
+
+* `#479 `_: Mitogen could fail to
+ import :mod:`__main__` on Python 3.4 and newer due to a breaking change in
+ the :mod:`pkgutil` API. The program's main script is now handled specially.
+
+* `#481 `_: the version of `sudo`
+ that shipped with CentOS 5 replaced itself with the program to be executed,
+ and therefore did not hold any child PTY open on our behalf. The child
+ context is updated to preserve any PTY FD in order to avoid the kernel
+ sending `SIGHUP` early during startup.
+
+* `#523 `_: the test suite didn't
+ generate a code coverage report if any test failed.
+
+* `#524 `_: Python 3.6+ emitted a
+ :class:`DeprecationWarning` for :func:`mitogen.utils.run_with_router`.
+
+* `#529 `_: code coverage of the
+ test suite was not measured across all Python versions.
+
+* `16ca111e `_: handle OpenSSH
+ 7.5 permission denied prompts when ``~/.ssh/config`` rewrites are present.
+
+* `9ec360c2 `_: a new
+ :meth:`mitogen.core.Broker.defer_sync` utility function is provided.
+
+* `f20e0bba `_:
+ :meth:`mitogen.service.FileService.register_prefix` permits granting
+ unprivileged access to whole filesystem subtrees, rather than single files at
+ a time.
+
+* `8f85ee03 `_:
+ :meth:`mitogen.core.Router.myself` returns a :class:`mitogen.core.Context`
+ referring to the current process.
+
+* `824c7931 `_: exceptions
+ raised by the import hook were updated to include probable reasons for
+ a failure.
+
+* `57b652ed `_: a stray import
+  meant an extra roundtrip and ~4KiB of data were wasted for any context that
+ imported :mod:`mitogen.parent`.
+
+
+Thanks!
+~~~~~~~
+
+Mitogen would not be possible without the support of users. A huge thanks for
+bug reports, testing, features and fixes in this release contributed by
+`Alex Willmer `_,
+`Andreas Krüger `_,
+`Anton Stroganov `_,
+`Berend De Schouwer `_,
+`Brian Candler `_,
+`dsgnr `_,
+`Duane Zamrok `_,
+`Eric Chang `_,
+`Gerben Meijer `_,
+`Guy Knights `_,
+`Jesse London `_,
+`Jiří Vávra `_,
+`Johan Beisser `_,
+`Jonathan Rosser `_,
+`Josh Smift `_,
+`Kevin Carter `_,
+`Mehdi `_,
+`Michael DeHaan `_,
+`Michal Medvecky `_,
+`Mohammed Naser `_,
+`Peter V. Saveliev `_,
+`Pieter Avonts `_,
+`Ross Williams `_,
+`Sergey `_,
+`Stéphane `_,
+`Strahinja Kustudic `_,
+`Tom Parker-Shemilt `_,
+`Younès HAFRI `_,
+`@killua-eu `_,
+`@myssa91 `_,
+`@ohmer1 `_,
+`@s3c70r `_,
+`@syntonym `_,
+`@trim777 `_,
+`@whky `_, and
+`@yodatak `_.
+
+
v0.2.3 (2018-10-23)
-------------------
@@ -217,7 +739,7 @@ Thanks!
~~~~~~~
Mitogen would not be possible without the support of users. A huge thanks for
-bug reports, features and fixes in this release contributed by
+bug reports, testing, features and fixes in this release contributed by
`Alex Russu `_,
`Alex Willmer `_,
`atoom `_,
@@ -398,69 +920,6 @@ Mitogen for Ansible
* Built-in file transfer compatible with connection delegation.
-**Known Issues**
-
-* The ``raw`` action executes as a regular Mitogen connection, which requires
- Python on the target, precluding its use for installing Python. This will be
- addressed in a future 0.2 release. For now, simply mix Mitogen and vanilla
- Ansible strategies in your playbook:
-
- .. code-block:: yaml
-
- - hosts: web-servers
- strategy: linear
- tasks:
- - name: Install Python if necessary.
- raw: test -e /usr/bin/python || apt install -y python-minimal
-
- - hosts: web-servers
- strategy: mitogen_linear
- roles:
- - nginx
- - initech_app
- - y2k_fix
-
-.. * When running with ``-vvv``, log messages will be printed to the console
- *after* the Ansible run completes, as connection multiplexer shutdown only
- begins after Ansible exits. This is due to a lack of suitable shutdown hook
- in Ansible, and is fairly harmless, albeit cosmetically annoying. A future
- release may include a solution.
-
-.. * Configurations will break that rely on the `hashbang argument splitting
- behaviour `_ of the
- ``ansible_python_interpreter`` setting, contrary to the Ansible
- documentation. This will be addressed in a future 0.2 release.
-
-* The Ansible 2.7 ``reboot`` module is not yet supported.
-
-* Performance does not scale linearly with target count. This requires
- significant additional work, as major bottlenecks exist in the surrounding
- Ansible code. Performance-related bug reports for any scenario remain
- welcome with open arms.
-
-* Performance on Python 3 is significantly worse than on Python 2. While this
- has not yet been investigated, at least some of the regression appears to be
- part of the core library, and should therefore be straightforward to fix as
- part of 0.2.x.
-
-* *Module Replacer* style Ansible modules are not supported.
-
-* Actions are single-threaded for each `(host, user account)` combination,
- including actions that execute on the local machine. Playbooks may experience
- slowdown compared to vanilla Ansible if they employ long-running
- ``local_action`` or ``delegate_to`` tasks delegating many target hosts to a
- single machine and user account.
-
-* Connection Delegation remains in preview and has bugs around how it infers
- connections. Connection establishment will remain single-threaded for the 0.2
- series, however connection inference bugs will be addressed in a future 0.2
- release.
-
-* Connection Delegation does not support automatic tunnelling of SSH-dependent
- actions, such as the ``synchronize`` module. This will be addressed in the
- 0.3 series.
-
-
Core Library
~~~~~~~~~~~~
@@ -473,33 +932,3 @@ Core Library
Windows Subsystem for Linux explicitly supported.
* Automatic tests covering Python 2.6, 2.7 and 3.6 on Linux only.
-
-
-**Known Issues**
-
-* Serialization is still based on :mod:`pickle`. While there is high confidence
- remote code execution is impossible in Mitogen's configuration, an untrusted
- context may at least trigger disproportionately high memory usage injecting
- small messages (*"billion laughs attack"*). Replacement is an important
- future priority, but not critical for an initial release.
-
-* Child processes are not reliably reaped, leading to a pileup of zombie
- processes when a program makes many short-lived connections in a single
- invocation. This does not impact Mitogen for Ansible, however it limits the
- usefulness of the core library. A future 0.2 release will address it.
-
-* Some races remain around :class:`mitogen.core.Broker ` destruction,
- disconnection and corresponding file descriptor closure. These are only
- problematic in situations where child process reaping is also problematic.
-
-* The `fakessh` component does not shut down correctly and requires flow
- control added to the design. While minimal fixes are possible, due to the
- absence of flow control the original design is functionally incomplete.
-
-* The multi-threaded :ref:`service` remains in a state of design flux and
- should be considered obsolete, despite heavy use in Mitogen for Ansible. A
- future replacement may be integrated more tightly with, or entirely replace
- the RPC dispatcher on the main thread.
-
-* Documentation is in a state of disrepair. This will be improved over the 0.2
- series.
diff --git a/docs/examples.rst b/docs/examples.rst
index e3414c80..c75d8f70 100644
--- a/docs/examples.rst
+++ b/docs/examples.rst
@@ -137,6 +137,9 @@ We could instead express the above using Mitogen:
::
+ import shutil, os, subprocess
+ import mitogen
+
def run(*args):
return subprocess.check_call(args)
@@ -144,22 +147,24 @@ We could instead express the above using Mitogen:
with open(path, 'rb') as fp:
return s in fp.read()
- device = '/dev/sdb1'
- mount_point = '/media/Media Volume'
+ @mitogen.main()
+ def main(router):
+ device = '/dev/sdb1'
+ mount_point = '/media/Media Volume'
- bastion = router.ssh(hostname='bastion')
- bastion_sudo = router.sudo(via=bastion)
+ bastion = router.ssh(hostname='bastion')
+ bastion_sudo = router.sudo(via=bastion)
- if PROD:
- fileserver = router.ssh(hostname='fileserver', via=bastion)
- if fileserver.call(file_contains, device, '/proc/mounts'):
- print('{} already mounted!'.format(device))
- fileserver.call(run, 'umount', device)
- fileserver.call(shutil.rmtree, mount_point)
- fileserver.call(os.mkdir, mount_point, 0777)
- fileserver.call(run, 'mount', device, mount_point)
+ if PROD:
+ fileserver = router.ssh(hostname='fileserver', via=bastion)
+ if fileserver.call(file_contains, device, '/proc/mounts'):
+ print('{} already mounted!'.format(device))
+ fileserver.call(run, 'umount', device)
+ fileserver.call(shutil.rmtree, mount_point)
+ fileserver.call(os.mkdir, mount_point, 0o777)
+ fileserver.call(run, 'mount', device, mount_point)
- bastion_sudo.call(run, 'touch', '/var/run/start_backup')
+ bastion_sudo.call(run, 'touch', '/var/run/start_backup')
* In which context must the ``PROD`` variable be defined?
* On which machine is each step executed?
@@ -185,9 +190,9 @@ nested.py:
.. code-block:: python
import os
- import mitogen.utils
+ import mitogen
- @mitogen.utils.run_with_router
+ @mitogen.main()
def main(router):
mitogen.utils.log_to_file()
diff --git a/docs/getting_started.rst b/docs/getting_started.rst
index 061d0f51..020760bc 100644
--- a/docs/getting_started.rst
+++ b/docs/getting_started.rst
@@ -248,7 +248,7 @@ Running User Functions
----------------------
So far we have used the interactive interpreter to call some standard library
-functions, but if since source code typed at the interpreter cannot be
+functions, but since the source code typed at the interpreter cannot be
recovered, Mitogen is unable to execute functions defined in this way.
We must therefore continue by writing our code as a script::
diff --git a/docs/howitworks.rst b/docs/howitworks.rst
index 6db58ed4..5bc7b53d 100644
--- a/docs/howitworks.rst
+++ b/docs/howitworks.rst
@@ -16,17 +16,17 @@ The UNIX First Stage
To allow delivery of the bootstrap compressed using :py:mod:`zlib`, it is
necessary for something on the remote to be prepared to decompress the payload
-and feed it to a Python interpreter. Since we would like to avoid writing an
-error-prone shell fragment to implement this, and since we must avoid writing
-to the remote machine's disk in case it is read-only, the Python process
-started on the remote machine by Mitogen immediately forks in order to
+and feed it to a Python interpreter [#f1]_. Since we would like to avoid
+writing an error-prone shell fragment to implement this, and since we must
+avoid writing to the remote machine's disk in case it is read-only, the Python
+process started on the remote machine by Mitogen immediately forks in order to
implement the decompression.
Python Command Line
###################
-The Python command line sent to the host is a :mod:`zlib`-compressed [#f1]_ and
+The Python command line sent to the host is a :mod:`zlib`-compressed [#f2]_ and
base64-encoded copy of the :py:meth:`mitogen.master.Stream._first_stage`
function, which has been carefully optimized to reduce its size. Prior to
compression and encoding, ``CONTEXT_NAME`` is replaced with the desired context
@@ -65,10 +65,10 @@ allowing reading by the first stage of exactly the required bytes.
Configuring argv[0]
###################
-Forking provides us with an excellent opportunity for tidying up the eventual
-Python interpreter, in particular, restarting it using a fresh command-line to
-get rid of the large base64-encoded first stage parameter, and to replace
-**argv[0]** with something descriptive.
+Forking provides an excellent opportunity to tidy up the eventual Python
+interpreter, in particular, restarting it using a fresh command-line to get rid
+of the large base64-encoded first stage parameter, and to replace **argv[0]**
+with something descriptive.
After configuring its ``stdin`` to point to the read end of the pipe, the
parent half of the fork re-executes Python, with **argv[0]** taken from the
@@ -273,6 +273,10 @@ parent and child. Integers use big endian in their encoded form.
- Size
- Description
+ * - `magic`
+ - 2
+ - Integer 0x4d49 (``MI``), used to detect stream corruption.
+
* - `dst_id`
- 4
- Integer target context ID. :py:class:`Router` delivers messages
@@ -577,6 +581,28 @@ When ``sudo:node22a:webapp`` wants to send a message to
:class: mitogen-full-width
+Disconnect Propagation
+######################
+
+To ensure timely shutdown when a failure occurs while some context is awaiting
+a response from another context that has become disconnected,
+:class:`mitogen.core.Router` additionally records the destination context ID of
+every message received on a particular stream.
+
+When ``DEL_ROUTE`` is generated locally or received on some other stream,
+:class:`mitogen.parent.RouteMonitor` uses this to find every stream that ever
+communicated with the route that is about to go away, and forwards the message
+to each stream it finds.
+
+The recipient ``DEL_ROUTE`` handler in turn uses the message to find any
+:class:`mitogen.core.Context` in the local process corresponding to the
+disappearing route, and if found, fires a ``disconnected`` event on it.
+
+Any interested party, such as :class:`mitogen.core.Receiver`, may subscribe to
+the event and use it to abort any threads that were asleep waiting for a reply
+that will never arrive.
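+
+A minimal sketch of observing this from application code, assuming ``context``
+is the :class:`mitogen.core.Context` of interest, that the event is fired
+under the name ``disconnect``, and that :func:`mitogen.core.listen` is the
+subscription mechanism::
+
+    import mitogen.core
+
+    def on_disconnect():
+        print('%r has disconnected' % (context,))
+
+    mitogen.core.listen(context, 'disconnect', on_disconnect)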
+
+
.. _source-verification:
Source Verification
@@ -998,7 +1024,13 @@ receive items in the order they are requested, as they become available.
.. rubric:: Footnotes
-.. [#f1] Compression may seem redundant, however it is basically free and reducing IO
+.. [#f1] Although some connection methods such as SSH support compression, and
+ Mitogen enables SSH compression by default, there are circumstances where
+ disabling SSH compression is desirable, and many scenarios for future
+ connection methods where transport-layer compression is not supported at
+ all.
+
+.. [#f2] Compression may seem redundant, however it is basically free and reducing IO
is always a good idea. The 33% / 200 byte saving may mean the presence or
absence of an additional frame on the network, or in real world terms after
accounting for SSH overhead, around a 2% reduced chance of a stall during
diff --git a/docs/images/ansible/ansible_mitogen.svg b/docs/images/ansible/ansible_mitogen.svg
index 922f1200..feae7d1a 100644
--- a/docs/images/ansible/ansible_mitogen.svg
+++ b/docs/images/ansible/ansible_mitogen.svg
@@ -1,2 +1,2 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/images/ansible/costapp.png b/docs/images/ansible/costapp.png
deleted file mode 100644
index 56827070..00000000
Binary files a/docs/images/ansible/costapp.png and /dev/null differ
diff --git a/docs/images/ansible/pcaps/.gitattributes b/docs/images/ansible/pcaps/.gitattributes
new file mode 100644
index 00000000..ae890c41
--- /dev/null
+++ b/docs/images/ansible/pcaps/.gitattributes
@@ -0,0 +1,3 @@
+**pcap** filter=lfs diff=lfs merge=lfs -text
+run_hostname_100_times_mito.pcap.gz filter=lfs diff=lfs merge=lfs -text
+run_hostname_100_times_vanilla.pcap.gz filter=lfs diff=lfs merge=lfs -text
diff --git a/docs/images/ansible/pcaps/costapp-uk-india.svg b/docs/images/ansible/pcaps/costapp-uk-india.svg
new file mode 100644
index 00000000..7a549ba0
--- /dev/null
+++ b/docs/images/ansible/pcaps/costapp-uk-india.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/images/ansible/pcaps/debops-uk-india.svg b/docs/images/ansible/pcaps/debops-uk-india.svg
new file mode 100644
index 00000000..ddf8d1f5
--- /dev/null
+++ b/docs/images/ansible/pcaps/debops-uk-india.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/images/ansible/pcaps/loop-100-items-local-detail.svg b/docs/images/ansible/pcaps/loop-100-items-local-detail.svg
new file mode 100644
index 00000000..7ae2cf3c
--- /dev/null
+++ b/docs/images/ansible/pcaps/loop-100-items-local-detail.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/images/ansible/pcaps/loop-100-items-local.svg b/docs/images/ansible/pcaps/loop-100-items-local.svg
new file mode 100644
index 00000000..8f74fbef
--- /dev/null
+++ b/docs/images/ansible/pcaps/loop-100-items-local.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/images/ansible/pcaps/stroke-width.py b/docs/images/ansible/pcaps/stroke-width.py
new file mode 100644
index 00000000..7a0ba435
--- /dev/null
+++ b/docs/images/ansible/pcaps/stroke-width.py
@@ -0,0 +1,16 @@
+
+import sys
+
+# Bump hairline stroke widths up to 2 so figures remain legible when scaled.
+
+import lxml.etree
+
+
+for name in sys.argv[1:]:
+    doc = lxml.etree.parse(open(name))
+    svg = doc.getroot()
+    for elem in svg.cssselect('[stroke-width]'):
+        # Compare numerically; a string comparison would treat '10' < '2'.
+        if float(elem.attrib['stroke-width']) < 2:
+            elem.attrib['stroke-width'] = '2'
+
+    open(name, 'w').write(lxml.etree.tostring(svg, xml_declaration=True, encoding='UTF-8'))
diff --git a/docs/images/ansible/pcaps/svg-boxify.py b/docs/images/ansible/pcaps/svg-boxify.py
new file mode 100644
index 00000000..728b9241
--- /dev/null
+++ b/docs/images/ansible/pcaps/svg-boxify.py
@@ -0,0 +1,13 @@
+
+# Add viewBox attr to SVGs lacking it, so IE scales properly.
+
+import lxml.etree
+import glob
+
+
+for name in glob.glob('images/*.svg') + glob.glob('images/ansible/*.svg'):
+    doc = lxml.etree.parse(open(name))
+    svg = doc.getroot()
+    if 'viewBox' not in svg.attrib:
+        svg.attrib['viewBox'] = '0 0 %(width)s %(height)s' % svg.attrib
+    open(name, 'w').write(lxml.etree.tostring(svg, xml_declaration=True, encoding='UTF-8'))
diff --git a/docs/index.rst b/docs/index.rst
index 6aff35aa..066d6716 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -2,8 +2,6 @@
Mitogen
=======
-Mitogen is a Python library for writing distributed self-replicating programs.
-
.. raw:: html
.. image:: images/mitogen.svg
- :class: mitogen-right-225
+ :class: mitogen-right-200 mitogen-logo-wrap
+
+Mitogen is a Python library for writing distributed self-replicating programs.
There is no requirement for installing packages, copying files around, writing
shell snippets, upfront configuration, or providing any secondary link to a
@@ -351,6 +351,7 @@ usual into the slave process.
os.system('tar zxvf my_app.tar.gz')
+ @mitogen.main()
def main(broker):
if len(sys.argv) != 2:
print(__doc__)
@@ -359,10 +360,6 @@ usual into the slave process.
context = mitogen.ssh.connect(broker, sys.argv[1])
context.call(install_app)
- if __name__ == '__main__' and mitogen.is_master:
- import mitogen.utils
- mitogen.utils.run_with_broker(main)
-
Event-driven IO
###############
@@ -398,12 +395,12 @@ a large fleet of machines, or to alert the parent of unexpected state changes.
Compatibility
#############
-Mitogen is syntax-compatible with **Python 2.4** released November 2004, making
-it suitable for managing a fleet of potentially ancient corporate hardware,
-such as Red Hat Enterprise Linux 5, released in 2007.
+Mitogen is compatible with **Python 2.4** released November 2004, making it
+suitable for managing a fleet of potentially ancient corporate hardware, such
+as Red Hat Enterprise Linux 5, released in 2007.
Every combination of Python 3.x/2.x parent and child should be possible,
-however at present only Python 2.6, 2.7 and 3.6 are tested automatically.
+however at present only Python 2.4, 2.6, 2.7 and 3.6 are tested automatically.
Zero Dependencies
diff --git a/docs/internals.rst b/docs/internals.rst
index 03f12e1e..e1dd4a41 100644
--- a/docs/internals.rst
+++ b/docs/internals.rst
@@ -15,6 +15,20 @@ Constants
.. autodata:: CHUNK_SIZE
+Poller Classes
+==============
+
+.. currentmodule:: mitogen.core
+.. autoclass:: Poller
+ :members:
+
+.. currentmodule:: mitogen.parent
+.. autoclass:: EpollPoller
+
+.. currentmodule:: mitogen.parent
+.. autoclass:: KqueuePoller
+
+
Latch Class
===========
@@ -32,171 +46,42 @@ PidfulStreamHandler Class
Side Class
-----------
+==========
.. currentmodule:: mitogen.core
-
-.. class:: Side (stream, fd, keep_alive=True)
-
- Represent a single side of a :py:class:`BasicStream`. This exists to allow
- streams implemented using unidirectional (e.g. UNIX pipe) and bidirectional
- (e.g. UNIX socket) file descriptors to operate identically.
-
- :param mitogen.core.Stream stream:
- The stream this side is associated with.
-
- :param int fd:
- Underlying file descriptor.
-
- :param bool keep_alive:
- Value for :py:attr:`keep_alive`
-
- During construction, the file descriptor has its :py:data:`os.O_NONBLOCK`
- flag enabled using :py:func:`fcntl.fcntl`.
-
- .. attribute:: stream
-
- The :py:class:`Stream` for which this is a read or write side.
-
- .. attribute:: fd
-
- Integer file descriptor to perform IO on, or :data:`None` if
- :py:meth:`close` has been called.
-
- .. attribute:: keep_alive
-
- If :data:`True`, causes presence of this side in :py:class:`Broker`'s
- active reader set to defer shutdown until the side is disconnected.
-
- .. method:: fileno
-
- Return :py:attr:`fd` if it is not :data:`None`, otherwise raise
- :py:class:`StreamError`. This method is implemented so that
- :py:class:`Side` can be used directly by :py:func:`select.select`.
-
- .. method:: close
-
- Call :py:func:`os.close` on :py:attr:`fd` if it is not :data:`None`,
- then set it to :data:`None`.
-
- .. method:: read (n=CHUNK_SIZE)
-
- Read up to `n` bytes from the file descriptor, wrapping the underlying
- :py:func:`os.read` call with :py:func:`io_op` to trap common
- disconnection conditions.
-
- :py:meth:`read` always behaves as if it is reading from a regular UNIX
- file; socket, pipe, and TTY disconnection errors are masked and result
- in a 0-sized read just like a regular file.
-
- :returns:
- Bytes read, or the empty to string to indicate disconnection was
- detected.
-
- .. method:: write (s)
-
- Write as much of the bytes from `s` as possible to the file descriptor,
- wrapping the underlying :py:func:`os.write` call with :py:func:`io_op`
- to trap common disconnection connditions.
-
- :returns:
- Number of bytes written, or :data:`None` if disconnection was
- detected.
+.. autoclass:: Side
+ :members:
Stream Classes
---------------
+==============
.. currentmodule:: mitogen.core
-
-.. class:: BasicStream
-
- .. attribute:: receive_side
-
- A :py:class:`Side` representing the stream's receive file descriptor.
-
- .. attribute:: transmit_side
-
- A :py:class:`Side` representing the stream's transmit file descriptor.
-
- .. method:: on_disconnect (broker)
-
- Called by :py:class:`Broker` to force disconnect the stream. The base
- implementation simply closes :py:attr:`receive_side` and
- :py:attr:`transmit_side` and unregisters the stream from the broker.
-
- .. method:: on_receive (broker)
-
- Called by :py:class:`Broker` when the stream's :py:attr:`receive_side` has
- been marked readable using :py:meth:`Broker.start_receive` and the
- broker has detected the associated file descriptor is ready for
- reading.
-
- Subclasses must implement this method if
- :py:meth:`Broker.start_receive` is ever called on them, and the method
- must call :py:meth:`on_disconect` if reading produces an empty string.
-
- .. method:: on_transmit (broker)
-
- Called by :py:class:`Broker` when the stream's :py:attr:`transmit_side`
- has been marked writeable using :py:meth:`Broker._start_transmit` and
- the broker has detected the associated file descriptor is ready for
- writing.
-
- Subclasses must implement this method if
- :py:meth:`Broker._start_transmit` is ever called on them.
-
- .. method:: on_shutdown (broker)
-
- Called by :py:meth:`Broker.shutdown` to allow the stream time to
- gracefully shutdown. The base implementation simply called
- :py:meth:`on_disconnect`.
+.. autoclass:: BasicStream
+ :members:
.. autoclass:: Stream
:members:
- .. method:: pending_bytes ()
-
- Returns the number of bytes queued for transmission on this stream.
- This can be used to limit the amount of data buffered in RAM by an
- otherwise unlimited consumer.
-
- For an accurate result, this method should be called from the Broker
- thread, using a wrapper like:
-
- ::
-
- def get_pending_bytes(self, stream):
- latch = mitogen.core.Latch()
- self.broker.defer(
- lambda: latch.put(stream.pending_bytes())
- )
- return latch.get()
-
-
.. currentmodule:: mitogen.fork
-
.. autoclass:: Stream
:members:
.. currentmodule:: mitogen.parent
-
.. autoclass:: Stream
:members:
.. currentmodule:: mitogen.ssh
-
.. autoclass:: Stream
:members:
.. currentmodule:: mitogen.sudo
-
.. autoclass:: Stream
:members:
Other Stream Subclasses
------------------------
+=======================
.. currentmodule:: mitogen.core
@@ -208,10 +93,11 @@ Other Stream Subclasses
Poller Class
-------------
+============
.. currentmodule:: mitogen.core
.. autoclass:: Poller
+ :members:
.. currentmodule:: mitogen.parent
.. autoclass:: KqueuePoller
@@ -221,7 +107,7 @@ Poller Class
Importer Class
---------------
+==============
.. currentmodule:: mitogen.core
.. autoclass:: Importer
@@ -229,15 +115,23 @@ Importer Class
Responder Class
----------------
+===============
.. currentmodule:: mitogen.master
.. autoclass:: ModuleResponder
:members:
+RouteMonitor Class
+==================
+
+.. currentmodule:: mitogen.parent
+.. autoclass:: RouteMonitor
+ :members:
+
+
Forwarder Class
----------------
+===============
.. currentmodule:: mitogen.parent
.. autoclass:: ModuleForwarder
@@ -245,67 +139,19 @@ Forwarder Class
ExternalContext Class
----------------------
+=====================
.. currentmodule:: mitogen.core
+.. autoclass:: ExternalContext
+ :members:
-.. class:: ExternalContext
-
- External context implementation.
-
- .. attribute:: broker
-
- The :py:class:`mitogen.core.Broker` instance.
-
- .. attribute:: context
-
- The :py:class:`mitogen.core.Context` instance.
-
- .. attribute:: channel
-
- The :py:class:`mitogen.core.Channel` over which
- :py:data:`CALL_FUNCTION` requests are received.
-
- .. attribute:: stdout_log
-
- The :py:class:`mitogen.core.IoLogger` connected to ``stdout``.
-
- .. attribute:: importer
-
- The :py:class:`mitogen.core.Importer` instance.
-
- .. attribute:: stdout_log
-
- The :py:class:`IoLogger` connected to ``stdout``.
-
- .. attribute:: stderr_log
-
- The :py:class:`IoLogger` connected to ``stderr``.
-
- .. method:: _dispatch_calls
-
- Implementation for the main thread in every child context.
mitogen.master
==============
-.. currentmodule:: mitogen.master
-
-.. class:: ProcessMonitor
-
- Install a :py:data:`signal.SIGCHLD` handler that generates callbacks when a
- specific child process has exitted.
-
- .. method:: add (pid, callback)
-
- Add a callback function to be notified of the exit status of a process.
-
- :param int pid:
- Process ID to be notified of.
-
- :param callback:
- Function invoked as `callback(status)`, where `status` is the raw
- exit status of the child process.
+.. currentmodule:: mitogen.parent
+.. autoclass:: ProcessMonitor
+ :members:
Blocking I/O Functions
diff --git a/docs/docs-requirements.txt b/docs/requirements.txt
similarity index 51%
rename from docs/docs-requirements.txt
rename to docs/requirements.txt
index f0bddf36..a93c2140 100644
--- a/docs/docs-requirements.txt
+++ b/docs/requirements.txt
@@ -1,4 +1,3 @@
Sphinx==1.7.1
-sphinx-autobuild==0.6.0 # Last version to support Python 2.6
sphinxcontrib-programoutput==0.11
alabaster==0.7.10
diff --git a/docs/shame.rst b/docs/shame.rst
deleted file mode 100644
index 041feb2a..00000000
--- a/docs/shame.rst
+++ /dev/null
@@ -1,90 +0,0 @@
-
-Importer Wall Of Shame
-----------------------
-
-The following modules and packages violate protocol or best practice in some way:
-
-* They run magic during ``__init.py__`` that makes life hard for Mitogen.
- Executing code during module import is always bad, and Mitogen is a concrete
- benchmark for why it's bad.
-
-* They install crap in :py:data:`sys.modules` that completely ignore or
- partially implement the protocols laid out in PEP-302.
-
-* They "vendor" a third party package, either incompletely, using hacks visible
- through the runtime's standard interfaces, or with ancient versions of code
- that in turn mess with :py:data:`sys.modules` in some horrible way.
-
-Bugs will probably be filed for these in time, but it does not address the huge
-installed base of existing old software versions, so hacks are needed anyway.
-
-
-``pbr``
-=======
-
-It claims to use ``pkg_resources`` to read version information
-(``_get_version_from_pkg_metadata()``), which would result in PEP-302 being
-reused and everything just working wonderfully, but instead it actually does
-direct filesystem access.
-
-**What could it do instead?**
-
-* ``pkg_resources.resource_stream()``
-
-**What Mitogen is forced to do**
-
-When it sees ``pbr`` being loaded, it smodges the process environment with a
-``PBR_VERSION`` variable to override any attempt to auto-detect the version.
-This will probably break code I haven't seen yet.
-
-
-``pkg_resources``
-=================
-
-Anything that imports ``pkg_resources`` will eventually cause ``pkg_resources``
-to try and import and scan ``__main__`` for its ``__requires__`` attribute
-(``pkg_resources/__init__.py::_build_master()``). This breaks any app that is
-not expecting its ``__main__`` to suddenly be sucked over a network and
-injected into a remote process, like py.test.
-
-A future version of Mitogen might have a more general hack that doesn't import
-the master's ``__main__`` as ``__main__`` in the slave, avoiding all kinds of
-issues like these.
-
-**What could it do instead?**
-
-* Explicit is better than implicit: wait until the magical behaviour is
- explicitly requested (i.e. an API call).
-
-* Use ``get("__main__")`` on :py:data:`sys.modules` rather than ``import``, but
- this method isn't general enough, it only really helps tools like Mitogen.
-
-**What Mitogen is forced to do**
-
-Examine the stack during every attempt to import ``__main__`` and check if the
-requestee module is named ``pkg_resources``, if so then refuse the import.
-
-
-``six``
-=======
-
-The ``six`` module makes some effort to conform to PEP-302, but it is missing
-several critical pieces, e.g. the ``__loader__`` attribute. This not only
-breaks the Python standard library tooling (such as the :py:mod:`inspect`
-module), but also Mitogen. Newer versions of ``six`` improve things somewhat,
-but there are still outstanding issues preventing Mitogen from working with
-``six``.
-
-This package is sufficiently popular that it must eventually be supported. See
-`here for an example issue`_.
-
-.. _here for an example issue: https://github.com/dw/mitogen/issues/31
-
-**What could it do instead?**
-
-* Any custom hacks installed into :py:data:`sys.modules` should support the
- protocols laid out in PEP-302.
-
-**What Mitogen is forced to do**
-
-Vendored versions of ``six`` currently don't work at all.
diff --git a/docs/toc.rst b/docs/toc.rst
index 7b3274a9..abd52a1f 100644
--- a/docs/toc.rst
+++ b/docs/toc.rst
@@ -14,7 +14,6 @@ Table Of Contents
api
examples
internals
- shame
.. toctree::
:hidden:
diff --git a/examples/mitogen-fuse.py b/examples/mitogen-fuse.py
index 7421a0e2..d0cd9a3a 100644
--- a/examples/mitogen-fuse.py
+++ b/examples/mitogen-fuse.py
@@ -20,7 +20,6 @@ import mitogen.master
import mitogen.utils
import __main__
-import posix
import os
diff --git a/examples/ping_pong.py b/examples/ping_pong.py
new file mode 100644
index 00000000..406b6e02
--- /dev/null
+++ b/examples/ping_pong.py
@@ -0,0 +1,46 @@
+# Wire up a ping/pong counting loop between 2 subprocesses.
+
+from __future__ import print_function
+import mitogen.core
+import mitogen.select
+
+
+@mitogen.core.takes_router
+def ping_pong(control_sender, router):
+ with mitogen.core.Receiver(router) as recv:
+ # Tell caller how to communicate with us.
+ control_sender.send(recv.to_sender())
+
+ # Wait for caller to tell us how to talk back:
+ data_sender = recv.get().unpickle()
+
+ n = 0
+ while (n + 1) < 30:
+ n = recv.get().unpickle()
+ print('the number is currently', n)
+ data_sender.send(n + 1)
+
+
+@mitogen.main()
+def main(router):
+ # Create a receiver for control messages.
+ with mitogen.core.Receiver(router) as recv:
+ # Start ping_pong() in child 1 and fetch its sender.
+ c1 = router.local()
+ c1_call = c1.call_async(ping_pong, recv.to_sender())
+ c1_sender = recv.get().unpickle()
+
+ # Start ping_pong() in child 2 and fetch its sender.
+ c2 = router.local()
+ c2_call = c2.call_async(ping_pong, recv.to_sender())
+ c2_sender = recv.get().unpickle()
+
+ # Tell the children about each others' senders.
+ c1_sender.send(c2_sender)
+ c2_sender.send(c1_sender)
+
+ # Start the loop.
+ c1_sender.send(0)
+
+ # Wait for both functions to return.
+ mitogen.select.Select.all([c1_call, c2_call])
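+
+# Expected output: 'the number is currently N' for N = 0..30, printed
+# alternately by the two children, after which both calls return.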
diff --git a/examples/select_loop.py b/examples/select_loop.py
new file mode 100644
index 00000000..a46d8ed6
--- /dev/null
+++ b/examples/select_loop.py
@@ -0,0 +1,103 @@
+
+#
+# This demonstrates using a nested select.Select() to simultaneously watch for
+# in-progress events generated by a bunch of function calls, and the completion
+# of those function calls.
+#
+# We start 5 children and run a function in each of them in parallel. The
+# function writes the numbers 1..5 to a Sender before returning. The master
+# reads the numbers from each child as they are generated, and exits the loop
+# when the last function returns.
+#
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+import time
+
+import mitogen
+import mitogen.select
+
+
+def count_to(sender, n, wait=0.333):
+ for x in range(n):
+ sender.send(x)
+ time.sleep(wait)
+
+
+@mitogen.main()
+def main(router):
+ # Start 5 subprocesses and give them made up names.
+ contexts = {
+ 'host%d' % (i,): router.local()
+ for i in range(5)
+ }
+
+ # Used later to recover the hostname. A future Mitogen release will provide
+ # a better way to get app data references back out of its IO primitives;
+ # for now you need to do it manually.
+ hostname_by_context_id = {
+ context.context_id: hostname
+ for hostname, context in contexts.items()
+ }
+
+ # I am a select that holds the receivers that will receive the function
+ # call results. Selects are one-shot by default, which means each receiver
+ # is removed as its result arrives, so the last function has completed
+ # once bool(calls_sel) is False.
+ calls_sel = mitogen.select.Select()
+
+ # I receive the numbers as they are counted.
+ status_recv = mitogen.core.Receiver(router)
+
+ # Start the function calls
+ for hostname, context in contexts.items():
+ calls_sel.add(
+ context.call_async(
+ count_to,
+ sender=status_recv.to_sender(),
+ n=5,
+ wait=0.333
+ )
+ )
+
+ # Create a select subscribed to the function call result Select, and to the
+ # number-counting receiver. Any message arriving on any child of this
+ # Select will wake it up -- be it a message arriving on the status
+ # receiver, or any message arriving on any of the function call result
+ # receivers.
+
+ # Once the last call completes, calls_sel becomes empty (oneshot=True is
+ # the default), causing bool(calls_sel) to evaluate False.
+ both_sel = mitogen.select.Select([status_recv, calls_sel], oneshot=False)
+
+ # Internally selects store a strong reference from Receiver->Select that
+ # will keep the Select alive as long as the receiver is alive. If a
+ # receiver or select otherwise 'outlives' some parent select, attempting to
+ # re-add it to a new select will raise an error. In all cases it's
+ # desirable to call Select.close(). This can be done as a context manager.
+ with calls_sel, both_sel:
+ while calls_sel:
+ try:
+ msg = both_sel.get(timeout=60.0)
+ except mitogen.core.TimeoutError:
+ print("No update in 60 seconds, something's broke")
+ break
+
+ hostname = hostname_by_context_id[msg.src_id]
+
+ if msg.receiver is status_recv: # https://mitogen.readthedocs.io/en/stable/api.html#mitogen.core.Message.receiver
+ # handle a status update
+ print('Got status update from %s: %s' % (hostname, msg.unpickle()))
+ elif msg.receiver is calls_sel: # subselect
+ # handle a function call result.
+ try:
+ assert msg.unpickle() is None
+ print('Task succeeded on %s' % (hostname,))
+ except mitogen.core.CallError as e:
+ print('Task failed on host %s: %s' % (hostname, e))
+
+ if calls_sel:
+ print('Some tasks did not complete.')
+ else:
+ print('All tasks completed.')
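+
+# Expected output: roughly 25 interleaved 'Got status update from hostN: M'
+# lines (5 hosts each counting 0..4), five 'Task succeeded on hostN' lines,
+# then 'All tasks completed.'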
diff --git a/examples/service/client.py b/examples/service/client.py
index e2d78fc0..fc2d8427 100644
--- a/examples/service/client.py
+++ b/examples/service/client.py
@@ -1,6 +1,4 @@
-import socket
-
import mitogen.master
import mitogen.unix
import mitogen.service
diff --git a/examples/service/server.py b/examples/service/server.py
index 2f488d20..1f8c1475 100644
--- a/examples/service/server.py
+++ b/examples/service/server.py
@@ -3,8 +3,6 @@
# hopefully lose those hard-coded magic numbers somehow), but meanwhile this is
# a taster of how it looks today.
-import time
-
import mitogen
import mitogen.service
import mitogen.unix
diff --git a/examples/the_basics.py b/examples/the_basics.py
new file mode 100644
index 00000000..0dcd4049
--- /dev/null
+++ b/examples/the_basics.py
@@ -0,0 +1,295 @@
+
+#
+# This program is a stand-in for good intro docs. It just documents various
+# basics of using Mitogen.
+#
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+import hashlib
+import io
+import os
+import spwd
+
+import mitogen.core
+import mitogen.master
+import mitogen.service
+import mitogen.utils
+
+
+
+def get_file_contents(path):
+ """
+ Get the contents of a file.
+ """
+ with open(path, 'rb') as fp:
+ # mitogen.core.Blob() is a bytes subclass with a repr() that returns a
+ # summary of the blob, rather than the raw blob data. This makes
+ # logging output *much* nicer. Unlike most custom types, blobs can be
+ # serialized.
+ return mitogen.core.Blob(fp.read())
+
+
+def put_file_contents(path, s):
+ """
+ Write the contents of a file.
+ """
+ with open(path, 'wb') as fp:
+ fp.write(s)
+
+
+def streamy_download_file(context, path):
+ """
+ Fetch a file from the FileService hosted by `context`.
+ """
+ bio = io.BytesIO()
+
+ # FileService.get() is not actually an exposed service method, it's just a
+ # classmethod that wraps up the complicated dance of implementing the
+ # transfer.
+ ok, metadata = mitogen.service.FileService.get(context, path, bio)
+
+ return {
+ 'success': ok,
+ 'metadata': metadata,
+ 'size': len(bio.getvalue()),
+ }
+
+
+def get_password_hash(username):
+ """
+ Fetch a user's password hash.
+ """
+ try:
+ h = spwd.getspnam(username)
+ except KeyError:
+ return None
+
+ # mitogen.core.Secret() is a Unicode subclass with a repr() that hides the
+ # secret data. This keeps secret stuff out of logs. Like blobs, secrets can
+ # also be serialized.
+ return mitogen.core.Secret(h)
+
+
+def md5sum(path):
+ """
+ Return the MD5 checksum for a file.
+ """
+ return hashlib.md5(get_file_contents(path)).hexdigest()
+
+
+
+def work_on_machine(context):
+ """
+ Do stuff to a remote context.
+ """
+ print("Created context. Context ID is", context.context_id)
+
+ # You don't need to understand any/all of this, but it's helpful to grok
+ # the whole chain:
+
+ # - Context.call() is a light wrapper around .call_async(), the wrapper
+ # simply blocks the caller until a reply arrives.
+ # - .call_async() serializes the call signature into a message and passes
+ # it to .send_async()
+ # - .send_async() creates a mitogen.core.Receiver() on the local router.
+ # The receiver constructor uses Router.add_handle() to allocate a
+ # 'reply_to' handle and install a callback function that wakes the
+ # receiver when a reply message arrives.
+ # - .send_async() puts the reply handle in Message.reply_to field and
+ # passes it to .send()
+ # - Context.send() stamps the destination context ID into the
+ # Message.dst_id field and passes it to Router.route()
+ # - Router.route() uses Broker.defer() to schedule _async_route(msg)
+ # on the Broker thread.
+ # [broker thread]
+ # - The broker thread wakes and calls _async_route(msg)
+ # - Router._async_route() notices 'dst_id' is for a remote context and
+ # looks up the stream on which messages for dst_id should be sent (may be
+ # direct connection or not), and calls Stream.send()
+ # - Stream.send() packs the message into a bytestring, appends it to
+ # Stream._output_buf, and calls Broker.start_transmit()
+ # - Broker finishes work, reenters IO loop. IO loop wakes due to writeable
+ # stream.
+ # - Stream.on_transmit() writes the full/partial buffer to SSH, calls
+ # stop_transmit() to mark the stream unwriteable once _output_buf is
+ # empty.
+ # - Broker IO loop sleeps, no readers/writers.
+ # - Broker wakes due to SSH stream readable.
+ # - Stream.on_receive() called, reads the reply message, converts it to a
+ # Message and passes it to Router._async_route().
+ # - Router._async_route() notices message is for local context, looks up
+ # target handle in the .add_handle() registry.
+ # - Receiver._on_receive() called, appends message to receiver queue.
+ # [main thread]
+ # - Receiver.get() used to block the original Context.call() wakes and pops
+ # the message from the queue.
+ # - Message data (pickled return value) is deserialized and returned to the
+ # caller.
+ print("It's running on the local machine. Its PID is",
+ context.call(os.getpid))
+
+ # Now let's call a function defined in this module. On receiving the
+ # function call request, the child attempts to import __main__, which is
+ # initially missing, causing the importer in the child to request it from
+ # its parent. That causes _this script_ to be sent as the module source
+ # over the wire.
+ print("Calling md5sum(/etc/passwd) in the child:",
+ context.call(md5sum, '/etc/passwd'))
+
+ # Now let's "transfer" a file. The simplest way to do this is calling a
+ # function that returns the file data, which is totally fine for small
+ # files.
+ print("Download /etc/passwd via function call: %d bytes" % (
+ len(context.call(get_file_contents, '/etc/passwd'))
+ ))
+
+ # And using function calls, in the other direction:
+ print("Upload /tmp/blah via function call: %s" % (
+ context.call(put_file_contents, '/tmp/blah', b'blah!'),
+ ))
+
+ # Now let's transfer what might be a big file. The problem with big files
+ # is that they may not fit in RAM. This uses mitogen.service.FileService
+ # to implement streamy file transfer instead. The sender must have a
+ # 'service pool' running that will host FileService. First let's do the
+ # 'upload' direction, where the master hosts FileService.
+
+ # Steals the 'Router' reference from the context object. In a real app the
+ # pool would be constructed once at startup; this is just demo code.
+ file_service = mitogen.service.FileService(context.router)
+
+ # Start the pool.
+ pool = mitogen.service.Pool(context.router, services=[file_service])
+
+ # Grant access to a file on the local disk from unprivileged contexts.
+ # .register() is also exposed as a service method -- you can call it on a
+ # child context from any more privileged context.
+ file_service.register('/etc/passwd')
+
+ # Now call our wrapper function that knows how to handle the transfer. In a
+ # real app, this wrapper might also set ownership/modes or do any other
+ # app-specific stuff relating to the file that was transferred.
+ print("Streamy upload /etc/passwd: remote result: %s" % (
+ context.call(
+ streamy_download_file,
+ # To avoid hard-wiring streamy_download_file(), we want to pass it
+ # a Context object that hosts the file service it should request
+ # files from. Router.myself() returns a Context referring to this
+ # process.
+ context=router.myself(),
+ path='/etc/passwd',
+ ),
+ ))
+
+ # Shut down the pool now that we're done with it, else the app will hang at exit.
+ # Once again, this should only happen once at app startup/exit, not for
+ # every file transfer!
+ pool.stop(join=True)
+
+ # Now let's do the same thing but in reverse: we use FileService on the
+ # remote to download a file. This uses context.call_service(), which invokes a
+ # special code path that causes auto-initialization of a thread pool in the
+ # target, and auto-construction of the target service, but only if the
+ # service call was made by a more privileged context. We could write a
+ # helper function that runs in the remote to do all that by hand, but the
+ # library handles it for us.
+
+ # Make the file accessible. A future FileService could avoid the need for
+ # this for privileged contexts.
+ context.call_service(
+ service_name=mitogen.service.FileService,
+ method_name='register',
+ path='/etc/passwd'
+ )
+
+ # Now we can use our streamy_download_file() function in reverse -- running
+ # it from this process and having it fetch from the remote process:
+ print("Streamy download /etc/passwd: result: %s" % (
+ streamy_download_file(context, '/etc/passwd'),
+ ))
+
+
+def main():
+ # Set up logging. Mitogen produces a LOT of logging. Over the course of the
+ # stable series, Mitogen's loggers will be carved up so more selective /
+ # user-friendly logging is possible. mitogen.utils.log_to_file() just sets up
+ # something basic, defaulting to INFO level, but you can override from the
+ # command-line by passing MITOGEN_LOG_LEVEL=debug or MITOGEN_LOG_LEVEL=io.
+ # IO logging is sometimes useful for hangs, but it often creates more
+ # confusion than it solves.
+ mitogen.utils.log_to_file()
+
+ # Construct the Broker thread. It manages an async IO loop listening for
+ # reads from any active connection, or wakes from any non-Broker thread.
+ # Because Mitogen uses a background worker thread, it is extremely
+ # important to pay attention to the use of UNIX fork in your code --
+ # forking entails making a snapshot of the state of all locks in the
+ # program, including those in the logging module, and thus can create code
+ # that appears to work for a long time, before deadlocking randomly.
+ # Forking in a Mitogen app requires significant upfront planning!
+ broker = mitogen.master.Broker()
+
+ # Construct a Router. This accepts messages (mitogen.core.Message) and
+ # either dispatches locally addressed messages to local handlers (added via
+ # Router.add_handle()) on the broker thread, or forwards the message
+ # towards the target context.
+
+ # The router also acts as an uglyish God object for creating new
+ # connections. This was a design mistake, really those methods should be
+ # directly imported from e.g. 'mitogen.ssh'.
+ router = mitogen.master.Router(broker)
+
+ # Router can act like a context manager. It simply ensures
+ # Broker.shutdown() is called on exception / exit. That prevents the app
+ # hanging due to a forgotten background thread. For throwaway scripts,
+ # there are also decorator versions "@mitogen.main()" and
+ # "@mitogen.utils.with_router" that do the same thing with less typing.
+ with router:
+ # Now let's construct a context. The '.local()' constructor just creates
+ # the context as a subprocess, the simplest possible case.
+ child = router.local()
+ print("Created a context:", child)
+ print()
+
+ # This demonstrates the standard IO redirection. We call the print
+ # function in the remote context, that should cause a log message to be
+ # emitted. Any subprocesses started by the remote also get the same
+ # treatment, so it's very easy to spot otherwise discarded errors/etc.
+ # from remote tools.
+ child.call(print, "Hello from child.")
+
+ # Context objects make it semi-convenient to treat the local machine the
+ # same as a remote machine.
+ work_on_machine(child)
+
+ # Now let's construct a proxied context. We'll simply use the .local()
+ # constructor again, but construct it via 'child'. In effect we are
+ # constructing a sub-sub-process. Instead of .local() here, we could
+ # have used .sudo() or .ssh() or anything else.
+ subchild = router.local(via=child)
+ print()
+ print()
+ print()
+ print("Created a context as a child of another context:", subchild)
+
+ # Do everything again with the new child.
+ work_on_machine(subchild)
+
+ # We can selectively shut down individual children if we want:
+ subchild.shutdown(wait=True)
+
+ # Or we can simply fall off the end of the scope, effectively calling
+ # Broker.shutdown(), which causes all children to die as part of
+ # shutdown.
+
+
+# The child module importer detects the execution guard below and removes any
+# code appearing after it, and refuses to execute "__main__" if it is absent.
+# This is necessary to prevent a common problem where people try to call
+# functions defined in __main__ without first wrapping it up to be importable
+# as a module, which previously hung the target, or caused bizarre recursive
+# script runs.
+if __name__ == '__main__':
+ main()
diff --git a/mitogen/__init__.py b/mitogen/__init__.py
index 58ef2030..26e48aff 100644
--- a/mitogen/__init__.py
+++ b/mitogen/__init__.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
"""
On the Mitogen master, this is imported from ``mitogen/__init__.py`` as would
be expected. On the slave, it is built dynamically during startup.
@@ -33,7 +35,7 @@ be expected. On the slave, it is built dynamically during startup.
#: Library version as a tuple.
-__version__ = (0, 2, 3)
+__version__ = (0, 2, 4)
#: This is :data:`False` in slave contexts. Previously it was used to prevent
@@ -57,7 +59,12 @@ parent_id = None
parent_ids = []
-def main(log_level='INFO', profiling=False):
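+# If MITOGEN_PROFILING is present in the environment, profiling defaults to on
+# for @mitogen.main() scripts; 'os' is deleted again afterwards to avoid
+# leaving a stray module-level name behind.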
+import os
+_default_profiling = os.environ.get('MITOGEN_PROFILING') is not None
+del os
+
+
+def main(log_level='INFO', profiling=_default_profiling):
"""
Convenience decorator primarily useful for writing discardable test
scripts.
@@ -106,7 +113,7 @@ def main(log_level='INFO', profiling=False):
mitogen.master.Router.profiling = profiling
utils.log_to_file(level=log_level)
return mitogen.core._profile_hook(
- 'main',
+ 'app.main',
utils.run_with_router,
func,
)
diff --git a/mitogen/compat/functools.py b/mitogen/compat/functools.py
deleted file mode 100644
index 6ca75292..00000000
--- a/mitogen/compat/functools.py
+++ /dev/null
@@ -1,288 +0,0 @@
-# encoding: utf-8
-"""Selected backports from Python stdlib functools module
-"""
-# Written by Nick Coghlan ,
-# Raymond Hettinger ,
-# and Łukasz Langa .
-# Copyright (C) 2006-2013 Python Software Foundation.
-
-__all__ = [
- 'update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
- 'lru_cache',
-]
-
-from threading import RLock
-
-
-################################################################################
-### update_wrapper() and wraps() decorator
-################################################################################
-
-# update_wrapper() and wraps() are tools to help write
-# wrapper functions that can handle naive introspection
-
-WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
- '__annotations__')
-WRAPPER_UPDATES = ('__dict__',)
-def update_wrapper(wrapper,
- wrapped,
- assigned = WRAPPER_ASSIGNMENTS,
- updated = WRAPPER_UPDATES):
- """Update a wrapper function to look like the wrapped function
- wrapper is the function to be updated
- wrapped is the original function
- assigned is a tuple naming the attributes assigned directly
- from the wrapped function to the wrapper function (defaults to
- functools.WRAPPER_ASSIGNMENTS)
- updated is a tuple naming the attributes of the wrapper that
- are updated with the corresponding attribute from the wrapped
- function (defaults to functools.WRAPPER_UPDATES)
- """
- for attr in assigned:
- try:
- value = getattr(wrapped, attr)
- except AttributeError:
- pass
- else:
- setattr(wrapper, attr, value)
- for attr in updated:
- getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
- # Issue #17482: set __wrapped__ last so we don't inadvertently copy it
- # from the wrapped function when updating __dict__
- wrapper.__wrapped__ = wrapped
- # Return the wrapper so this can be used as a decorator via partial()
- return wrapper
-
-def wraps(wrapped,
- assigned = WRAPPER_ASSIGNMENTS,
- updated = WRAPPER_UPDATES):
- """Decorator factory to apply update_wrapper() to a wrapper function
- Returns a decorator that invokes update_wrapper() with the decorated
- function as the wrapper argument and the arguments to wraps() as the
- remaining arguments. Default arguments are as for update_wrapper().
- This is a convenience function to simplify applying partial() to
- update_wrapper().
- """
- return partial(update_wrapper, wrapped=wrapped,
- assigned=assigned, updated=updated)
-
-
-################################################################################
-### partial() argument application
-################################################################################
-
-# Purely functional, no descriptor behaviour
-def partial(func, *args, **keywords):
- """New function with partial application of the given arguments
- and keywords.
- """
- if hasattr(func, 'func'):
- args = func.args + args
- tmpkw = func.keywords.copy()
- tmpkw.update(keywords)
- keywords = tmpkw
- del tmpkw
- func = func.func
-
- def newfunc(*fargs, **fkeywords):
- newkeywords = keywords.copy()
- newkeywords.update(fkeywords)
- return func(*(args + fargs), **newkeywords)
- newfunc.func = func
- newfunc.args = args
- newfunc.keywords = keywords
- return newfunc
-
-
-################################################################################
-### LRU Cache function decorator
-################################################################################
-
-class _HashedSeq(list):
- """ This class guarantees that hash() will be called no more than once
- per element. This is important because the lru_cache() will hash
- the key multiple times on a cache miss.
- """
-
- __slots__ = 'hashvalue'
-
- def __init__(self, tup, hash=hash):
- self[:] = tup
- self.hashvalue = hash(tup)
-
- def __hash__(self):
- return self.hashvalue
-
-def _make_key(args, kwds, typed,
- kwd_mark = (object(),),
- fasttypes = set([int, str, frozenset, type(None)]),
- sorted=sorted, tuple=tuple, type=type, len=len):
- """Make a cache key from optionally typed positional and keyword arguments
- The key is constructed in a way that is flat as possible rather than
- as a nested structure that would take more memory.
- If there is only a single argument and its data type is known to cache
- its hash value, then that argument is returned without a wrapper. This
- saves space and improves lookup speed.
- """
- key = args
- if kwds:
- sorted_items = sorted(kwds.items())
- key += kwd_mark
- for item in sorted_items:
- key += item
- if typed:
- key += tuple(type(v) for v in args)
- if kwds:
- key += tuple(type(v) for k, v in sorted_items)
- elif len(key) == 1 and type(key[0]) in fasttypes:
- return key[0]
- return _HashedSeq(key)
-
-def lru_cache(maxsize=128, typed=False):
- """Least-recently-used cache decorator.
- If *maxsize* is set to None, the LRU features are disabled and the cache
- can grow without bound.
- If *typed* is True, arguments of different types will be cached separately.
- For example, f(3.0) and f(3) will be treated as distinct calls with
- distinct results.
- Arguments to the cached function must be hashable.
- View the cache statistics named tuple (hits, misses, maxsize, currsize)
- with f.cache_info(). Clear the cache and statistics with f.cache_clear().
- Access the underlying function with f.__wrapped__.
- See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
- """
-
- # Users should only access the lru_cache through its public API:
- # cache_info, cache_clear, and f.__wrapped__
- # The internals of the lru_cache are encapsulated for thread safety and
- # to allow the implementation to change (including a possible C version).
-
- # Early detection of an erroneous call to @lru_cache without any arguments
- # resulting in the inner function being passed to maxsize instead of an
- # integer or None.
- if maxsize is not None and not isinstance(maxsize, int):
- raise TypeError('Expected maxsize to be an integer or None')
-
- def decorating_function(user_function):
- wrapper = _lru_cache_wrapper(user_function, maxsize, typed)
- return update_wrapper(wrapper, user_function)
-
- return decorating_function
-
-def _lru_cache_wrapper(user_function, maxsize, typed):
- # Constants shared by all lru cache instances:
- sentinel = object() # unique object used to signal cache misses
- make_key = _make_key # build a key from the function arguments
- PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
-
- cache = {}
- cache_get = cache.get # bound method to lookup a key or return None
- lock = RLock() # because linkedlist updates aren't threadsafe
- root = [] # root of the circular doubly linked list
- root[:] = [root, root, None, None] # initialize by pointing to self
- hits_misses_full_root = [0, 0, False, root]
- HITS,MISSES,FULL,ROOT = 0, 1, 2, 3
-
- if maxsize == 0:
-
- def wrapper(*args, **kwds):
- # No caching -- just a statistics update after a successful call
- result = user_function(*args, **kwds)
- hits_misses_full_root[MISSES] += 1
- return result
-
- elif maxsize is None:
-
- def wrapper(*args, **kwds):
- # Simple caching without ordering or size limit
- key = make_key(args, kwds, typed)
- result = cache_get(key, sentinel)
- if result is not sentinel:
- hits_misses_full_root[HITS] += 1
- return result
- result = user_function(*args, **kwds)
- cache[key] = result
- hits_misses_full_root[MISSES] += 1
- return result
-
- else:
-
- def wrapper(*args, **kwds):
- # Size limited caching that tracks accesses by recency
- key = make_key(args, kwds, typed)
- lock.acquire()
- try:
- link = cache_get(key)
- if link is not None:
- # Move the link to the front of the circular queue
- root = hits_misses_full_root[ROOT]
- link_prev, link_next, _key, result = link
- link_prev[NEXT] = link_next
- link_next[PREV] = link_prev
- last = root[PREV]
- last[NEXT] = root[PREV] = link
- link[PREV] = last
- link[NEXT] = root
- hits_misses_full_root[HITS] += 1
- return result
- finally:
- lock.release()
- result = user_function(*args, **kwds)
- lock.acquire()
- try:
- if key in cache:
- # Getting here means that this same key was added to the
- # cache while the lock was released. Since the link
- # update is already done, we need only return the
- # computed result and update the count of misses.
- pass
- elif hits_misses_full_root[FULL]:
- # Use the old root to store the new key and result.
- oldroot = root = hits_misses_full_root[ROOT]
- oldroot[KEY] = key
- oldroot[RESULT] = result
- # Empty the oldest link and make it the new root.
- # Keep a reference to the old key and old result to
- # prevent their ref counts from going to zero during the
- # update. That will prevent potentially arbitrary object
- # clean-up code (i.e. __del__) from running while we're
- # still adjusting the links.
- root = hits_misses_full_root[ROOT] = oldroot[NEXT]
- oldkey = root[KEY]
- oldresult = root[RESULT]
- root[KEY] = root[RESULT] = None
- # Now update the cache dictionary.
- del cache[oldkey]
- # Save the potentially reentrant cache[key] assignment
- # for last, after the root and links have been put in
- # a consistent state.
- cache[key] = oldroot
- else:
- # Put result in a new link at the front of the queue.
- root = hits_misses_full_root[ROOT]
- last = root[PREV]
- link = [last, root, key, result]
- last[NEXT] = root[PREV] = cache[key] = link
- # Use the __len__() method instead of the len() function
- # which could potentially be wrapped in an lru_cache itself.
- hits_misses_full_root[FULL] = (cache.__len__() >= maxsize)
- hits_misses_full_root[MISSES]
- finally:
- lock.release()
- return result
-
- def cache_clear():
- """Clear the cache and cache statistics"""
- lock.acquire()
- try:
- cache.clear()
- root = hits_misses_full_root[ROOT]
- root[:] = [root, root, None, None]
- hits_misses_full[HITS] = 0
- hits_misses_full[MISSES] = 0
- hits_misses_full[FULL] = False
- finally:
- lock.release()
-
- wrapper.cache_clear = cache_clear
- return wrapper
diff --git a/mitogen/compat/pkgutil.py b/mitogen/compat/pkgutil.py
index ce072ec9..28e2aead 100644
--- a/mitogen/compat/pkgutil.py
+++ b/mitogen/compat/pkgutil.py
@@ -1,5 +1,7 @@
"""Utilities to support packages."""
+# !mitogen: minify_safe
+
# NOTE: This module must remain compatible with Python 2.3, as it is shared
# by setuptools for distribution with Python 2.3 and up.
diff --git a/mitogen/compat/tokenize.py b/mitogen/compat/tokenize.py
index dd12af83..0473c6a5 100644
--- a/mitogen/compat/tokenize.py
+++ b/mitogen/compat/tokenize.py
@@ -22,6 +22,8 @@ are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
+# !mitogen: minify_safe
+
__author__ = 'Ka-Ping Yee '
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger')
@@ -392,8 +394,11 @@ def generate_tokens(readline):
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
- yield (NL if parenlev > 0 else NEWLINE,
- token, spos, epos, line)
+ if parenlev > 0:
+ n = NL
+ else:
+ n = NEWLINE
+ yield (n, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
diff --git a/mitogen/core.py b/mitogen/core.py
index dadf0924..a48e13ed 100644
--- a/mitogen/core.py
+++ b/mitogen/core.py
@@ -26,20 +26,25 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
"""
This module implements most package functionality, but remains separate from
non-essential code in order to reduce its size, since it also serves as the
bootstrap implementation sent to every new slave context.
"""
+import binascii
import collections
import encodings.latin_1
import errno
import fcntl
-import imp
import itertools
+import linecache
import logging
import os
+import pickle as py_pickle
+import pstats
import signal
import socket
import struct
@@ -51,9 +56,23 @@ import warnings
import weakref
import zlib
+# Python 3.4+ deprecated the imp module.
+warnings.filterwarnings('ignore', message='the imp module is deprecated')
+import imp
+
# Absolute imports for <2.5.
select = __import__('select')
+try:
+ import cProfile
+except ImportError:
+ cProfile = None
+
+try:
+ import thread
+except ImportError:
+ import threading as thread
+
try:
import cPickle as pickle
except ImportError:
@@ -64,6 +83,16 @@ try:
except ImportError:
from io import BytesIO
+try:
+ BaseException
+except NameError:
+ BaseException = Exception
+
+try:
+ ModuleNotFoundError
+except NameError:
+ ModuleNotFoundError = ImportError
+
# TODO: usage of 'import' after setting __name__, but before fixing up
# sys.modules generates a warning. This happens when profiling = True.
warnings.filterwarnings('ignore',
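
The ``BaseException``/``ModuleNotFoundError`` fallbacks added above let later code reference the modern names unconditionally. A sketch of the pattern in isolation, behaving identically on old and new interpreters::

    try:
        ModuleNotFoundError
    except NameError:
        ModuleNotFoundError = ImportError   # interpreters before Python 3.6

    try:
        __import__('no_such_module_xyz')    # hypothetical missing module
    except ModuleNotFoundError:
        pass   # ImportError on old interpreters, ModuleNotFoundError on new
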
@@ -100,7 +129,7 @@ CALL_SERVICE = 110
#: * a remote receiver is disconnected or explicitly closed.
#: * a related message could not be delivered due to no route existing for it.
#: * a router is being torn down, as a sentinel value to notify
-#: :py:meth:`mitogen.core.Router.add_handler` callbacks to clean up.
+#: :meth:`mitogen.core.Router.add_handler` callbacks to clean up.
IS_DEAD = 999
try:
@@ -108,6 +137,8 @@ try:
except NameError:
BaseException = Exception
+IS_WSL = 'Microsoft' in os.uname()[2]
+PY24 = sys.version_info < (2, 5)
PY3 = sys.version_info > (3,)
if PY3:
b = str.encode
@@ -125,9 +156,12 @@ else:
AnyTextType = (BytesType, UnicodeType)
-if sys.version_info < (2, 5):
+try:
+ next
+except NameError:
next = lambda it: it.next()
+
#: Default size for calls to :meth:`Side.read` or :meth:`Side.write`, and the
#: size of buffers configured by :func:`mitogen.parent.create_socketpair`. This
#: value has many performance implications, 128KiB seems to be a sweet spot.
@@ -186,7 +220,7 @@ class Error(Exception):
class LatchError(Error):
- """Raised when an attempt is made to use a :py:class:`mitogen.core.Latch`
+ """Raised when an attempt is made to use a :class:`mitogen.core.Latch`
that has been marked closed."""
pass
@@ -217,10 +251,15 @@ class Secret(UnicodeType):
class Kwargs(dict):
- """A serializable dict subclass that indicates the contained keys should be
- be coerced to Unicode on Python 3 as required. Python 2 produces keyword
- argument dicts whose keys are bytestrings, requiring a helper to ensure
- compatibility with Python 3."""
+ """
+ A serializable dict subclass that indicates its keys should be coerced to
+ Unicode on Python 3 and bytes on Python<2.6.
+
+ Python 2 produces keyword argument dicts whose keys are bytes, requiring a
+ helper to ensure compatibility with Python 3 where Unicode is required,
+ whereas Python 3 produces keyword argument dicts whose keys are Unicode,
+ requiring a helper for Python 2.4/2.5, where bytes are required.
+ """
if PY3:
def __init__(self, dct):
for k, v in dct.items():
@@ -228,6 +267,13 @@ class Kwargs(dict):
self[k.decode()] = v
else:
self[k] = v
+ elif sys.version_info < (2, 6):
+ def __init__(self, dct):
+ for k, v in dct.iteritems():
+ if type(k) is unicode:
+ self[k.encode()] = v
+ else:
+ self[k] = v
def __repr__(self):
return 'Kwargs(%s)' % (dict.__repr__(self),)
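
A sketch of the coercion the Python 3 branch performs: keyword dicts pickled by a 2.x sender arrive with byte keys, which must become text before ``f(**kwargs)`` is legal::

    from mitogen.core import Kwargs

    def greet(name):
        return u'hi, %s' % (name,)

    kw = Kwargs({b'name': u'dave'})     # keys as produced by a 2.x sender
    assert list(kw) == ['name']         # coerced to str on Python 3
    assert greet(**kw) == u'hi, dave'
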
@@ -237,17 +283,18 @@ class Kwargs(dict):
class CallError(Error):
- """Serializable :class:`Error` subclass raised when
- :py:meth:`Context.call() ` fails. A copy of
- the traceback from the external context is appended to the exception
- message."""
+ """
+ Serializable :class:`Error` subclass raised when :meth:`Context.call()
+ ` fails. A copy of the traceback from the
+ external context is appended to the exception message.
+ """
def __init__(self, fmt=None, *args):
if not isinstance(fmt, BaseException):
Error.__init__(self, fmt, *args)
else:
e = fmt
- fmt = '%s.%s: %s' % (type(e).__module__, type(e).__name__, e)
- args = ()
+ cls = e.__class__
+ fmt = '%s.%s: %s' % (cls.__module__, cls.__name__, e)
tb = sys.exc_info()[2]
if tb:
fmt += '\n'
@@ -261,9 +308,7 @@ class CallError(Error):
def _unpickle_call_error(s):
if not (type(s) is UnicodeType and len(s) < 10000):
raise TypeError('cannot unpickle CallError: bad input')
- inst = CallError.__new__(CallError)
- Exception.__init__(inst, s)
- return inst
+ return CallError(s)
class ChannelError(Error):
@@ -291,6 +336,39 @@ def to_text(o):
return UnicodeType(o)
+# Python 2.4
+try:
+ any
+except NameError:
+ def any(it):
+ for elem in it:
+ if elem:
+ return True
+
+
+def _partition(s, sep, find):
+ """
+ (str|unicode).(partition|rpartition) for Python 2.4/2.5.
+ """
+ idx = find(sep)
+ if idx != -1:
+ left = s[0:idx]
+ return left, sep, s[len(left)+len(sep):]
+
+
+if hasattr(UnicodeType, 'rpartition'):
+ str_partition = UnicodeType.partition
+ str_rpartition = UnicodeType.rpartition
+ bytes_partition = BytesType.partition
+else:
+ def str_partition(s, sep):
+ return _partition(s, sep, s.find) or (s, u'', u'')
+ def str_rpartition(s, sep):
+ return _partition(s, sep, s.rfind) or (u'', u'', s)
+ def bytes_partition(s, sep):
+ return _partition(s, sep, s.find) or (s, '', '')
+
+
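
The fallbacks return the same 3-tuples as the 2.5+ built-in methods, including the shape used when the separator is absent; a quick parity check::

    from mitogen.core import str_partition, str_rpartition

    assert str_partition(u'a.b.c', u'.') == (u'a', u'.', u'b.c')
    assert str_rpartition(u'a.b.c', u'.') == (u'a.b', u'.', u'c')
    # Separator absent: partition pads on the right, rpartition on the left.
    assert str_partition(u'abc', u'.') == (u'abc', u'', u'')
    assert str_rpartition(u'abc', u'.') == (u'', u'', u'abc')
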
def has_parent_authority(msg, _stream=None):
"""Policy function for use with :class:`Receiver` and
:meth:`Router.add_handler` that requires incoming messages to originate
@@ -316,7 +394,8 @@ def fire(obj, name, *args, **kwargs):
registered for the named signal on `obj`.
"""
signals = vars(obj).get('_signals', {})
- return [func(*args, **kwargs) for func in signals.get(name, ())]
+ for func in signals.get(name, ()):
+ func(*args, **kwargs)
def takes_econtext(func):
@@ -385,20 +464,20 @@ def io_op(func, *args):
signalled by :data:`errno.EPIPE`.
:returns:
- Tuple of `(return_value, disconnected)`, where `return_value` is the
- return value of `func(*args)`, and `disconnected` is :data:`True` if
- disconnection was detected, otherwise :data:`False`.
+ Tuple of `(return_value, disconnect_reason)`, where `return_value` is
+ the return value of `func(*args)`, and `disconnect_reason` is an exception
+ instance when disconnection was detected, otherwise :data:`None`.
"""
while True:
try:
- return func(*args), False
+ return func(*args), None
except (select.error, OSError, IOError):
e = sys.exc_info()[1]
_vv and IOLOG.debug('io_op(%r) -> OSError: %s', func, e)
if e.args[0] == errno.EINTR:
continue
if e.args[0] in (errno.EIO, errno.ECONNRESET, errno.EPIPE):
- return None, True
+ return None, e
raise
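
A sketch of the revised contract, runnable on any UNIX: the second element of the tuple is now the trapped exception rather than a bool. CPython ignores SIGPIPE at startup, so the EPIPE surfaces as an exception for :func:`io_op` to trap::

    import errno
    import os
    from mitogen.core import io_op

    # Read side of a pipe whose write end is closed: os.read() returns b''
    # without raising, so the disconnect reason is None.
    rfd, wfd = os.pipe()
    os.close(wfd)
    assert io_op(os.read, rfd, 128) == (b'', None)
    os.close(rfd)

    # Write side of a pipe whose read end is closed: os.write() raises
    # EPIPE, which io_op() traps and returns as the reason.
    rfd, wfd = os.pipe()
    os.close(rfd)
    value, reason = io_op(os.write, wfd, b'x')
    assert value is None and reason.args[0] == errno.EPIPE
    os.close(wfd)
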
@@ -457,27 +536,48 @@ def enable_debug_logging():
_profile_hook = lambda name, func, *args: func(*args)
+_profile_fmt = os.environ.get(
+ 'MITOGEN_PROFILE_FMT',
+ '/tmp/mitogen.stats.%(pid)s.%(identity)s.%(now)s.%(ext)s',
+)
-def enable_profiling():
- global _profile_hook
- import cProfile
- import pstats
- def _profile_hook(name, func, *args):
- profiler = cProfile.Profile()
- profiler.enable()
+def _profile_hook(name, func, *args):
+ """
+ Call `func(*args)` and return its result. This function is replaced by
+ :func:`_real_profile_hook` when :func:`enable_profiling` is called. This
+ interface is obsolete and will be replaced by a signals-based integration
+ later on.
+ """
+ return func(*args)
+
+
+def _real_profile_hook(name, func, *args):
+ profiler = cProfile.Profile()
+ profiler.enable()
+ try:
+ return func(*args)
+ finally:
+ path = _profile_fmt % {
+ 'now': int(1e6 * time.time()),
+ 'identity': name,
+ 'pid': os.getpid(),
+ 'ext': '%s'
+ }
+ profiler.dump_stats(path % ('pstats',))
+ profiler.create_stats()
+ fp = open(path % ('log',), 'w')
try:
- return func(*args)
+ stats = pstats.Stats(profiler, stream=fp)
+ stats.sort_stats('cumulative')
+ stats.print_stats()
finally:
- profiler.dump_stats('/tmp/mitogen.%d.%s.pstat' % (os.getpid(), name))
- profiler.create_stats()
- fp = open('/tmp/mitogen.stats.%d.%s.log' % (os.getpid(), name), 'w')
- try:
- stats = pstats.Stats(profiler, stream=fp)
- stats.sort_stats('cumulative')
- stats.print_stats()
- finally:
- fp.close()
+ fp.close()
+
+
+def enable_profiling(econtext=None):
+ global _profile_hook
+ _profile_hook = _real_profile_hook
def import_module(modname):
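
A sketch of exercising the hook directly; in normal use :func:`enable_profiling` is invoked during context setup rather than by hand. The template is read when :mod:`mitogen.core` is imported, so ``MITOGEN_PROFILE_FMT`` must be set first, and ``%(ext)s`` expands to ``pstats`` or ``log``::

    import os
    os.environ['MITOGEN_PROFILE_FMT'] = \
        '/tmp/demo.%(identity)s.%(now)s.%(ext)s'

    import mitogen.core
    mitogen.core.enable_profiling()

    # Each hooked call now dumps raw cProfile data plus a pre-sorted
    # text rendering.
    assert mitogen.core._profile_hook('demo', sum, [1, 2, 3]) == 6
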
@@ -487,38 +587,109 @@ def import_module(modname):
return __import__(modname, None, None, [''])
+class Py24Pickler(py_pickle.Pickler):
+ """
+ Exceptions were classic classes until Python 2.5. Sadly for 2.4, cPickle
+ offers little control over how a classic instance is pickled. Therefore 2.4
+ uses a pure-Python pickler, so CallError can be made to look as it does on
+ newer Pythons.
+
+ This mess will go away once proper serialization exists.
+ """
+ @classmethod
+ def dumps(cls, obj, protocol):
+ bio = BytesIO()
+ self = cls(bio, protocol=protocol)
+ self.dump(obj)
+ return bio.getvalue()
+
+ def save_exc_inst(self, obj):
+ if isinstance(obj, CallError):
+ func, args = obj.__reduce__()
+ self.save(func)
+ self.save(args)
+ self.write(py_pickle.REDUCE)
+ else:
+ py_pickle.Pickler.save_inst(self, obj)
+
+ if PY24:
+ dispatch = py_pickle.Pickler.dispatch.copy()
+ dispatch[py_pickle.InstanceType] = save_exc_inst
+
+
if PY3:
# In 3.x Unpickler is a class exposing find_class as an overridable, but it
# cannot be overridden without subclassing.
class _Unpickler(pickle.Unpickler):
def find_class(self, module, func):
return self.find_global(module, func)
+ pickle__dumps = pickle.dumps
+elif PY24:
+ # On Python 2.4, we must use a pure-Python pickler.
+ pickle__dumps = Py24Pickler.dumps
+ _Unpickler = pickle.Unpickler
else:
+ pickle__dumps = pickle.dumps
# In 2.x Unpickler is a function exposing a writeable find_global
# attribute.
_Unpickler = pickle.Unpickler
class Message(object):
+ """
+ Messages are the fundamental unit of communication, comprising fields from
+ the :ref:`stream-protocol` header, an optional reference to the receiving
+ :class:`mitogen.core.Router` for ingress messages, and helper methods for
+ deserialization and generating replies.
+ """
+ #: Integer target context ID. :class:`Router` delivers messages locally
+ #: when their :attr:`dst_id` matches :data:`mitogen.context_id`, otherwise
+ #: they are routed up or downstream.
dst_id = None
+
+ #: Integer source context ID. Used as the target of replies if any are
+ #: generated.
src_id = None
+
+ #: Context ID under whose authority the message is acting. See
+ #: :ref:`source-verification`.
auth_id = None
+
+ #: Integer target handle in the destination context. This is one of the
+ #: :ref:`standard-handles`, or a dynamically generated handle used to
+ #: receive a one-time reply, such as the return value of a function call.
handle = None
+
+ #: Integer target handle to direct any reply to this message. Used to
+ #: receive a one-time reply, such as the return value of a function call.
+ #: :data:`IS_DEAD` has a special meaning when it appears in this field.
reply_to = None
+
+ #: Raw message data bytes.
data = b('')
+
_unpickled = object()
+ #: The :class:`Router` responsible for routing the message. This is
+ #: :data:`None` for locally originated messages.
router = None
+
+ #: The :class:`Receiver` over which the message was last received. Part of
+ #: the :class:`mitogen.select.Select` interface. Defaults to :data:`None`.
receiver = None
def __init__(self, **kwargs):
+ """
+ Construct a message from the supplied `kwargs`. :attr:`src_id` and
+ :attr:`auth_id` are always set to :data:`mitogen.context_id`.
+ """
self.src_id = mitogen.context_id
self.auth_id = mitogen.context_id
vars(self).update(kwargs)
assert isinstance(self.data, BytesType)
def _unpickle_context(self, context_id, name):
- return _unpickle_context(self.router, context_id, name)
+ return _unpickle_context(context_id, name, router=self.router)
def _unpickle_sender(self, context_id, dst_handle):
return _unpickle_sender(self.router, context_id, dst_handle)
@@ -531,7 +702,7 @@ class Message(object):
"""Return the class implementing `module_name.class_name` or raise
`StreamError` if the module is not whitelisted."""
if module == __name__:
- if func == '_unpickle_call_error':
+ if func == '_unpickle_call_error' or func == 'CallError':
return _unpickle_call_error
elif func == '_unpickle_sender':
return self._unpickle_sender
@@ -551,23 +722,52 @@ class Message(object):
@property
def is_dead(self):
+ """
+ :data:`True` if :attr:`reply_to` is set to the magic value
+ :data:`IS_DEAD`, indicating the sender considers the channel dead. Dead
+ messages can be raised in a variety of circumstances, see
+ :data:`IS_DEAD` for more information.
+ """
return self.reply_to == IS_DEAD
@classmethod
- def dead(cls, **kwargs):
+ def dead(cls, reason=None, **kwargs):
+ """
+ Syntax helper to construct a dead message.
+ """
+ kwargs['data'] = (reason or u'').encode()
return cls(reply_to=IS_DEAD, **kwargs)
@classmethod
def pickled(cls, obj, **kwargs):
+ """
+ Construct a pickled message, setting :attr:`data` to the serialization
+ of `obj`, and setting remaining fields using `kwargs`.
+
+ :returns:
+ The new message.
+ """
self = cls(**kwargs)
try:
- self.data = pickle.dumps(obj, protocol=2)
+ self.data = pickle__dumps(obj, protocol=2)
except pickle.PicklingError:
e = sys.exc_info()[1]
- self.data = pickle.dumps(CallError(e), protocol=2)
+ self.data = pickle__dumps(CallError(e), protocol=2)
return self
def reply(self, msg, router=None, **kwargs):
+ """
+ Compose a reply to this message and send it using :attr:`router`, or
+ `router` if :attr:`router` is :data:`None`.
+
+ :param msg:
+ Either a :class:`Message`, or an object to be serialized in order
+ to construct a new message.
+ :param router:
+ Optional router to use if :attr:`router` is :data:`None`.
+ :param kwargs:
+ Optional keyword parameters overriding message fields in the reply.
+ """
if not isinstance(msg, Message):
msg = Message.pickled(msg)
msg.dst_id = self.src_id
@@ -583,11 +783,30 @@ class Message(object):
else:
UNPICKLER_KWARGS = {}
+ def _throw_dead(self):
+ if len(self.data):
+ raise ChannelError(self.data.decode('utf-8', 'replace'))
+ elif self.src_id == mitogen.context_id:
+ raise ChannelError(ChannelError.local_msg)
+ else:
+ raise ChannelError(ChannelError.remote_msg)
+
def unpickle(self, throw=True, throw_dead=True):
- """Deserialize `data` into an object."""
+ """
+ Unpickle :attr:`data`, optionally raising any exceptions present.
+
+ :param bool throw_dead:
+ If :data:`True`, raise :class:`ChannelError` if the message is dead;
+ otherwise detecting dead messages is the caller's responsibility.
+
+ :raises CallError:
+ The serialized data contained a CallError exception.
+ :raises ChannelError:
+ The `is_dead` field was set.
+ """
_vv and IOLOG.debug('%r.unpickle()', self)
if throw_dead and self.is_dead:
- raise ChannelError(ChannelError.remote_msg)
+ self._throw_dead()
obj = self._unpickled
if obj is Message._unpickled:
@@ -616,25 +835,49 @@ class Message(object):
class Sender(object):
+ """
+ Senders are used to send pickled messages to a handle in another context;
+ a sender is the inverse of :class:`mitogen.core.Receiver`.
+
+ Senders may be serialized, making them convenient to wire up data flows.
+ See :meth:`mitogen.core.Receiver.to_sender` for more information.
+
+ :param Context context:
+ Context to send messages to.
+ :param int dst_handle:
+ Destination handle to send messages to.
+ """
def __init__(self, context, dst_handle):
self.context = context
self.dst_handle = dst_handle
- def __repr__(self):
- return 'Sender(%r, %r)' % (self.context, self.dst_handle)
+ def send(self, data):
+ """
+ Send `data` to the remote end.
+ """
+ _vv and IOLOG.debug('%r.send(%r..)', self, repr(data)[:100])
+ self.context.send(Message.pickled(data, handle=self.dst_handle))
- def __reduce__(self):
- return _unpickle_sender, (self.context.context_id, self.dst_handle)
+ explicit_close_msg = 'Sender was explicitly closed'
def close(self):
- """Indicate this channel is closed to the remote side."""
+ """
+ Send a dead message to the remote, causing :meth:`ChannelError` to be
+ raised in any waiting thread.
+ """
_vv and IOLOG.debug('%r.close()', self)
- self.context.send(Message.dead(handle=self.dst_handle))
+ self.context.send(
+ Message.dead(
+ reason=self.explicit_close_msg,
+ handle=self.dst_handle
+ )
+ )
- def send(self, data):
- """Send `data` to the remote."""
- _vv and IOLOG.debug('%r.send(%r..)', self, repr(data)[:100])
- self.context.send(Message.pickled(data, handle=self.dst_handle))
+ def __repr__(self):
+ return 'Sender(%r, %r)' % (self.context, self.dst_handle)
+
+ def __reduce__(self):
+ return _unpickle_sender, (self.context.context_id, self.dst_handle)
def _unpickle_sender(router, context_id, dst_handle):
@@ -646,12 +889,41 @@ def _unpickle_sender(router, context_id, dst_handle):
class Receiver(object):
+ """
+ Receivers maintain a thread-safe queue of messages sent to a handle of this
+ context from another context.
+
+ :param mitogen.core.Router router:
+ Router to register the handler on.
+
+ :param int handle:
+ If not :data:`None`, an explicit handle to register, otherwise an
+ unused handle is chosen.
+
+ :param bool persist:
+ If :data:`False`, unregister the handler after one message is received.
+ Single-message receivers are intended for RPC-like transactions, such
+ as in the case of :meth:`mitogen.parent.Context.call_async`.
+
+ :param mitogen.core.Context respondent:
+ Context this receiver is receiving from. If not :data:`None`, arranges
+ for the receiver to receive a dead message if messages can no longer be
+ routed to the context due to disconnection, and ignores messages that
+ did not originate from the respondent context.
+ """
+ #: If not :data:`None`, a reference to a function invoked as
+ #: `notify(receiver)` when a new message is delivered to this receiver. The
+ #: function is invoked on the broker thread, therefore it must not block.
+ #: Used by :class:`mitogen.select.Select` to implement waiting on multiple
+ #: receivers.
notify = None
+
raise_channelerror = True
def __init__(self, router, handle=None, persist=True,
- respondent=None, policy=None):
+ respondent=None, policy=None, overwrite=False):
self.router = router
+ #: The handle.
self.handle = handle # Avoid __repr__ crash in add_handler()
self._latch = Latch() # Must exist prior to .add_handler()
self.handle = router.add_handler(
@@ -660,54 +932,137 @@ class Receiver(object):
policy=policy,
persist=persist,
respondent=respondent,
+ overwrite=overwrite,
)
def __repr__(self):
return 'Receiver(%r, %r)' % (self.router, self.handle)
+ def __enter__(self):
+ return self
+
+ def __exit__(self, _1, _2, _3):
+ self.close()
+
def to_sender(self):
- context = Context(self.router, mitogen.context_id)
- return Sender(context, self.handle)
+ """
+ Return a :class:`Sender` configured to deliver messages to this
+ receiver. As senders are serializable, this makes it convenient to pass
+ `(context_id, handle)` pairs around::
+
+ def deliver_monthly_report(sender):
+ for line in open('monthly_report.txt'):
+ sender.send(line)
+ sender.close()
+
+ @mitogen.main()
+ def main(router):
+ remote = router.ssh(hostname='mainframe')
+ recv = mitogen.core.Receiver(router)
+ remote.call(deliver_monthly_report, recv.to_sender())
+ for msg in recv:
+ print(msg)
+ """
+ return Sender(self.router.myself(), self.handle)
def _on_receive(self, msg):
- """Callback from the Stream; appends data to the internal queue."""
+ """
+ Callback registered for the handle with :class:`Router`; appends data
+ to the internal queue.
+ """
_vv and IOLOG.debug('%r._on_receive(%r)', self, msg)
self._latch.put(msg)
if self.notify:
self.notify(self)
+ closed_msg = 'the Receiver has been closed'
+
def close(self):
+ """
+ Unregister the receiver's handle from its associated router, and cause
+ :class:`ChannelError` to be raised in any thread waiting in :meth:`get`
+ on this receiver.
+ """
if self.handle:
self.router.del_handler(self.handle)
self.handle = None
- self._latch.put(Message.dead())
+ self._latch.close()
def empty(self):
+ """
+ Return :data:`True` if calling :meth:`get` would block.
+
+ As with :class:`Queue.Queue`, :data:`True` may be returned even though
+ a subsequent call to :meth:`get` will succeed, since a message may be
+ posted at any moment between :meth:`empty` and :meth:`get`.
+ """
return self._latch.empty()
def get(self, timeout=None, block=True, throw_dead=True):
+ """
+ Sleep waiting for a message to arrive on this receiver.
+
+ :param float timeout:
+ If not :data:`None`, specifies a timeout in seconds.
+
+ :raises mitogen.core.ChannelError:
+ The remote end indicated the channel should be closed,
+ communication with it was lost, or :meth:`close` was called in the
+ local process.
+
+ :raises mitogen.core.TimeoutError:
+ Timeout was reached.
+
+ :returns:
+ :class:`Message` that was received.
+ """
_vv and IOLOG.debug('%r.get(timeout=%r, block=%r)', self, timeout, block)
- msg = self._latch.get(timeout=timeout, block=block)
+ try:
+ msg = self._latch.get(timeout=timeout, block=block)
+ except LatchError:
+ raise ChannelError(self.closed_msg)
if msg.is_dead and throw_dead:
- if msg.src_id == mitogen.context_id:
- raise ChannelError(ChannelError.local_msg)
- else:
- raise ChannelError(ChannelError.remote_msg)
+ msg._throw_dead()
return msg
def __iter__(self):
+ """
+ Yield consecutive :class:`Message` instances delivered to this receiver
+ until :class:`ChannelError` is raised.
+ """
while True:
- msg = self.get(throw_dead=False)
- if msg.is_dead:
+ try:
+ msg = self.get()
+ except ChannelError:
return
yield msg
class Channel(Sender, Receiver):
+ """
+ A channel inherits from :class:`mitogen.core.Sender` and
+ :class:`mitogen.core.Receiver` to provide bidirectional functionality.
+
+ This class is incomplete and obsolete; it will be removed in Mitogen 0.3.
+ Channels were an early attempt at syntax sugar. It is always easier to pass
+ around unidirectional pairs of senders/receivers, even though the syntax is
+ baroque:
+
+ .. literalinclude:: ../examples/ping_pong.py
+
+ Since neither end's handle is known until both ends are constructed, for
+ both ends to communicate through a channel, it is necessary for one end to
+ retrieve the handle allocated to the other and reconfigure its own channel
+ to match. Currently this is a manual task.
+ """
def __init__(self, router, context, dst_handle, handle=None):
Sender.__init__(self, context, dst_handle)
Receiver.__init__(self, router, handle)
+ def close(self):
+ Receiver.close(self)
+ Sender.close(self)
+
def __repr__(self):
return 'Channel(%s, %s)' % (
Sender.__repr__(self),
@@ -722,58 +1077,83 @@ class Importer(object):
:param context: Context to communicate via.
"""
+ # The Mitogen package is handled specially, since the child context must
+ # construct it manually during startup.
+ MITOGEN_PKG_CONTENT = [
+ 'compat',
+ 'debug',
+ 'doas',
+ 'docker',
+ 'kubectl',
+ 'fakessh',
+ 'fork',
+ 'jail',
+ 'lxc',
+ 'lxd',
+ 'master',
+ 'minify',
+ 'parent',
+ 'select',
+ 'service',
+ 'setns',
+ 'ssh',
+ 'su',
+ 'sudo',
+ 'utils',
+ ]
+
+ ALWAYS_BLACKLIST = [
+ # 2.x generates needless imports for 'builtins', while 3.x does the
+ # same for '__builtin__'. The correct one is built-in; the other is
+ # always a negative round-trip.
+ 'builtins',
+ '__builtin__',
+ 'thread',
+
+ # org.python.core imported by copy, pickle, xml.sax; breaks Jython, but
+ # very unlikely to trigger a bug report.
+ 'org',
+ ]
+
+ if PY3:
+ ALWAYS_BLACKLIST += ['cStringIO']
+
def __init__(self, router, context, core_src, whitelist=(), blacklist=()):
self._context = context
- self._present = {'mitogen': [
- 'compat',
- 'debug',
- 'doas',
- 'docker',
- 'kubectl',
- 'fakessh',
- 'fork',
- 'jail',
- 'lxc',
- 'lxd',
- 'master',
- 'minify',
- 'parent',
- 'select',
- 'service',
- 'setns',
- 'ssh',
- 'su',
- 'sudo',
- 'utils',
- ]}
+ self._present = {'mitogen': self.MITOGEN_PKG_CONTENT}
self._lock = threading.Lock()
self.whitelist = list(whitelist) or ['']
- self.blacklist = list(blacklist) + [
- # 2.x generates needless imports for 'builtins', while 3.x does the
- # same for '__builtin__'. The correct one is built-in, the other
- # always a negative round-trip.
- 'builtins',
- '__builtin__',
- # org.python.core imported by copy, pickle, xml.sax; breaks Jython,
- # but very unlikely to trigger a bug report.
- 'org',
- ]
- if PY3:
- self.blacklist += ['cStringIO']
+ self.blacklist = list(blacklist) + self.ALWAYS_BLACKLIST
# Presence of an entry in this map indicates in-flight GET_MODULE.
self._callbacks = {}
self._cache = {}
if core_src:
+ self._update_linecache('x/mitogen/core.py', core_src)
self._cache['mitogen.core'] = (
'mitogen.core',
None,
- 'mitogen/core.py',
+ 'x/mitogen/core.py',
zlib.compress(core_src, 9),
[],
)
self._install_handler(router)
+ def _update_linecache(self, path, data):
+ """
+ The Python 2.4 linecache module, used to fetch source code for
+ tracebacks and :func:`inspect.getsource`, does not support PEP-302,
+ meaning it needs extra help for Mitogen-loaded modules. Directly
+ populate its cache if a loaded module belongs to the Mitogen package.
+ """
+ if PY24 and 'mitogen' in path:
+ linecache.cache[path] = (
+ len(data),
+ 0.0,
+ [line+'\n' for line in data.splitlines()],
+ path,
+ )
+
def _install_handler(self, router):
router.add_handler(
fn=self._on_load_module,
@@ -789,9 +1169,9 @@ class Importer(object):
# built-in module. That means it exists on a special linked list deep
# within the bowels of the interpreter. We must special case it.
if fullname == '__main__':
- raise ImportError()
+ raise ModuleNotFoundError()
- parent, _, modname = fullname.rpartition('.')
+ parent, _, modname = str_rpartition(fullname, '.')
if parent:
path = sys.modules[parent].__path__
else:
@@ -808,7 +1188,8 @@ class Importer(object):
_tls.running = True
try:
_v and LOG.debug('%r.find_module(%r)', self, fullname)
- pkgname, dot, _ = fullname.rpartition('.')
+ fullname = to_text(fullname)
+ pkgname, dot, _ = str_rpartition(fullname, '.')
pkg = sys.modules.get(pkgname)
if pkgname and getattr(pkg, '__loader__', None) is not self:
LOG.debug('%r: %r is submodule of a package we did not load',
@@ -836,9 +1217,25 @@ class Importer(object):
finally:
del _tls.running
+ blacklisted_msg = (
+ '%r is present in the Mitogen importer blacklist, therefore this '
+ 'context will not attempt to request it from the master, as the '
+ 'request will always be refused.'
+ )
+ pkg_resources_msg = (
+ 'pkg_resources is prohibited from importing __main__, as it causes '
+ 'problems in applications whose main module is not designed to be '
+ 're-imported by children.'
+ )
+ absent_msg = (
+ 'The Mitogen master process was unable to serve %r. It may be a '
+ 'native Python extension, or it may be missing entirely. Check the '
+ 'importer debug logs on the master for more information.'
+ )
+
def _refuse_imports(self, fullname):
if is_blacklisted_import(self, fullname):
- raise ImportError('Refused: ' + fullname)
+ raise ModuleNotFoundError(self.blacklisted_msg % (fullname,))
f = sys._getframe(2)
requestee = f.f_globals['__name__']
@@ -850,7 +1247,7 @@ class Importer(object):
# breaks any app that is not expecting its __main__ to suddenly be
# sucked over a network and injected into a remote process, like
# py.test.
- raise ImportError('Refused')
+ raise ModuleNotFoundError(self.pkg_resources_msg)
if fullname == 'pbr':
# It claims to use pkg_resources to read version information, which
@@ -871,6 +1268,11 @@ class Importer(object):
self._lock.acquire()
try:
self._cache[fullname] = tup
+ if tup[2] is not None and PY24:
+ self._update_linecache(
+ path='master:' + tup[2],
+ data=zlib.decompress(tup[3])
+ )
callbacks = self._callbacks.pop(fullname, [])
finally:
self._lock.release()
@@ -910,7 +1312,7 @@ class Importer(object):
ret = self._cache[fullname]
if ret[2] is None:
- raise ImportError('Master does not have %r' % (fullname,))
+ raise ModuleNotFoundError(self.absent_msg % (fullname,))
pkg_present = ret[1]
mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
@@ -921,14 +1323,19 @@ class Importer(object):
mod.__package__ = fullname
self._present[fullname] = pkg_present
else:
- mod.__package__ = fullname.rpartition('.')[0] or None
+ mod.__package__ = str_rpartition(fullname, '.')[0] or None
if mod.__package__ and not PY3:
# 2.x requires __package__ to be exactly a string.
mod.__package__ = mod.__package__.encode()
source = self.get_source(fullname)
- code = compile(source, mod.__file__, 'exec', 0, 1)
+ try:
+ code = compile(source, mod.__file__, 'exec', 0, 1)
+ except SyntaxError:
+ LOG.exception('while importing %r', fullname)
+ raise
+
if PY3:
exec(code, vars(mod))
else:
@@ -943,14 +1350,14 @@ class Importer(object):
# reveals the module can't be loaded, and so load_module()
# throws ImportError, on Python 3.x it is still possible for
# the loader to be called to fetch metadata.
- raise ImportError('master cannot serve %r' % (fullname,))
+ raise ModuleNotFoundError(self.absent_msg % (fullname,))
return u'master:' + self._cache[fullname][2]
def get_source(self, fullname):
if fullname in self._cache:
compressed = self._cache[fullname][3]
if compressed is None:
- raise ImportError('master cannot serve %r' % (fullname,))
+ raise ModuleNotFoundError(self.absent_msg % (fullname,))
source = zlib.decompress(self._cache[fullname][3])
if PY3:
@@ -966,6 +1373,11 @@ class LogHandler(logging.Handler):
self._buffer = []
def uncork(self):
+ """
+ #305: during startup :class:`LogHandler` may be installed before it is
+ possible to route messages, therefore messages are buffered until
+ :meth:`uncork` is called by :class:`ExternalContext`.
+ """
self._send = self.context.send
for msg in self._buffer:
self._send(msg)
@@ -992,12 +1404,35 @@ class LogHandler(logging.Handler):
class Side(object):
+ """
+ Represent a single side of a :class:`BasicStream`. This exists to allow
+ streams implemented using unidirectional (e.g. UNIX pipe) and bidirectional
+ (e.g. UNIX socket) file descriptors to operate identically.
+
+ :param mitogen.core.Stream stream:
+ The stream this side is associated with.
+
+ :param int fd:
+ Underlying file descriptor.
+
+ :param bool keep_alive:
+ Value for :attr:`keep_alive`
+
+ During construction, the file descriptor has its :data:`os.O_NONBLOCK` flag
+ enabled using :func:`fcntl.fcntl`.
+ """
_fork_refs = weakref.WeakValueDictionary()
def __init__(self, stream, fd, cloexec=True, keep_alive=True, blocking=False):
+ #: The :class:`Stream` for which this is a read or write side.
self.stream = stream
+ #: Integer file descriptor to perform IO on, or :data:`None` if
+ #: :meth:`close` has been called.
self.fd = fd
self.closed = False
+ #: If :data:`True`, causes presence of this side in
+ #: :class:`Broker`'s active reader set to defer shutdown until the
+ #: side is disconnected.
self.keep_alive = keep_alive
self._fork_refs[id(self)] = self
if cloexec:
@@ -1010,26 +1445,55 @@ class Side(object):
@classmethod
def _on_fork(cls):
- for side in list(cls._fork_refs.values()):
+ while cls._fork_refs:
+ _, side = cls._fork_refs.popitem()
+ _vv and IOLOG.debug('Side._on_fork() closing %r', side)
side.close()
def close(self):
+ """
+ Call :func:`os.close` on :attr:`fd` if it is not :data:`None`,
+ then set it to :data:`None`.
+ """
if not self.closed:
_vv and IOLOG.debug('%r.close()', self)
self.closed = True
os.close(self.fd)
def read(self, n=CHUNK_SIZE):
+ """
+ Read up to `n` bytes from the file descriptor, wrapping the underlying
+ :func:`os.read` call with :func:`io_op` to trap common disconnection
+ conditions.
+
+ :meth:`read` always behaves as if it is reading from a regular UNIX
+ file; socket, pipe, and TTY disconnection errors are masked and result
+ in a 0-sized read like a regular file.
+
+ :returns:
+ Bytes read, or an empty string to indicate disconnection was
+ detected.
+ """
if self.closed:
# Refuse to touch the handle after closed, it may have been reused
# by another thread. TODO: synchronize read()/write()/close().
return b('')
s, disconnected = io_op(os.read, self.fd, n)
if disconnected:
+ LOG.debug('%r.read(): disconnected: %s', self, disconnected)
return b('')
return s
def write(self, s):
+ """
+ Write as much of the bytes from `s` as possible to the file descriptor,
+ wrapping the underlying :func:`os.write` call with :func:`io_op` to
+ trap common disconnection conditions.
+
+ :returns:
+ Number of bytes written, or :data:`None` if disconnection was
+ detected.
+ """
if self.closed or self.fd is None:
# Refuse to touch the handle after closed, it may have been reused
# by another thread.
@@ -1037,15 +1501,58 @@ class Side(object):
written, disconnected = io_op(os.write, self.fd, s)
if disconnected:
+ LOG.debug('%r.write(): disconnected: %s', self, disconnected)
return None
return written
class BasicStream(object):
+ #: A :class:`Side` representing the stream's receive file descriptor.
receive_side = None
+
+ #: A :class:`Side` representing the stream's transmit file descriptor.
transmit_side = None
+ def on_receive(self, broker):
+ """
+ Called by :class:`Broker` when the stream's :attr:`receive_side` has
+ been marked readable using :meth:`Broker.start_receive` and the broker
+ has detected the associated file descriptor is ready for reading.
+
+ Subclasses must implement this if :meth:`Broker.start_receive` is ever
+ called on them, and the method must call :meth:`on_disconnect` if
+ reading produces an empty string.
+ """
+ pass
+
+ def on_transmit(self, broker):
+ """
+ Called by :class:`Broker` when the stream's :attr:`transmit_side`
+ has been marked writeable using :meth:`Broker._start_transmit` and
+ the broker has detected the associated file descriptor is ready for
+ writing.
+
+ Subclasses must implement this if :meth:`Broker._start_transmit` is
+ ever called on them.
+ """
+ pass
+
+ def on_shutdown(self, broker):
+ """
+ Called by :meth:`Broker.shutdown` to allow the stream time to
+ gracefully shut down. The base implementation simply calls
+ :meth:`on_disconnect`.
+ """
+ _v and LOG.debug('%r.on_shutdown()', self)
+ fire(self, 'shutdown')
+ self.on_disconnect(broker)
+
def on_disconnect(self, broker):
+ """
+ Called by :class:`Broker` to force disconnect the stream. The base
+ implementation simply closes :attr:`receive_side` and
+ :attr:`transmit_side` and unregisters the stream from the broker.
+ """
LOG.debug('%r.on_disconnect()', self)
if self.receive_side:
broker.stop_receive(self)
@@ -1055,19 +1562,14 @@ class BasicStream(object):
self.transmit_side.close()
fire(self, 'disconnect')
- def on_shutdown(self, broker):
- _v and LOG.debug('%r.on_shutdown()', self)
- fire(self, 'shutdown')
- self.on_disconnect(broker)
-
class Stream(BasicStream):
"""
- :py:class:`BasicStream` subclass implementing mitogen's :ref:`stream
+ :class:`BasicStream` subclass implementing mitogen's :ref:`stream
protocol `.
"""
- #: If not :data:`None`, :py:class:`Router` stamps this into
- #: :py:attr:`Message.auth_id` of every message received on this stream.
+ #: If not :data:`None`, :class:`Router` stamps this into
+ #: :attr:`Message.auth_id` of every message received on this stream.
auth_id = None
#: If not :data:`False`, indicates the stream has :attr:`auth_id` set and
@@ -1085,6 +1587,9 @@ class Stream(BasicStream):
self._output_buf = collections.deque()
self._input_buf_len = 0
self._output_buf_len = 0
+ #: Routing records the dst_id of every message arriving from this
+ #: stream. Any arriving DEL_ROUTE is rebroadcast for any such ID.
+ self.egress_ids = set()
def construct(self):
pass
@@ -1101,7 +1606,7 @@ class Stream(BasicStream):
def on_receive(self, broker):
"""Handle the next complete message on the stream. Raise
- :py:class:`StreamError` on failure."""
+ :class:`StreamError` on failure."""
_vv and IOLOG.debug('%r.on_receive()', self)
buf = self.receive_side.read()
@@ -1110,8 +1615,16 @@ class Stream(BasicStream):
self._internal_receive(broker, buf)
- HEADER_FMT = '>LLLLLL'
+ HEADER_FMT = '>hLLLLLL'
HEADER_LEN = struct.calcsize(HEADER_FMT)
+ HEADER_MAGIC = 0x4d49 # 'MI'
+
+ corrupt_msg = (
+ 'Corruption detected: frame signature incorrect. This likely means '
+ 'some external process is interfering with the connection. Received:'
+ '\n\n'
+ '%r'
+ )
def _receive_one(self, broker):
if self._input_buf_len < self.HEADER_LEN:
@@ -1119,12 +1632,17 @@ class Stream(BasicStream):
msg = Message()
msg.router = self._router
- (msg.dst_id, msg.src_id, msg.auth_id,
+ (magic, msg.dst_id, msg.src_id, msg.auth_id,
msg.handle, msg.reply_to, msg_len) = struct.unpack(
self.HEADER_FMT,
self._input_buf[0][:self.HEADER_LEN],
)
+ if magic != self.HEADER_MAGIC:
+ LOG.error(self.corrupt_msg, self._input_buf[0][:2048])
+ self.on_disconnect(broker)
+ return False
+
if msg_len > self._router.max_message_size:
LOG.error('Maximum message size exceeded (got %d, max %d)',
msg_len, self._router.max_message_size)
@@ -1158,6 +1676,14 @@ class Stream(BasicStream):
return True
def pending_bytes(self):
+ """
+ Return the number of bytes queued for transmission on this stream. This
+ can be used to limit the amount of data buffered in RAM by an otherwise
+ unlimited consumer.
+
+ For an accurate result, this method should be called from the Broker
+ thread, for example by using :meth:`Broker.defer_sync`.
+ """
return self._output_buf_len
def on_transmit(self, broker):
@@ -1182,10 +1708,23 @@ class Stream(BasicStream):
def _send(self, msg):
_vv and IOLOG.debug('%r._send(%r)', self, msg)
- pkt = struct.pack(self.HEADER_FMT, msg.dst_id, msg.src_id,
- msg.auth_id, msg.handle, msg.reply_to or 0,
- len(msg.data)) + msg.data
+ pkt = struct.pack(self.HEADER_FMT, self.HEADER_MAGIC, msg.dst_id,
+ msg.src_id, msg.auth_id, msg.handle,
+ msg.reply_to or 0, len(msg.data)) + msg.data
+
if not self._output_buf_len:
+ # Modifying epoll/Kqueue state is expensive, as are needless broker
+ # loops. Rather than wait for writeability, just write immediately,
+ # and fall back to the broker loop on error or full buffer.
+ try:
+ n = self.transmit_side.write(pkt)
+ if n:
+ if n == len(pkt):
+ return
+ pkt = pkt[n:]
+ except OSError:
+ pass
+
self._router.broker._start_transmit(self)
self._output_buf.append(pkt)
self._output_buf_len += len(pkt)
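
The frame layout implied by the new header: a big-endian 2-byte magic (``'MI'``) followed by six 32-bit fields, then the payload. A sketch that round-trips one frame with hypothetical field values::

    import struct

    HEADER_FMT = '>hLLLLLL'
    HEADER_LEN = struct.calcsize(HEADER_FMT)    # 2 + 6*4 = 26 bytes
    HEADER_MAGIC = 0x4d49                       # 'MI'

    data = b'hello'
    pkt = struct.pack(HEADER_FMT, HEADER_MAGIC,
                      1,      # dst_id
                      0,      # src_id
                      0,      # auth_id
                      100,    # handle
                      1000,   # reply_to
                      len(data)) + data

    fields = struct.unpack(HEADER_FMT, pkt[:HEADER_LEN])
    assert fields[0] == HEADER_MAGIC
    assert pkt[HEADER_LEN:HEADER_LEN + fields[-1]] == data
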
@@ -1206,10 +1745,32 @@ class Stream(BasicStream):
def __repr__(self):
cls = type(self)
- return '%s.%s(%r)' % (cls.__module__, cls.__name__, self.name)
+ return "%s.%s('%s')" % (cls.__module__, cls.__name__, self.name)
class Context(object):
+ """
+ Represent a remote context regardless of the underlying connection method.
+ Context objects are simple facades that emit messages through an
+ associated router, and have :ref:`signals` raised against them in response
+ to various events relating to the context.
+
+ **Note:** This is the somewhat limited core version, used by child
+ contexts. The master subclass is documented below this one.
+
+ Contexts maintain no internal state and are thread-safe.
+
+ Prefer :meth:`Router.context_by_id` over constructing context objects
+ explicitly, as that method is deduplicating, and returns the only context
+ instance :ref:`signals` will be raised on.
+
+ :param Router router:
+ Router to emit messages through.
+ :param int context_id:
+ Context ID.
+ :param str name:
+ Context name.
+ """
remote_name = None
def __init__(self, router, context_id, name=None):
@@ -1228,6 +1789,23 @@ class Context(object):
fire(self, 'disconnect')
def send_async(self, msg, persist=False):
+ """
+ Arrange for `msg` to be delivered to this context, with replies
+ directed to a newly constructed receiver. :attr:`dst_id
+ ` is set to the target context ID, and :attr:`reply_to
+ ` is set to the newly constructed receiver's handle.
+
+ :param bool persist:
+ If :data:`False`, the handler will be unregistered after a single
+ message has been received.
+
+ :param mitogen.core.Message msg:
+ The message.
+
+ :returns:
+ :class:`Receiver` configured to receive any replies sent to the
+ message's `reply_to` handle.
+ """
if self.router.broker._thread == threading.currentThread(): # TODO
raise SystemError('Cannot make blocking call on broker thread')
@@ -1251,8 +1829,13 @@ class Context(object):
return self.send_async(msg)
def send(self, msg):
- """send `obj` to `handle`, and tell the broker we have output. May
- be called from any thread."""
+ """
+ Arrange for `msg` to be delivered to this context. :attr:`dst_id
+ ` is set to the target context ID.
+
+ :param Message msg:
+ Message.
+ """
msg.dst_id = self.context_id
self.router.route(msg)
@@ -1261,7 +1844,19 @@ class Context(object):
return recv.get().unpickle()
def send_await(self, msg, deadline=None):
- """Send `msg` and wait for a response with an optional timeout."""
+ """
+ Like :meth:`send_async`, but expect a single reply (`persist=False`)
+ delivered within `deadline` seconds.
+
+ :param mitogen.core.Message msg:
+ The message.
+ :param float deadline:
+ If not :data:`None`, seconds before timing out waiting for a reply.
+ :returns:
+ Deserialized reply.
+ :raises TimeoutError:
+ No message was received and `deadline` passed.
+ """
receiver = self.send_async(msg)
response = receiver.get(deadline)
data = response.unpickle()
@@ -1272,75 +1867,180 @@ class Context(object):
return 'Context(%s, %r)' % (self.context_id, self.name)
-def _unpickle_context(router, context_id, name):
- if not (isinstance(router, Router) and
- isinstance(context_id, (int, long)) and context_id >= 0 and (
- (name is None) or
- (isinstance(name, UnicodeType) and len(name) < 100))
- ):
+def _unpickle_context(context_id, name, router=None):
+ if not (isinstance(context_id, (int, long)) and context_id >= 0 and (
+ (name is None) or
+ (isinstance(name, UnicodeType) and len(name) < 100))
+ ):
raise TypeError('cannot unpickle Context: bad input')
- return router.context_class(router, context_id, name)
+
+ if isinstance(router, Router):
+ return router.context_by_id(context_id, name=name)
+ return Context(None, context_id, name) # For plain Jane pickle.
class Poller(object):
+ """
+ A poller manages OS file descriptors the user is waiting to become
+ available for IO. The :meth:`poll` method blocks the calling thread
+ until one or more become ready. The default implementation is based on
+ :func:`select.poll`.
+
+ Each descriptor has an associated `data` element, which is unique for each
+ readiness type, and defaults to being the same as the file descriptor. The
+ :meth:`poll` method yields the data associated with a descriptor, rather
+ than the descriptor itself, allowing concise loops like::
+
+ p = Poller()
+ p.start_receive(conn.fd, data=conn.on_read)
+ p.start_transmit(conn.fd, data=conn.on_write)
+
+ for callback in p.poll():
+ callback() # invoke appropriate bound instance method
+
+ Pollers may be modified while :meth:`poll` is yielding results. Removals
+ are processed immediately, causing pending events for the descriptor to be
+ discarded.
+
+ The :meth:`close` method must be called when a poller is discarded to avoid
+ a resource leak.
+
+ Pollers may only be used by one thread at a time.
+ """
+ # This changed from select() to poll() in Mitogen 0.2.4. Since poll() has
+ # no upper FD limit, it is suitable for use with Latch, which must handle
+ # FDs larger than select's limit during many-host runs. We want this
+ # because poll() requires no setup and teardown: just a single system call,
+ # which is important because Latch.get() creates a Poller on each
+ # invocation. In a microbenchmark, poll() vs. epoll_ctl() is 30% faster in
+ # this scenario. If select() support must return in future, it is
+ # important Latch.poller_class is set from parent.py to point to the
+ # industrial-strength poller for the OS, otherwise Latch will fail
+ # randomly.
+
+ #: Increments on every poll(). Used to version _rfds and _wfds.
+ _generation = 1
+
def __init__(self):
self._rfds = {}
self._wfds = {}
+ self._pollobj = select.poll()
+
+ def __repr__(self):
+ return '%s(%#x)' % (type(self).__name__, id(self))
@property
def readers(self):
- return list(self._rfds.items())
+ """
+ Return a list of `(fd, data)` tuples for every FD registered for
+ receive readiness.
+ """
+ return list((fd, data) for fd, (data, gen) in self._rfds.items())
@property
def writers(self):
- return list(self._wfds.items())
-
- def __repr__(self):
- return '%s(%#x)' % (type(self).__name__, id(self))
+ """
+ Return a list of `(fd, data)` tuples for every FD registered for
+ transmit readiness.
+ """
+ return list((fd, data) for fd, (data, gen) in self._wfds.items())
def close(self):
+ """
+ Close any underlying OS resource used by the poller.
+ """
pass
+ _readmask = select.POLLIN | select.POLLHUP
+ # TODO: no proof we don't need writemask too
+
+ def _update(self, fd):
+ mask = (((fd in self._rfds) and self._readmask) |
+ ((fd in self._wfds) and select.POLLOUT))
+ if mask:
+ self._pollobj.register(fd, mask)
+ else:
+ try:
+ self._pollobj.unregister(fd)
+ except KeyError:
+ pass
+
def start_receive(self, fd, data=None):
- self._rfds[fd] = data or fd
+ """
+ Cause :meth:`poll` to yield `data` when `fd` is readable.
+ """
+ self._rfds[fd] = (data or fd, self._generation)
+ self._update(fd)
def stop_receive(self, fd):
+ """
+ Stop yielding readability events for `fd`.
+
+ Redundant calls to :meth:`stop_receive` are silently ignored, this may
+ change in future.
+ """
self._rfds.pop(fd, None)
+ self._update(fd)
def start_transmit(self, fd, data=None):
- self._wfds[fd] = data or fd
+ """
+ Cause :meth:`poll` to yield `data` when `fd` is writeable.
+ """
+ self._wfds[fd] = (data or fd, self._generation)
+ self._update(fd)
def stop_transmit(self, fd):
+ """
+ Stop yielding writeability events for `fd`.
+
+ Redundant calls to :meth:`stop_transmit` are silently ignored, this may
+ change in future.
+ """
self._wfds.pop(fd, None)
+ self._update(fd)
+
+ def _poll(self, timeout):
+ if timeout:
+ timeout *= 1000
+
+ events, _ = io_op(self._pollobj.poll, timeout)
+ for fd, event in events:
+ if event & self._readmask:
+ _vv and IOLOG.debug('%r: POLLIN|POLLHUP for %r', self, fd)
+ data, gen = self._rfds.get(fd, (None, None))
+ if gen and gen < self._generation:
+ yield data
+ if event & select.POLLOUT:
+ _vv and IOLOG.debug('%r: POLLOUT for %r', self, fd)
+ data, gen = self._wfds.get(fd, (None, None))
+ if gen and gen < self._generation:
+ yield data
def poll(self, timeout=None):
- _vv and IOLOG.debug('%r.poll(%r)', self, timeout)
- (rfds, wfds, _), _ = io_op(select.select,
- self._rfds,
- self._wfds,
- (), timeout
- )
-
- for fd in rfds:
- _vv and IOLOG.debug('%r: POLLIN for %r', self, fd)
- yield self._rfds[fd]
+ """
+ Block the calling thread until one or more FDs are ready for IO.
- for fd in wfds:
- _vv and IOLOG.debug('%r: POLLOUT for %r', self, fd)
- yield self._wfds[fd]
+ :param float timeout:
+ If not :data:`None`, seconds to wait without an event before
+ returning an empty iterable.
+ :returns:
+ Iterable of `data` elements associated with ready FDs.
+ """
+ _vv and IOLOG.debug('%r.poll(%r)', self, timeout)
+ self._generation += 1
+ return self._poll(timeout)
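
A small usage sketch: the write side of a fresh socketpair is immediately writeable, so :meth:`poll` yields its data element at once. Registrations made while :meth:`poll` is still yielding carry the new generation number and are skipped until the next call::

    import socket
    from mitogen.core import Poller

    a, b = socket.socketpair()
    p = Poller()
    p.start_transmit(b.fileno(), data='b-writeable')
    assert list(p.poll(timeout=1.0)) == ['b-writeable']
    p.stop_transmit(b.fileno())
    p.close()
    a.close()
    b.close()
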
class Latch(object):
"""
- A latch is a :py:class:`Queue.Queue`-like object that supports mutation and
- waiting from multiple threads, however unlike :py:class:`Queue.Queue`,
+ A latch is a :class:`Queue.Queue`-like object that supports mutation and
+ waiting from multiple threads, however unlike :class:`Queue.Queue`,
waiting threads always remain interruptible, so CTRL+C always succeeds, and
waits where a timeout is set experience no wake up latency. These
properties are not possible in combination using the built-in threading
primitives available in Python 2.x.
Latches implement queues using the UNIX self-pipe trick, and a per-thread
- :py:func:`socket.socketpair` that is lazily created the first time any
+ :func:`socket.socketpair` that is lazily created the first time any
latch attempts to sleep on a thread, and dynamically associated with the
waiting Latch only for duration of the wait.
@@ -1351,14 +2051,14 @@ class Latch(object):
# The _cls_ prefixes here are to make it crystal clear in the code which
# state mutation isn't covered by :attr:`_lock`.
- #: List of reusable :func:`socket.socketpair` tuples. The list is from
- #: multiple threads, the only safe operations are `append()` and `pop()`.
+ #: List of reusable :func:`socket.socketpair` tuples. The list is mutated
+ #: from multiple threads, the only safe operations are `append()` and
+ #: `pop()`.
_cls_idle_socketpairs = []
#: List of every socket object that must be closed by :meth:`_on_fork`.
#: Inherited descriptors cannot be reused, as the duplicated handles
- #: reference the same underlying kernel-side sockets still in use by
- #: the parent process.
+ #: reference the same underlying kernel object in use by the parent.
_cls_all_sockets = []
def __init__(self):
@@ -1386,7 +2086,7 @@ class Latch(object):
def close(self):
"""
Mark the latch as closed, and cause every sleeping thread to be woken,
- with :py:class:`mitogen.core.LatchError` raised in each thread.
+ with :class:`mitogen.core.LatchError` raised in each thread.
"""
self._lock.acquire()
try:
@@ -1400,19 +2100,28 @@ class Latch(object):
def empty(self):
"""
- Return :py:data:`True` if calling :py:meth:`get` would block.
+ Return :data:`True` if calling :meth:`get` would block.
+
+ As with :class:`Queue.Queue`, :data:`True` may be returned even
+ though a subsequent call to :meth:`get` will succeed, since a
+ message may be posted at any moment between :meth:`empty` and
+ :meth:`get`.
- As with :py:class:`Queue.Queue`, :py:data:`True` may be returned even
- though a subsequent call to :py:meth:`get` will succeed, since a
- message may be posted at any moment between :py:meth:`empty` and
- :py:meth:`get`.
+ As with :class:`Queue.Queue`, :data:`False` may be returned even
+ though a subsequent call to :meth:`get` will block, since another
+ waiting thread may be woken at any moment between :meth:`empty` and
+ :meth:`get`.
- As with :py:class:`Queue.Queue`, :py:data:`False` may be returned even
- though a subsequent call to :py:meth:`get` will block, since another
- waiting thread may be woken at any moment between :py:meth:`empty` and
- :py:meth:`get`.
+ :raises LatchError:
+ The latch has already been marked closed.
"""
- return len(self._queue) == 0
+ self._lock.acquire()
+ try:
+ if self.closed:
+ raise LatchError()
+ return len(self._queue) == 0
+ finally:
+ self._lock.release()
def _get_socketpair(self):
"""
@@ -1427,30 +2136,32 @@ class Latch(object):
self._cls_all_sockets.extend((rsock, wsock))
return rsock, wsock
- COOKIE_SIZE = 33
+ COOKIE_MAGIC, = struct.unpack('L', b('LTCH') * (struct.calcsize('L')//4))
+ COOKIE_FMT = 'Llll'
+ COOKIE_SIZE = struct.calcsize(COOKIE_FMT)
def _make_cookie(self):
"""
- Return a 33-byte string encoding the ID of the instance and the current
- thread. This disambiguates legitimate wake-ups, accidental writes to
- the FD, and buggy internal FD sharing.
+ Return a string encoding the ID of the process, instance and thread.
+ This disambiguates legitimate wake-ups, accidental writes to the FD,
+ and buggy internal FD sharing.
"""
- ident = threading.currentThread().ident
- return b(u'%016x-%016x' % (int(id(self)), ident))
+ return struct.pack(self.COOKIE_FMT, self.COOKIE_MAGIC,
+ os.getpid(), id(self), thread.get_ident())
def get(self, timeout=None, block=True):
"""
Return the next enqueued object, or sleep waiting for one.
:param float timeout:
- If not :py:data:`None`, specifies a timeout in seconds.
+ If not :data:`None`, specifies a timeout in seconds.
:param bool block:
- If :py:data:`False`, immediately raise
- :py:class:`mitogen.core.TimeoutError` if the latch is empty.
+ If :data:`False`, immediately raise
+ :class:`mitogen.core.TimeoutError` if the latch is empty.
:raises mitogen.core.LatchError:
- :py:meth:`close` has been called, and the object is no longer valid.
+ :meth:`close` has been called, and the object is no longer valid.
:raises mitogen.core.TimeoutError:
Timeout was reached.
@@ -1512,7 +2223,8 @@ class Latch(object):
assert cookie == got_cookie, (
"Cookie incorrect; got %r, expected %r" \
- % (got_cookie, cookie)
+ % (binascii.hexlify(got_cookie),
+ binascii.hexlify(cookie))
)
assert i < self._waking, (
"Cookie correct, but no queue element assigned."
@@ -1531,7 +2243,7 @@ class Latch(object):
exists.
:raises mitogen.core.LatchError:
- :py:meth:`close` has been called, and the object is no longer valid.
+ :meth:`close` has been called, and the object is no longer valid.
"""
_vv and IOLOG.debug('%r.put(%r)', self, obj)
self._lock.acquire()
@@ -1550,24 +2262,20 @@ class Latch(object):
self._lock.release()
def _wake(self, wsock, cookie):
- try:
- os.write(wsock.fileno(), cookie)
- except OSError:
- e = sys.exc_info()[1]
- if e.args[0] != errno.EBADF:
- raise
+ written, disconnected = io_op(os.write, wsock.fileno(), cookie)
+ assert written == len(cookie) and not disconnected
def __repr__(self):
return 'Latch(%#x, size=%d, t=%r)' % (
id(self),
len(self._queue),
- threading.currentThread().name,
+ threading.currentThread().getName(),
)
class Waker(BasicStream):
"""
- :py:class:`BasicStream` subclass implementing the `UNIX self-pipe trick`_.
+ :class:`BasicStream` subclass implementing the `UNIX self-pipe trick`_.
Used to wake the multiplexer when another thread needs to modify its state
(via a cross-thread function call).
@@ -1587,8 +2295,8 @@ class Waker(BasicStream):
def __repr__(self):
return 'Waker(%r rfd=%r, wfd=%r)' % (
self._broker,
- self.receive_side.fd,
- self.transmit_side.fd,
+ self.receive_side and self.receive_side.fd,
+ self.transmit_side and self.transmit_side.fd,
)
@property
@@ -1604,17 +2312,14 @@ class Waker(BasicStream):
def on_receive(self, broker):
"""
- Drain the pipe and fire callbacks. Reading multiple bytes is safe since
- new bytes corresponding to future .defer() calls are written only after
- .defer() takes _lock: either a byte we read corresponds to something
- already on the queue by the time we take _lock, or a byte remains
- buffered, causing another wake up, because it was written after we
- released _lock.
+ Drain the pipe and fire callbacks. Since :attr:`_deferred` is
+ synchronized, :meth:`defer` and :meth:`on_receive` can conspire to
+ ensure only one byte needs to be pending regardless of queue length.
"""
_vv and IOLOG.debug('%r.on_receive()', self)
- self.receive_side.read(128)
self._lock.acquire()
try:
+ self.receive_side.read(1)
deferred = self._deferred
self._deferred = []
finally:
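
A standalone sketch of the one-byte wake protocol described in the docstring, with hypothetical names: the writer emits a byte only when the queue transitions from empty, and the reader drains exactly one byte before swapping the queue out under the lock::

    import os
    import threading

    rfd, wfd = os.pipe()
    lock = threading.Lock()
    deferred = []

    def defer(item):
        with lock:
            if not deferred:
                os.write(wfd, b' ')     # first item since last drain: wake
            deferred.append(item)

    def drain():
        os.read(rfd, 1)                 # consume the single pending byte
        with lock:
            items, deferred[:] = list(deferred), []
        return items

    defer('a')
    defer('b')                          # queue non-empty: no second byte
    assert drain() == ['a', 'b']
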
@@ -1628,44 +2333,69 @@ class Waker(BasicStream):
func, args, kwargs)
self._broker.shutdown()
+ def _wake(self):
+ """
+ Wake the multiplexer by writing a byte. If Broker is midway through
+ teardown, the FD may already be closed, so ignore EBADF.
+ """
+ try:
+ self.transmit_side.write(b(' '))
+ except OSError:
+ e = sys.exc_info()[1]
+ if e.args[0] != errno.EBADF:
+ raise
+
+ broker_shutdown_msg = (
+ "An attempt was made to enqueue a message with a Broker that has "
+ "already exitted. It is likely your program called Broker.shutdown() "
+ "too early."
+ )
+
def defer(self, func, *args, **kwargs):
- if threading.currentThread().ident == self.broker_ident:
+ """
+ Arrange for `func()` to execute on the broker thread. This function
+ returns immediately without waiting for the result of `func()`. Use
+ :meth:`defer_sync` to block until a result is available.
+
+ :raises mitogen.core.Error:
+ :meth:`defer` was called after :class:`Broker` has begun shutdown.
+ """
+ if thread.get_ident() == self.broker_ident:
_vv and IOLOG.debug('%r.defer() [immediate]', self)
return func(*args, **kwargs)
+ if self._broker._exitted:
+ raise Error(self.broker_shutdown_msg)
_vv and IOLOG.debug('%r.defer() [fd=%r]', self, self.transmit_side.fd)
self._lock.acquire()
try:
+ if not self._deferred:
+ self._wake()
self._deferred.append((func, args, kwargs))
finally:
self._lock.release()
- # Wake the multiplexer by writing a byte. If the broker is in the midst
- # of tearing itself down, the waker fd may already have been closed, so
- # ignore EBADF here.
- try:
- self.transmit_side.write(b(' '))
- except OSError:
- e = sys.exc_info()[1]
- if e.args[0] != errno.EBADF:
- raise
-
class IoLogger(BasicStream):
"""
- :py:class:`BasicStream` subclass that sets up redirection of a standard
- UNIX file descriptor back into the Python :py:mod:`logging` package.
+ :class:`BasicStream` subclass that sets up redirection of a standard
+ UNIX file descriptor back into the Python :mod:`logging` package.
"""
_buf = ''
def __init__(self, broker, name, dest_fd):
self._broker = broker
self._name = name
- self._log = logging.getLogger(name)
self._rsock, self._wsock = socket.socketpair()
os.dup2(self._wsock.fileno(), dest_fd)
set_cloexec(self._wsock.fileno())
+ self._log = logging.getLogger(name)
+ # #453: prevent accidental log initialization in a child creating a
+ # feedback loop.
+ self._log.propagate = False
+ self._log.handlers = logging.getLogger().handlers[:]
+
self.receive_side = Side(self, self._rsock.fileno())
self.transmit_side = Side(self, dest_fd, cloexec=False, blocking=True)
self._broker.start_receive(self)
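
(A standalone sketch of the redirection idea, not Mitogen's API: point FD 2 at one end of a socketpair and re-emit what arrives as log records. As with the #453 comment above, the handler must not itself write to FD 2, or a feedback loop results.)

```python
import logging
import os
import socket
import sys

# Log to stdout, NOT stderr: FD 2 is about to become the socketpair.
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
log = logging.getLogger('stderr')

rsock, wsock = socket.socketpair()
os.dup2(wsock.fileno(), 2)          # raw writes to FD 2 now land in wsock
os.write(2, b'oops, raw stderr\n')  # e.g. output from a C library
line = rsock.recv(4096).decode().rstrip('\n')
log.info('%s', line)                # resurfaces as a normal log record
```
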
@@ -1675,13 +2405,15 @@ class IoLogger(BasicStream):
def _log_lines(self):
while self._buf.find('\n') != -1:
- line, _, self._buf = self._buf.partition('\n')
+ line, _, self._buf = str_partition(self._buf, '\n')
self._log.info('%s', line.rstrip('\n'))
def on_shutdown(self, broker):
"""Shut down the write end of the logging socket."""
_v and LOG.debug('%r.on_shutdown()', self)
- self._wsock.shutdown(socket.SHUT_WR)
+ if not IS_WSL:
+ # #333: WSL generates invalid readiness indication on shutdown()
+ self._wsock.shutdown(socket.SHUT_WR)
self._wsock.close()
self.transmit_side.close()
@@ -1696,107 +2428,307 @@ class IoLogger(BasicStream):
class Router(object):
+ """
+ Route messages between contexts, and invoke local handlers for messages
+ addressed to this context. :meth:`Router.route() <route>` straddles the
+ :class:`Broker` thread and user threads; it is safe to call anywhere.
+
+ **Note:** This is the somewhat limited core version of the Router class
+ used by child contexts. The master subclass is documented below this one.
+ """
context_class = Context
max_message_size = 128 * 1048576
+
+ #: When :data:`True`, permit children to only communicate with the current
+ #: context or a parent of the current context. Routing between siblings or
+ #: children of parents is prohibited, ensuring no communication is possible
+ #: between intentionally partitioned networks, such as when a program
+ #: simultaneously manipulates hosts spread across a corporate and a
+ #: production network, or production networks that are otherwise
+ #: air-gapped.
+ #:
+ #: Sending a prohibited message causes an error to be logged and a dead
+ #: message to be sent in reply to the errant message, if that message has
+ #: ``reply_to`` set.
+ #:
+ #: The value of :data:`unidirectional` becomes the default for the
+ :meth:`local() <mitogen.master.Router.local>` `unidirectional`
+ #: parameter.
unidirectional = False
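
(A hedged usage sketch of the flag above, assuming the public `Router.local()` API; `corp` and `prod` are illustrative names.)

```python
import mitogen.master

router = mitogen.master.Router()
router.unidirectional = True  # becomes the default for children below
try:
    corp = router.local()  # e.g. worker on the corporate network
    prod = router.local()  # e.g. worker on the production network
    # Each child may talk to this parent, but a message routed from
    # corp towards prod is refused, and answered with a dead message
    # when reply_to is set.
finally:
    router.broker.shutdown()
    router.broker.join()
```
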
def __init__(self, broker):
self.broker = broker
listen(broker, 'exit', self._on_broker_exit)
+ self._setup_logging()
- # Here seems as good a place as any.
- global _v, _vv
- _v = logging.getLogger().level <= logging.DEBUG
- _vv = IOLOG.level <= logging.DEBUG
-
- #: context ID -> Stream
+ self._write_lock = threading.Lock()
+ #: context ID -> Stream; must hold _write_lock to edit or iterate
self._stream_by_id = {}
- #: List of contexts to notify of shutdown.
+ #: List of contexts to notify of shutdown; must hold _write_lock
self._context_by_id = {}
self._last_handle = itertools.count(1000)
#: handle -> (persistent?, func(msg))
self._handle_map = {}
+ #: Context -> set { handle, .. }
+ self._handles_by_respondent = {}
+ self.add_handler(self._on_del_route, DEL_ROUTE)
def __repr__(self):
return 'Router(%r)' % (self.broker,)
- def on_stream_disconnect(self, stream):
- for context in self._context_by_id.values():
- stream_ = self._stream_by_id.get(context.context_id)
- if stream_ is stream:
- del self._stream_by_id[context.context_id]
- context.on_disconnect()
+ def _setup_logging(self):
+ """
+ This is done in the :class:`Router` constructor for historical reasons.
+ It must be called before ExternalContext logs its first messages, but
+ after logging has been setup. It must also be called when any router is
+ constructed for a consumer app.
+ """
+ # Here seems as good a place as any.
+ global _v, _vv
+ _v = logging.getLogger().level <= logging.DEBUG
+ _vv = IOLOG.level <= logging.DEBUG
+
+ def _on_del_route(self, msg):
+ """
+ Stub :data:`DEL_ROUTE` handler; fires 'disconnect' events on the
+ corresponding :attr:`_context_by_id` member. This is replaced by
+ :class:`mitogen.parent.RouteMonitor` in an upgraded context.
+ """
+ LOG.error('%r._on_del_route() %r', self, msg)
+ if msg.is_dead:
+ return
+
+ target_id_s, _, name = bytes_partition(msg.data, b(':'))
+ target_id = int(target_id_s, 10)
+ context = self._context_by_id.get(target_id)
+ if context:
+ fire(context, 'disconnect')
+ else:
+ LOG.debug('DEL_ROUTE for unknown ID %r: %r', target_id, msg)
+
+ def _on_stream_disconnect(self, stream):
+ notify = []
+ self._write_lock.acquire()
+ try:
+ for context in list(self._context_by_id.values()):
+ stream_ = self._stream_by_id.get(context.context_id)
+ if stream_ is stream:
+ del self._stream_by_id[context.context_id]
+ notify.append(context)
+ finally:
+ self._write_lock.release()
+
+ # Happens outside lock as e.g. RouteMonitor wants the same lock.
+ for context in notify:
+ context.on_disconnect()
+
+ broker_exit_msg = 'Broker has exited'
def _on_broker_exit(self):
while self._handle_map:
- _, (_, func, _) = self._handle_map.popitem()
- func(Message.dead())
+ _, (_, func, _, _) = self._handle_map.popitem()
+ func(Message.dead(self.broker_exit_msg))
+
+ def myself(self):
+ """
+ Return a :class:`Context` referring to the current process.
+ """
+ return self.context_class(
+ router=self,
+ context_id=mitogen.context_id,
+ name='self',
+ )
+
+ def context_by_id(self, context_id, via_id=None, create=True, name=None):
+ """
+ Messy factory/lookup function to find a context by its ID, or construct
+ it. This will eventually be replaced by a more sensible interface.
+ """
+ context = self._context_by_id.get(context_id)
+ if context:
+ return context
+
+ if create and via_id is not None:
+ via = self.context_by_id(via_id)
+ else:
+ via = None
+
+ self._write_lock.acquire()
+ try:
+ context = self._context_by_id.get(context_id)
+ if create and not context:
+ context = self.context_class(self, context_id, name=name)
+ context.via = via
+ self._context_by_id[context_id] = context
+ finally:
+ self._write_lock.release()
+
+ return context
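
(A hedged sketch of the create-or-lookup behaviour described above; context ID 100 is illustrative.)

```python
import mitogen.master

router = mitogen.master.Router()
try:
    # The first call constructs the Context record, the second returns it.
    a = router.context_by_id(100, create=True)
    b = router.context_by_id(100)
    assert a is b and a.context_id == 100
finally:
    router.broker.shutdown()
    router.broker.join()
```
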
def register(self, context, stream):
+ """
+ Register a newly constructed context and its associated stream, and add
+ the stream's receive side to the I/O multiplexer. This method remains
+ public while the design has not yet settled.
+ """
_v and LOG.debug('register(%r, %r)', context, stream)
- self._stream_by_id[context.context_id] = stream
- self._context_by_id[context.context_id] = context
+ self._write_lock.acquire()
+ try:
+ self._stream_by_id[context.context_id] = stream
+ self._context_by_id[context.context_id] = context
+ finally:
+ self._write_lock.release()
+
self.broker.start_receive(stream)
- listen(stream, 'disconnect', lambda: self.on_stream_disconnect(stream))
+ listen(stream, 'disconnect', lambda: self._on_stream_disconnect(stream))
def stream_by_id(self, dst_id):
- return self._stream_by_id.get(dst_id,
- self._stream_by_id.get(mitogen.parent_id))
+ """
+ Return the :class:`Stream` that should be used to communicate with
+ `dst_id`. If a specific route for `dst_id` is not known, a reference to
+ the parent context's stream is returned.
+ """
+ return (
+ self._stream_by_id.get(dst_id) or
+ self._stream_by_id.get(mitogen.parent_id)
+ )
def del_handler(self, handle):
- del self._handle_map[handle]
+ """
+ Remove the handler registered for `handle`.
+
+ :raises KeyError:
+ The handle wasn't registered.
+ """
+ _, _, _, respondent = self._handle_map.pop(handle)
+ if respondent:
+ self._handles_by_respondent[respondent].discard(handle)
def add_handler(self, fn, handle=None, persist=True,
- policy=None, respondent=None):
+ policy=None, respondent=None,
+ overwrite=False):
+ """
+ Invoke `fn(msg)` on the :class:`Broker` thread for each Message
+ delivered to `handle` in this context. Unregister after one invocation if
+ `persist` is :data:`False`. If `handle` is :data:`None`, a new handle
+ is allocated and returned.
+
+ :param int handle:
+ If not :data:`None`, an explicit handle to register, usually one of
+ the ``mitogen.core.*`` constants. If unspecified, a new unused
+ handle will be allocated.
+
+ :param bool persist:
+ If :data:`False`, the handler will be unregistered after a single
+ message has been received.
+
+ :param Context respondent:
+ Context that messages to this handle are expected to be sent from.
+ If specified, arranges for a dead message to be delivered to `fn`
+ when disconnection of the context is detected.
+
+ In future `respondent` will likely also be used to prevent other
+ contexts from sending messages to the handle.
+
+ :param function policy:
+ Function invoked as `policy(msg, stream)` where `msg` is a
+ :class:`mitogen.core.Message` about to be delivered, and `stream`
+ is the :class:`mitogen.core.Stream` on which it was received. The
+ function must return :data:`True`, otherwise an error is logged and
+ delivery is refused.
+
+ Two built-in policy functions exist:
+
+ * :func:`has_parent_authority`: requires the message arrived from a
+ parent context, or a context acting with a parent context's
+ authority (``auth_id``).
+
+ * :func:`mitogen.parent.is_immediate_child`: requires the
+ message arrived from an immediately connected child, for use in
+ messaging patterns that would become buggy or insecure if indirect
+ upstream communication were permitted.
+
+ If delivery is refused and the message's ``reply_to`` field is
+ nonzero, a :class:`mitogen.core.CallError` is delivered to the
+ sender indicating refusal occurred.
+
+ :param bool overwrite:
+ If :data:`True`, allow existing handles to be silently overwritten.
+
+ :return:
+ `handle`, or if `handle` was :data:`None`, the newly allocated
+ handle.
+ :raises Error:
+ Attempt to register a handle that was already registered.
+ """
handle = handle or next(self._last_handle)
_vv and IOLOG.debug('%r.add_handler(%r, %r, %r)', self, fn, handle, persist)
+ if handle in self._handle_map and not overwrite:
+ raise Error(self.duplicate_handle_msg)
+ self._handle_map[handle] = persist, fn, policy, respondent
if respondent:
- assert policy is None
- def policy(msg, _stream):
- return msg.is_dead or msg.src_id == respondent.context_id
- def on_disconnect():
- if handle in self._handle_map:
- fn(Message.dead())
- del self._handle_map[handle]
- listen(respondent, 'disconnect', on_disconnect)
-
- self._handle_map[handle] = persist, fn, policy
+ if respondent not in self._handles_by_respondent:
+ self._handles_by_respondent[respondent] = set()
+ listen(respondent, 'disconnect',
+ lambda: self._on_respondent_disconnect(respondent))
+ self._handles_by_respondent[respondent].add(handle)
+
return handle
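
(A hedged usage sketch of add_handler() with one of the built-in policies listed above, routing a message to our own context; only names defined in this module are used.)

```python
import mitogen.core
import mitogen.master

router = mitogen.master.Router()
try:
    def on_msg(msg):
        if not msg.is_dead:
            print('got:', msg.data)

    handle = router.add_handler(
        fn=on_msg,
        persist=False,                             # one-shot handler
        policy=mitogen.core.has_parent_authority,  # refuse unauthorized senders
    )
    # A locally routed message addressed to our own context and handle.
    router.route(mitogen.core.Message(
        dst_id=mitogen.context_id,
        handle=handle,
        data=b'hello',
    ))
finally:
    router.broker.shutdown()
    router.broker.join()
```
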
+ duplicate_handle_msg = 'cannot register a handle that already exists'
+ refused_msg = 'refused by policy'
+ invalid_handle_msg = 'invalid handle'
+ too_large_msg = 'message too large (max %d bytes)'
+ respondent_disconnect_msg = 'the respondent Context has disconnected'
+ broker_shutdown_msg = 'Broker is shutting down'
+ no_route_msg = 'no route to %r, my ID is %r'
+ unidirectional_msg = (
+ 'routing mode prevents forward of message from context %d via '
+ 'context %d'
+ )
+
+ def _on_respondent_disconnect(self, context):
+ for handle in self._handles_by_respondent.pop(context, ()):
+ _, fn, _, _ = self._handle_map[handle]
+ fn(Message.dead(self.respondent_disconnect_msg))
+ del self._handle_map[handle]
+
def on_shutdown(self, broker):
- """Called during :py:meth:`Broker.shutdown`, informs callbacks
- registered with :py:meth:`add_handle_cb` the connection is dead."""
+ """Called during :meth:`Broker.shutdown`, informs callbacks registered
+ with :meth:`add_handle_cb` the connection is dead."""
_v and LOG.debug('%r.on_shutdown(%r)', self, broker)
fire(self, 'shutdown')
for handle, (persist, fn) in self._handle_map.iteritems():
_v and LOG.debug('%r.on_shutdown(): killing %r: %r', self, handle, fn)
- fn(Message.dead())
+ fn(Message.dead(self.broker_shutdown_msg))
- refused_msg = 'Refused by policy.'
+ def _maybe_send_dead(self, msg, reason, *args):
+ if args:
+ reason %= args
+ LOG.debug('%r: %r is dead: %r', self, msg, reason)
+ if msg.reply_to and not msg.is_dead:
+ msg.reply(Message.dead(reason=reason), router=self)
def _invoke(self, msg, stream):
# IOLOG.debug('%r._invoke(%r)', self, msg)
try:
- persist, fn, policy = self._handle_map[msg.handle]
+ persist, fn, policy, respondent = self._handle_map[msg.handle]
except KeyError:
- LOG.error('%r: invalid handle: %r', self, msg)
- if msg.reply_to and not msg.is_dead:
- msg.reply(Message.dead())
+ self._maybe_send_dead(msg, reason=self.invalid_handle_msg)
+ return
+
+ if respondent and not (msg.is_dead or
+ msg.src_id == respondent.context_id):
+ self._maybe_send_dead(msg, 'reply from unexpected context')
return
if policy and not policy(msg, stream):
- LOG.error('%r: policy refused message: %r', self, msg)
- if msg.reply_to:
- self.route(Message.pickled(
- CallError(self.refused_msg),
- dst_id=msg.src_id,
- handle=msg.reply_to
- ))
+ self._maybe_send_dead(msg, self.refused_msg)
return
if not persist:
- del self._handle_map[msg.handle]
+ self.del_handler(msg.handle)
try:
fn(msg)
@@ -1804,10 +2736,25 @@ class Router(object):
LOG.exception('%r._invoke(%r): %r crashed', self, msg, fn)
def _async_route(self, msg, in_stream=None):
+ """
+ Arrange for `msg` to be forwarded towards its destination. If its
+ destination is the local context, then arrange for it to be dispatched
+ using the local handlers.
+
+ This is a lower overhead version of :meth:`route` that may only be
+ called from the :class:`Broker` thread.
+
+ :param Stream in_stream:
+ If not :data:`None`, the stream the message arrived on. Used for
+ performing source route verification, to ensure sensitive messages
+ such as ``CALL_FUNCTION`` arrive only from trusted contexts.
+ """
_vv and IOLOG.debug('%r._async_route(%r, %r)', self, msg, in_stream)
+
if len(msg.data) > self.max_message_size:
- LOG.error('message too large (max %d bytes): %r',
- self.max_message_size, msg)
+ self._maybe_send_dead(msg, self.too_large_msg % (
+ self.max_message_size,
+ ))
return
# Perform source verification.
@@ -1829,6 +2776,9 @@ class Router(object):
if in_stream.auth_id is not None:
msg.auth_id = in_stream.auth_id
+ # Maintain a set of IDs the source ever communicated with.
+ in_stream.egress_ids.add(msg.dst_id)
+
if msg.dst_id == mitogen.context_id:
return self._invoke(msg, in_stream)
@@ -1836,38 +2786,54 @@ class Router(object):
if out_stream is None:
out_stream = self._stream_by_id.get(mitogen.parent_id)
- dead = False
if out_stream is None:
- LOG.error('%r: no route for %r, my ID is %r',
- self, msg, mitogen.context_id)
- dead = True
-
- if in_stream and self.unidirectional and not dead and \
- not (in_stream.is_privileged or out_stream.is_privileged):
- LOG.error('routing mode prevents forward of %r from %r -> %r',
- msg, in_stream, out_stream)
- dead = True
-
- if dead:
- if msg.reply_to and not msg.is_dead:
- msg.reply(Message.dead(), router=self)
+ self._maybe_send_dead(msg, self.no_route_msg,
+ msg.dst_id, mitogen.context_id)
+ return
+
+ if in_stream and self.unidirectional and not \
+ (in_stream.is_privileged or out_stream.is_privileged):
+ self._maybe_send_dead(msg, self.unidirectional_msg,
+ in_stream.remote_id, out_stream.remote_id)
return
out_stream._send(msg)
def route(self, msg):
+ """
+ Arrange for the :class:`Message` `msg` to be delivered to its
+ destination using any relevant downstream context, or if none is found,
+ by forwarding the message upstream towards the master context. If `msg`
+ is destined for the local context, it is dispatched using the handles
+ registered with :meth:`add_handler`.
+
+ This may be called from any thread.
+ """
self.broker.defer(self._async_route, msg)
class Broker(object):
+ """
+ Responsible for handling I/O multiplexing in a private thread.
+
+ **Note:** This is the somewhat limited core version of the Broker class
+ used by child contexts. The master subclass is documented below.
+ """
poller_class = Poller
_waker = None
_thread = None
+
+ #: Seconds grace to allow :class:`streams <Stream>` to shut down gracefully
+ #: before force-disconnecting them during :meth:`shutdown`.
shutdown_timeout = 3.0
def __init__(self, poller_class=None):
self._alive = True
+ self._exitted = False
self._waker = Waker(self)
+ #: Arrange for `func(\*args, \**kwargs)` to be executed on the broker
+ #: thread, or immediately if the current thread is the broker thread.
+ #: Safe to call from any thread.
self.defer = self._waker.defer
self.poller = self.poller_class()
self.poller.start_receive(
@@ -1875,14 +2841,18 @@ class Broker(object):
(self._waker.receive_side, self._waker.on_receive)
)
self._thread = threading.Thread(
- target=_profile_hook,
- args=('broker', self._broker_main),
- name='mitogen-broker'
+ target=self._broker_main,
+ name='mitogen.broker'
)
self._thread.start()
- self._waker.broker_ident = self._thread.ident
def start_receive(self, stream):
+ """
+ Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as
+ ready for reading. Safe to call from any thread. When the associated
+ file descriptor becomes ready for reading,
+ :meth:`BasicStream.on_receive` will be called.
+ """
_vv and IOLOG.debug('%r.start_receive(%r)', self, stream)
side = stream.receive_side
assert side and side.fd is not None
@@ -1890,24 +2860,68 @@ class Broker(object):
side.fd, (side, stream.on_receive))
def stop_receive(self, stream):
+ """
+ Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as not
+ ready for reading. Safe to call from any thread.
+ """
_vv and IOLOG.debug('%r.stop_receive(%r)', self, stream)
self.defer(self.poller.stop_receive, stream.receive_side.fd)
def _start_transmit(self, stream):
+ """
+ Mark the :attr:`transmit_side <Stream.transmit_side>` on `stream` as
+ ready for writing. Must only be called from the Broker thread. When the
+ associated file descriptor becomes ready for writing,
+ :meth:`BasicStream.on_transmit` will be called.
+ """
_vv and IOLOG.debug('%r._start_transmit(%r)', self, stream)
side = stream.transmit_side
assert side and side.fd is not None
self.poller.start_transmit(side.fd, (side, stream.on_transmit))
def _stop_transmit(self, stream):
+ """
+ Mark the :attr:`transmit_side <Stream.transmit_side>` on `stream` as not
+ ready for writing.
+ """
_vv and IOLOG.debug('%r._stop_transmit(%r)', self, stream)
self.poller.stop_transmit(stream.transmit_side.fd)
def keep_alive(self):
+ """
+ Return :data:`True` if any reader's :attr:`Side.keep_alive` attribute
+ is :data:`True`, or any :class:`Context` is still registered that is
+ not the master. Used to delay shutdown while some important work is in
+ progress (e.g. log draining).
+ """
it = (side.keep_alive for (_, (side, _)) in self.poller.readers)
return sum(it, 0)
+ def defer_sync(self, func):
+ """
+ Arrange for `func()` to execute on the :class:`Broker` thread, blocking the
+ current thread until a result or exception is available.
+
+ :returns:
+ Return value of `func()`.
+ """
+ latch = Latch()
+ def wrapper():
+ try:
+ latch.put(func())
+ except Exception:
+ latch.put(sys.exc_info()[1])
+ self.defer(wrapper)
+ res = latch.get()
+ if isinstance(res, Exception):
+ raise res
+ return res
+
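
(A hedged usage sketch of defer_sync(): run a function on the broker thread, blocking the caller until its result is available.)

```python
import threading
import mitogen.master

router = mitogen.master.Router()
try:
    ident = router.broker.defer_sync(
        lambda: threading.currentThread().ident)
    print('broker thread ident:', ident)
finally:
    router.broker.shutdown()
    router.broker.join()
```
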
def _call(self, stream, func):
+ """
+ Call `func(self)`, catching any exception that might occur, logging it,
+ and force-disconnecting the related `stream`.
+ """
try:
func(self)
except Exception:
@@ -1915,47 +2929,90 @@ class Broker(object):
stream.on_disconnect(self)
def _loop_once(self, timeout=None):
+ """
+ Execute a single :class:`Poller` wait, dispatching any IO events that
+ caused the wait to complete.
+
+ :param float timeout:
+ If not :data:`None`, maximum time in seconds to wait for events.
+ """
_vv and IOLOG.debug('%r._loop_once(%r, %r)',
self, timeout, self.poller)
#IOLOG.debug('readers =\n%s', pformat(self.poller.readers))
#IOLOG.debug('writers =\n%s', pformat(self.poller.writers))
- for (side, func) in self.poller.poll(timeout):
+ for side, func in self.poller.poll(timeout):
self._call(side.stream, func)
- def _broker_main(self):
+ def _broker_exit(self):
+ """
+ Forcefully call :meth:`Stream.on_disconnect` on any streams that failed
+ to shut down gracefully, then discard the :class:`Poller`.
+ """
+ for _, (side, _) in self.poller.readers + self.poller.writers:
+ LOG.debug('_broker_exit() force disconnecting %r', side)
+ side.stream.on_disconnect(self)
+
+ self.poller.close()
+
+ def _broker_shutdown(self):
+ """
+ Invoke :meth:`Stream.on_shutdown` for every active stream, then allow
+ up to :attr:`shutdown_timeout` seconds for the streams to unregister
+ themselves, logging an error if any did not unregister during the grace
+ period.
+ """
+ for _, (side, _) in self.poller.readers + self.poller.writers:
+ self._call(side.stream, side.stream.on_shutdown)
+
+ deadline = time.time() + self.shutdown_timeout
+ while self.keep_alive() and time.time() < deadline:
+ self._loop_once(max(0, deadline - time.time()))
+
+ if self.keep_alive():
+ LOG.error('%r: some streams did not close gracefully. '
+ 'The most likely cause for this is one or '
+ 'more child processes still connected to '
+ 'our stdout/stderr pipes.', self)
+
+ def _do_broker_main(self):
+ """
+ Broker thread main function. Dispatches IO events until
+ :meth:`shutdown` is called.
+ """
+ # Python 2.4 has no way to retrieve a thread's ident except from within the thread.
+ self._waker.broker_ident = thread.get_ident()
try:
while self._alive:
self._loop_once()
fire(self, 'shutdown')
- for _, (side, _) in self.poller.readers + self.poller.writers:
- self._call(side.stream, side.stream.on_shutdown)
-
- deadline = time.time() + self.shutdown_timeout
- while self.keep_alive() and time.time() < deadline:
- self._loop_once(max(0, deadline - time.time()))
-
- if self.keep_alive():
- LOG.error('%r: some streams did not close gracefully. '
- 'The most likely cause for this is one or '
- 'more child processes still connected to '
- 'our stdout/stderr pipes.', self)
-
- for _, (side, _) in self.poller.readers + self.poller.writers:
- LOG.error('_broker_main() force disconnecting %r', side)
- side.stream.on_disconnect(self)
+ self._broker_shutdown()
except Exception:
LOG.exception('_broker_main() crashed')
+ self._exitted = True
+ self._broker_exit()
+
+ def _broker_main(self):
+ _profile_hook('mitogen.broker', self._do_broker_main)
fire(self, 'exit')
def shutdown(self):
+ """
+ Request broker gracefully disconnect streams and stop. Safe to call
+ from any thread.
+ """
_v and LOG.debug('%r.shutdown()', self)
def _shutdown():
self._alive = False
- self.defer(_shutdown)
+ if self._alive and not self._exitted:
+ self.defer(_shutdown)
def join(self):
+ """
+ Wait for the broker to stop, expected to be called after
+ :meth:`shutdown`.
+ """
self._thread.join()
def __repr__(self):
@@ -1963,6 +3020,16 @@ class Broker(object):
class Dispatcher(object):
+ """
+ Implementation of the :data:`CALL_FUNCTION` handle for a child context.
+ Listens on the child's main thread for messages sent by
+ :class:`mitogen.parent.CallChain` and dispatches the function calls they
+ describe.
+
+ If a :class:`mitogen.parent.CallChain` sending a message is in pipelined
+ mode, any exception that occurs is recorded, and causes all subsequent
+ calls with the same `chain_id` to fail with the same exception.
+ """
def __init__(self, econtext):
self.econtext = econtext
#: Chain ID -> CallError if prior call failed.
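
(A hedged sketch of the pipelined mode this class serves, assuming the mitogen.parent.CallChain API named in the docstring above.)

```python
import os
import mitogen.master
import mitogen.parent

router = mitogen.master.Router()
try:
    child = router.local()
    chain = mitogen.parent.CallChain(child, pipelined=True)
    chain.call_no_reply(os.getpid)  # sent without awaiting a reply
    print(chain.call(os.getpid))    # a later call blocks for its result
    chain.reset()                   # forget any recorded chain failure
finally:
    router.broker.shutdown()
    router.broker.join()
```
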
@@ -2023,10 +3090,38 @@ class Dispatcher(object):
if self.econtext.config.get('on_start'):
self.econtext.config['on_start'](self.econtext)
- _profile_hook('main', self._dispatch_calls)
+ _profile_hook('mitogen.child_main', self._dispatch_calls)
class ExternalContext(object):
+ """
+ External context implementation.
+
+ .. attribute:: broker
+ The :class:`mitogen.core.Broker` instance.
+
+ .. attribute:: context
+ The :class:`mitogen.core.Context` instance.
+
+ .. attribute:: channel
+ The :class:`mitogen.core.Channel` over which :data:`CALL_FUNCTION`
+ requests are received.
+
+ .. attribute:: importer
+ The :class:`mitogen.core.Importer` instance.
+
+ .. attribute:: stdout_log
+ The :class:`IoLogger` connected to ``stdout``.
+
+ .. attribute:: stderr_log
+ The :class:`IoLogger` connected to ``stderr``.
+
+ .. method:: _dispatch_calls
+ Implementation for the main thread in every child context.
+ """
detached = False
def __init__(self, config):
@@ -2067,11 +3162,6 @@ class ExternalContext(object):
_v and LOG.debug('%r: parent stream is gone, dying.', self)
self.broker.shutdown()
- def _sync(self, func):
- latch = Latch()
- self.broker.defer(lambda: latch.put(func()))
- return latch.get()
-
def detach(self):
self.detached = True
stream = self.router.stream_by_id(mitogen.parent_id)
@@ -2080,7 +3170,7 @@ class ExternalContext(object):
self.parent.send_await(Message(handle=DETACHING))
LOG.info('Detaching from %r; parent is %s', stream, self.parent)
for x in range(20):
- pending = self._sync(lambda: stream.pending_bytes())
+ pending = self.broker.defer_sync(lambda: stream.pending_bytes())
if not pending:
break
time.sleep(0.05)
@@ -2167,7 +3257,7 @@ class ExternalContext(object):
self.importer = importer
self.router.importer = importer
- sys.meta_path.append(self.importer)
+ sys.meta_path.insert(0, self.importer)
def _setup_package(self):
global mitogen
@@ -2190,28 +3280,44 @@ class ExternalContext(object):
mitogen.parent_ids = self.config['parent_ids'][:]
mitogen.parent_id = mitogen.parent_ids[0]
- def _setup_stdio(self):
- # We must open this prior to closing stdout, otherwise it will recycle
- # a standard handle, the dup2() will not error, and on closing it, we
- # lose a standrd handle, causing later code to again recycle a standard
- # handle.
- fp = open('/dev/null')
+ def _nullify_stdio(self):
+ """
+ Open /dev/null to replace stdin, and to replace stdout/stderr
+ temporarily. In case of an odd startup environment, assume we may have
+ been allocated a standard handle.
+ """
+ fd = os.open('/dev/null', os.O_RDWR)
+ try:
+ for stdfd in (0, 1, 2):
+ if fd != stdfd:
+ os.dup2(fd, stdfd)
+ finally:
+ if fd not in (0, 1, 2):
+ os.close(fd)
+ def _setup_stdio(self):
+ # #481: when stderr is a TTY due to being started via
+ # tty_create_child()/hybrid_tty_create_child(), and some privilege
+ # escalation tool like prehistoric versions of sudo exec this process
+ # over the top of itself, there is nothing left to keep the slave PTY
+ # open after we replace our stdio. Therefore if stderr is a TTY, keep
+ # around a permanent dup() to avoid receiving SIGHUP.
+ try:
+ if os.isatty(2):
+ self.reserve_tty_fd = os.dup(2)
+ set_cloexec(self.reserve_tty_fd)
+ except OSError:
+ pass
# When sys.stdout was opened by the runtime, overwriting it will not
- # cause close to be called. However when forking from a child that
- # previously used fdopen, overwriting it /will/ cause close to be
- # called. So we must explicitly close it before IoLogger overwrites the
- # file descriptor, otherwise the assignment below will cause stdout to
- # be closed.
+ # close FD 1. However when forking from a child that previously used
+ # fdopen(), overwriting it /will/ close FD 1. So we must swallow the
+ # close before IoLogger overwrites FD 1, otherwise its new FD 1 will be
+ # clobbered. Additionally, stdout must be replaced with /dev/null prior
+ # to stdout.close(), since if block buffering was active in the parent,
+ # any pre-fork buffered data will be flushed on close(), corrupting the
+ # connection to the parent.
+ self._nullify_stdio()
sys.stdout.close()
- sys.stdout = None
-
- try:
- os.dup2(fp.fileno(), 0)
- os.dup2(fp.fileno(), 1)
- os.dup2(fp.fileno(), 2)
- finally:
- fp.close()
+ self._nullify_stdio()
self.stdout_log = IoLogger(self.broker, 'stdout', 1)
self.stderr_log = IoLogger(self.broker, 'stderr', 2)
@@ -2233,11 +3339,16 @@ class ExternalContext(object):
self.dispatcher = Dispatcher(self)
self.router.register(self.parent, self.stream)
+ self.router._setup_logging()
self.log_handler.uncork()
sys.executable = os.environ.pop('ARGV0', sys.executable)
- _v and LOG.debug('Connected to %s; my ID is %r, PID is %r',
- self.parent, mitogen.context_id, os.getpid())
+ _v and LOG.debug('Connected to context %s; my ID is %r',
+ self.parent, mitogen.context_id)
+ _v and LOG.debug('pid:%r ppid:%r uid:%r/%r, gid:%r/%r host:%r',
+ os.getpid(), os.getppid(), os.geteuid(),
+ os.getuid(), os.getegid(), os.getgid(),
+ socket.gethostname())
_v and LOG.debug('Recovered sys.executable: %r', sys.executable)
self.dispatcher.run()
diff --git a/mitogen/debug.py b/mitogen/debug.py
index 19cf1a89..8f290c4d 100644
--- a/mitogen/debug.py
+++ b/mitogen/debug.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
"""
Basic signal handler for dumping thread stacks.
"""
@@ -99,10 +101,6 @@ def get_router_info():
}
-def get_router_info(router):
- pass
-
-
def get_stream_info(router_id):
router = get_routers().get(router_id)
return {
diff --git a/mitogen/doas.py b/mitogen/doas.py
index cdcee0b0..250b6faf 100644
--- a/mitogen/doas.py
+++ b/mitogen/doas.py
@@ -26,8 +26,9 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
import logging
-import os
import mitogen.core
import mitogen.parent
@@ -45,10 +46,6 @@ class Stream(mitogen.parent.Stream):
create_child = staticmethod(mitogen.parent.hybrid_tty_create_child)
child_is_immediate_subprocess = False
- #: Once connected, points to the corresponding DiagLogStream, allowing it
- #: to be disconnected at the same time this stream is being torn down.
- tty_stream = None
-
username = 'root'
password = None
doas_path = 'doas'
@@ -71,13 +68,8 @@ class Stream(mitogen.parent.Stream):
if incorrect_prompts is not None:
self.incorrect_prompts = map(str.lower, incorrect_prompts)
- def connect(self):
- super(Stream, self).connect()
- self.name = u'doas.' + mitogen.core.to_text(self.username)
-
- def on_disconnect(self, broker):
- self.tty_stream.on_disconnect(broker)
- super(Stream, self).on_disconnect(broker)
+ def _get_name(self):
+ return u'doas.' + mitogen.core.to_text(self.username)
def get_boot_command(self):
bits = [self.doas_path, '-u', self.username, '--']
@@ -88,15 +80,8 @@ class Stream(mitogen.parent.Stream):
password_incorrect_msg = 'doas password is incorrect'
password_required_msg = 'doas password is required'
- def _connect_bootstrap(self, extra_fd):
- self.tty_stream = mitogen.parent.DiagLogStream(extra_fd, self)
-
+ def _connect_input_loop(self, it):
password_sent = False
- it = mitogen.parent.iter_read(
- fds=[self.receive_side.fd, extra_fd],
- deadline=self.connect_deadline,
- )
-
for buf in it:
LOG.debug('%r: received %r', self, buf)
if buf.endswith(self.EC0_MARKER):
@@ -111,8 +96,18 @@ class Stream(mitogen.parent.Stream):
if password_sent:
raise PasswordError(self.password_incorrect_msg)
LOG.debug('sending password')
- self.tty_stream.transmit_side.write(
+ self.diag_stream.transmit_side.write(
mitogen.core.to_text(self.password + '\n').encode('utf-8')
)
password_sent = True
raise mitogen.core.StreamError('bootstrap failed')
+
+ def _connect_bootstrap(self):
+ it = mitogen.parent.iter_read(
+ fds=[self.receive_side.fd, self.diag_stream.receive_side.fd],
+ deadline=self.connect_deadline,
+ )
+ try:
+ self._connect_input_loop(it)
+ finally:
+ it.close()
diff --git a/mitogen/docker.py b/mitogen/docker.py
index 36b0635b..074f0e90 100644
--- a/mitogen/docker.py
+++ b/mitogen/docker.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
import logging
import mitogen.core
@@ -62,9 +64,8 @@ class Stream(mitogen.parent.Stream):
if username:
self.username = username
- def connect(self):
- super(Stream, self).connect()
- self.name = u'docker.' + (self.container or self.image)
+ def _get_name(self):
+ return u'docker.' + (self.container or self.image)
def get_boot_command(self):
args = ['--interactive']
diff --git a/mitogen/fakessh.py b/mitogen/fakessh.py
index 582017bc..2f2726eb 100644
--- a/mitogen/fakessh.py
+++ b/mitogen/fakessh.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
"""
:mod:`mitogen.fakessh` is a stream implementation that starts a subprocess with
its environment modified such that ``PATH`` searches for `ssh` return a Mitogen
diff --git a/mitogen/fork.py b/mitogen/fork.py
index cf769788..081f7e3d 100644
--- a/mitogen/fork.py
+++ b/mitogen/fork.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
import logging
import os
import random
@@ -39,6 +41,18 @@ import mitogen.parent
LOG = logging.getLogger('mitogen')
+# Python 2.4/2.5 cannot support fork+threads whatsoever: they do not even fix
+# up interpreter state following a fork. So on 2.4/2.5, .local() contexts are
+# started for isolation instead. Since there are no memory sharing problems to
+# avoid, there is no virginal fork parent either: the child is started directly
+# from the login/become process. In future this will be the default everywhere,
+# since fork() is fundamentally unsafe in threaded programs.
+FORK_SUPPORTED = sys.version_info >= (2, 6)
+
+
+class Error(mitogen.core.StreamError):
+ pass
+
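
(A hedged sketch of the fallback described in the comment above: prefer local() when the running interpreter cannot safely fork.)

```python
import os
import mitogen.fork
import mitogen.master

router = mitogen.master.Router()
try:
    if mitogen.fork.FORK_SUPPORTED:
        child = router.fork()
    else:
        child = router.local()  # isolation without fork on 2.4/2.5
    print('child pid:', child.call(os.getpid))
finally:
    router.broker.shutdown()
    router.broker.join()
```
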
def fixup_prngs():
"""
@@ -85,6 +99,10 @@ def on_fork():
mitogen.core.Latch._on_fork()
mitogen.core.Side._on_fork()
+ mitogen__service = sys.modules.get('mitogen.service')
+ if mitogen__service:
+ mitogen__service._pool_lock = threading.Lock()
+
def handle_child_crash():
"""
@@ -109,9 +127,19 @@ class Stream(mitogen.parent.Stream):
#: User-supplied function for cleaning up child process state.
on_fork = None
+ python_version_msg = (
+ "The mitogen.fork method is not supported on Python versions "
+ "prior to 2.6, since those versions made no attempt to repair "
+ "critical interpreter state following a fork. Please use the "
+ "local() method instead."
+ )
+
def construct(self, old_router, max_message_size, on_fork=None,
debug=False, profiling=False, unidirectional=False,
on_start=None):
+ if not FORK_SUPPORTED:
+ raise Error(self.python_version_msg)
+
# fork method only supports a tiny subset of options.
super(Stream, self).construct(max_message_size=max_message_size,
debug=debug, profiling=profiling,
@@ -180,14 +208,15 @@ class Stream(mitogen.parent.Stream):
config['on_start'] = self.on_start
try:
- mitogen.core.ExternalContext(config).main()
- except Exception:
- # TODO: report exception somehow.
- os._exit(72)
+ try:
+ mitogen.core.ExternalContext(config).main()
+ except Exception:
+ # TODO: report exception somehow.
+ os._exit(72)
finally:
# Don't trigger atexit handlers, they were copied from the parent.
os._exit(0)
- def _connect_bootstrap(self, extra_fd):
+ def _connect_bootstrap(self):
# None required.
pass
diff --git a/mitogen/jail.py b/mitogen/jail.py
index 726b60e0..fade8cbb 100644
--- a/mitogen/jail.py
+++ b/mitogen/jail.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
import logging
import mitogen.core
@@ -52,9 +54,8 @@ class Stream(mitogen.parent.Stream):
if jexec_path:
self.jexec_path = jexec_path
- def connect(self):
- super(Stream, self).connect()
- self.name = u'jail.' + self.container
+ def _get_name(self):
+ return u'jail.' + self.container
def get_boot_command(self):
bits = [self.jexec_path]
diff --git a/mitogen/kubectl.py b/mitogen/kubectl.py
index c2be24c1..ef626e1b 100644
--- a/mitogen/kubectl.py
+++ b/mitogen/kubectl.py
@@ -1,5 +1,4 @@
-# coding: utf-8
-# Copyright 2018, Yannig Perré
+# Copyright 2018, Yannig Perre
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -27,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
import logging
import mitogen.core
@@ -56,9 +57,8 @@ class Stream(mitogen.parent.Stream):
self.kubectl_path = kubectl_path
self.kubectl_args = kubectl_args or []
- def connect(self):
- super(Stream, self).connect()
- self.name = u'kubectl.%s%s' % (self.pod, self.kubectl_args)
+ def _get_name(self):
+ return u'kubectl.%s%s' % (self.pod, self.kubectl_args)
def get_boot_command(self):
bits = [self.kubectl_path] + self.kubectl_args + ['exec', '-it', self.pod]
diff --git a/mitogen/lxc.py b/mitogen/lxc.py
index 71b12221..6d4acba6 100644
--- a/mitogen/lxc.py
+++ b/mitogen/lxc.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
import logging
import mitogen.core
@@ -48,15 +50,20 @@ class Stream(mitogen.parent.Stream):
container = None
lxc_attach_path = 'lxc-attach'
+ eof_error_hint = (
+ 'Note: many versions of LXC do not report program execution failure '
+ 'meaningfully. Please check the host logs (/var/log) for more '
+ 'information.'
+ )
+
def construct(self, container, lxc_attach_path=None, **kwargs):
super(Stream, self).construct(**kwargs)
self.container = container
if lxc_attach_path:
self.lxc_attach_path = lxc_attach_path
- def connect(self):
- super(Stream, self).connect()
- self.name = u'lxc.' + self.container
+ def _get_name(self):
+ return u'lxc.' + self.container
def get_boot_command(self):
bits = [
diff --git a/mitogen/lxd.py b/mitogen/lxd.py
index 9e6702f4..7de4903a 100644
--- a/mitogen/lxd.py
+++ b/mitogen/lxd.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
import logging
import mitogen.core
@@ -49,15 +51,20 @@ class Stream(mitogen.parent.Stream):
lxc_path = 'lxc'
python_path = 'python'
+ eof_error_hint = (
+ 'Note: many versions of LXC do not report program execution failure '
+ 'meaningfully. Please check the host logs (/var/log) for more '
+ 'information.'
+ )
+
def construct(self, container, lxc_path=None, **kwargs):
super(Stream, self).construct(**kwargs)
self.container = container
if lxc_path:
self.lxc_path = lxc_path
- def connect(self):
- super(Stream, self).connect()
- self.name = u'lxd.' + self.container
+ def _get_name(self):
+ return u'lxd.' + self.container
def get_boot_command(self):
bits = [
diff --git a/mitogen/master.py b/mitogen/master.py
index d4ee607a..257fb81b 100644
--- a/mitogen/master.py
+++ b/mitogen/master.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
"""
This module implements functionality required by master processes, such as
starting new contexts via SSH. Its size is also restricted, since it must
@@ -43,6 +45,7 @@ import pkgutil
import re
import string
import sys
+import time
import threading
import types
import zlib
@@ -58,13 +61,25 @@ import mitogen.minify
import mitogen.parent
from mitogen.core import b
-from mitogen.core import to_text
-from mitogen.core import LOG
from mitogen.core import IOLOG
+from mitogen.core import LOG
+from mitogen.core import str_partition
+from mitogen.core import str_rpartition
+from mitogen.core import to_text
imap = getattr(itertools, 'imap', map)
izip = getattr(itertools, 'izip', zip)
+try:
+ any
+except NameError:
+ from mitogen.core import any
+
+try:
+ next
+except NameError:
+ from mitogen.core import next
+
RLOG = logging.getLogger('mitogen.ctx')
@@ -127,7 +142,7 @@ def get_child_modules(path):
return [to_text(name) for _, name, _ in it]
-def get_core_source():
+def _get_core_source():
"""
Master version of parent.get_core_source().
"""
@@ -137,31 +152,30 @@ def get_core_source():
if mitogen.is_master:
# TODO: find a less surprising way of installing this.
- mitogen.parent.get_core_source = get_core_source
+ mitogen.parent._get_core_source = _get_core_source
LOAD_CONST = dis.opname.index('LOAD_CONST')
IMPORT_NAME = dis.opname.index('IMPORT_NAME')
+def _getarg(nextb, c):
+ if c >= dis.HAVE_ARGUMENT:
+ return nextb() | (nextb() << 8)
+
+
if sys.version_info < (3, 0):
def iter_opcodes(co):
# Yield `(op, oparg)` tuples from the code object `co`.
ordit = imap(ord, co.co_code)
nextb = ordit.next
- return ((c, (None
- if c < dis.HAVE_ARGUMENT else
- (nextb() | (nextb() << 8))))
- for c in ordit)
+ return ((c, _getarg(nextb, c)) for c in ordit)
elif sys.version_info < (3, 6):
def iter_opcodes(co):
# Yield `(op, oparg)` tuples from the code object `co`.
ordit = iter(co.co_code)
nextb = ordit.__next__
- return ((c, (None
- if c < dis.HAVE_ARGUMENT else
- (nextb() | (nextb() << 8))))
- for c in ordit)
+ return ((c, _getarg(nextb, c)) for c in ordit)
else:
def iter_opcodes(co):
# Yield `(op, oparg)` tuples from the code object `co`.
@@ -172,9 +186,10 @@ else:
def scan_code_imports(co):
- """Given a code object `co`, scan its bytecode yielding any
- ``IMPORT_NAME`` and associated prior ``LOAD_CONST`` instructions
- representing an `Import` statement or `ImportFrom` statement.
+ """
+ Given a code object `co`, scan its bytecode yielding any ``IMPORT_NAME``
+ and associated prior ``LOAD_CONST`` instructions representing an `Import`
+ statement or `ImportFrom` statement.
:return:
Generator producing `(level, modname, namelist)` tuples, where:
@@ -188,6 +203,7 @@ def scan_code_imports(co):
"""
opit = iter_opcodes(co)
opit, opit2, opit3 = itertools.tee(opit, 3)
+
try:
next(opit2)
next(opit3)
@@ -195,14 +211,22 @@ def scan_code_imports(co):
except StopIteration:
return
- for oparg1, oparg2, (op3, arg3) in izip(opit, opit2, opit3):
- if op3 == IMPORT_NAME:
- op2, arg2 = oparg2
- op1, arg1 = oparg1
- if op1 == op2 == LOAD_CONST:
- yield (co.co_consts[arg1],
- co.co_names[arg3],
- co.co_consts[arg2] or ())
+ if sys.version_info >= (2, 5):
+ for oparg1, oparg2, (op3, arg3) in izip(opit, opit2, opit3):
+ if op3 == IMPORT_NAME:
+ op2, arg2 = oparg2
+ op1, arg1 = oparg1
+ if op1 == op2 == LOAD_CONST:
+ yield (co.co_consts[arg1],
+ co.co_names[arg3],
+ co.co_consts[arg2] or ())
+ else:
+ # Python 2.4 did not yet have 'level', so stack format differs.
+ for oparg1, (op2, arg2) in izip(opit, opit2):
+ if op2 == IMPORT_NAME:
+ op1, arg1 = oparg1
+ if op1 == LOAD_CONST:
+ yield (-1, co.co_names[arg2], co.co_consts[arg1] or ())
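
(A hedged usage sketch: compile a snippet and list the imports its bytecode performs; exact `level` values differ between 2.x and 3.x as noted above.)

```python
import mitogen.master

co = compile('import os\nfrom json import dumps\n', '<demo>', 'exec')
for level, modname, namelist in mitogen.master.scan_code_imports(co):
    print(level, modname, namelist)
# e.g. on Python 3:
#   0 os ()
#   0 json ('dumps',)
```
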
class ThreadWatcher(object):
@@ -324,17 +348,32 @@ class LogForwarder(object):
self._cache[msg.src_id] = logger = logging.getLogger(name)
name, level_s, s = msg.data.decode('latin1').split('\x00', 2)
- logger.log(int(level_s), '%s: %s', name, s, extra={
- 'mitogen_message': s,
- 'mitogen_context': self._router.context_by_id(msg.src_id),
- 'mitogen_name': name,
- })
+
+ # See logging.Handler.makeRecord()
+ record = logging.LogRecord(
+ name=logger.name,
+ level=int(level_s),
+ pathname='(unknown file)',
+ lineno=0,
+ msg=('%s: %s' % (name, s)),
+ args=(),
+ exc_info=None,
+ )
+ record.mitogen_message = s
+ record.mitogen_context = self._router.context_by_id(msg.src_id)
+ record.mitogen_name = name
+ logger.handle(record)
def __repr__(self):
return 'LogForwarder(%r)' % (self._router,)
class ModuleFinder(object):
+ """
+ Given the name of a loaded module, make a best-effort attempt at finding
+ related modules likely needed by a child context requesting the original
+ module.
+ """
def __init__(self):
#: Import machinery is expensive, keep :py:meth`:get_module_source`
#: results around.
@@ -372,9 +411,37 @@ class ModuleFinder(object):
if os.path.exists(path) and self._looks_like_script(path):
return path
+ def _get_main_module_defective_python_3x(self, fullname):
+ """
+ Recent versions of Python 3.x introduced an incomplete notion of
+ importer specs, and in doing so created permanent asymmetry in the
+ :mod:`pkgutil` interface handling for the `__main__` module. Therefore
+ we must handle `__main__` specially.
+ """
+ if fullname != '__main__':
+ return None
+
+ mod = sys.modules.get(fullname)
+ if not mod:
+ return None
+
+ path = getattr(mod, '__file__', None)
+ # Guard against a missing __file__: os.path.exists(None) raises on 3.x.
+ if not path:
+ return None
+ if not (os.path.exists(path) and self._looks_like_script(path)):
+ return None
+
+ fp = open(path, 'rb')
+ try:
+ source = fp.read()
+ finally:
+ fp.close()
+
+ return path, source, False
+
def _get_module_via_pkgutil(self, fullname):
- """Attempt to fetch source code via pkgutil. In an ideal world, this
- would be the only required implementation of get_module()."""
+ """
+ Attempt to fetch source code via pkgutil. In an ideal world, this would
+ be the only required implementation of get_module().
+ """
try:
# Pre-'import spec' this returned None, in Python3.6 it raises
# ImportError.
@@ -448,8 +515,70 @@ class ModuleFinder(object):
return path, source, is_pkg
- get_module_methods = [_get_module_via_pkgutil,
- _get_module_via_sys_modules]
+ def _get_module_via_parent_enumeration(self, fullname):
+ """
+ Attempt to fetch source code by examining the module's (hopefully less
+ insane) parent package. Required for older versions of
+ ansible.compat.six and plumbum.colors.
+ """
+ if fullname not in sys.modules:
+ # Don't attempt this unless a module really exists in sys.modules,
+ # else we could return junk.
+ return
+
+ pkgname, _, modname = str_rpartition(to_text(fullname), u'.')
+ pkg = sys.modules.get(pkgname)
+ if pkg is None or not hasattr(pkg, '__file__'):
+ return
+
+ pkg_path = os.path.dirname(pkg.__file__)
+ try:
+ fp, path, ext = imp.find_module(modname, [pkg_path])
+ try:
+ path = self._py_filename(path)
+ if not path:
+ fp.close()
+ return
+
+ source = fp.read()
+ finally:
+ if fp:
+ fp.close()
+
+ if isinstance(source, mitogen.core.UnicodeType):
+ # get_source() returns "string" according to PEP-302, which was
+ # reinterpreted for Python 3 to mean a Unicode string.
+ source = source.encode('utf-8')
+ return path, source, False
+ except ImportError:
+ e = sys.exc_info()[1]
+ LOG.debug('imp.find_module(%r, %r) -> %s', modname, [pkg_path], e)
+
+ def add_source_override(self, fullname, path, source, is_pkg):
+ """
+ Explicitly install a source cache entry, preventing usual lookup
+ methods from being used.
+
+ Beware the value of `path` is critical when `is_pkg` is specified,
+ since it directs where submodules are searched for.
+
+ :param str fullname:
+ Name of the module to override.
+ :param str path:
+ Module's path as it will appear in the cache.
+ :param bytes source:
+ Module source code as a bytestring.
+ :param bool is_pkg:
+ :data:`True` if the module is a package.
+ """
+ self._found_cache[fullname] = (path, source, is_pkg)
+
+ get_module_methods = [
+ _get_main_module_defective_python_3x,
+ _get_module_via_pkgutil,
+ _get_module_via_sys_modules,
+ _get_module_via_parent_enumeration,
+ ]
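
(A hedged usage sketch of add_source_override() via the responder attached to a master Router; the module name and source are illustrative.)

```python
import mitogen.master

router = mitogen.master.Router()
try:
    # Children requesting 'fake_config' now receive this source rather
    # than anything found by the lookup methods above.
    router.responder.add_source_override(
        fullname='fake_config',
        path='/nonexistent/fake_config.py',  # governs submodule search
        source=b'SETTING = 42\n',
        is_pkg=False,
    )
finally:
    router.broker.shutdown()
    router.broker.join()
```
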
def get_module_source(self, fullname):
"""Given the name of a loaded module `fullname`, attempt to find its
@@ -466,6 +595,7 @@ class ModuleFinder(object):
for method in self.get_module_methods:
tup = method(self, fullname)
if tup:
+ #LOG.debug('%r returned %r', method, tup)
break
else:
tup = None, None, None
@@ -477,7 +607,8 @@ class ModuleFinder(object):
def resolve_relpath(self, fullname, level):
"""Given an ImportFrom AST node, guess the prefix that should be tacked
on to an alias name to produce a canonical name. `fullname` is the name
- of the module in which the ImportFrom appears."""
+ of the module in which the ImportFrom appears.
+ """
mod = sys.modules.get(fullname, None)
if hasattr(mod, '__path__'):
fullname += '.__init__'
@@ -494,12 +625,12 @@ class ModuleFinder(object):
def generate_parent_names(self, fullname):
while '.' in fullname:
- fullname, _, _ = fullname.rpartition('.')
+ fullname, _, _ = str_rpartition(to_text(fullname), u'.')
yield fullname
def find_related_imports(self, fullname):
"""
- Return a list of non-stdlb modules that are directly imported by
+ Return a list of non-stdlib modules that are directly imported by
`fullname`, plus their parents.
The list is determined by retrieving the source code of
@@ -537,7 +668,7 @@ class ModuleFinder(object):
return self._related_cache.setdefault(fullname, sorted(
set(
- name
+ mitogen.core.to_text(name)
for name in maybe_names
if sys.modules.get(name) is not None
and not is_stdlib_name(name)
@@ -550,8 +681,8 @@ class ModuleFinder(object):
Return a list of non-stdlib modules that are imported directly or
indirectly by `fullname`, plus their parents.
- This method is like :py:meth:`on_disconect`, but it also recursively
- searches any modules which are imported by `fullname`.
+ This method is like :py:meth:`find_related_imports`, but also
+ recursively searches any modules which are imported by `fullname`.
:param fullname: Fully qualified name of an _already imported_ module
for which source code can be retrieved
@@ -563,7 +694,7 @@ class ModuleFinder(object):
while stack:
name = stack.pop(0)
names = self.find_related_imports(name)
- stack.extend(set(names).difference(found, stack))
+ stack.extend(set(names).difference(set(found).union(stack)))
found.update(names)
found.discard(fullname)
@@ -577,6 +708,23 @@ class ModuleResponder(object):
self._cache = {} # fullname -> pickled
self.blacklist = []
self.whitelist = ['']
+
+ #: Context -> set([fullname, ..])
+ self._forwarded_by_context = {}
+
+ #: Number of GET_MODULE messages received.
+ self.get_module_count = 0
+ #: Total time spent in uncached GET_MODULE.
+ self.get_module_secs = 0.0
+ #: Total time spent minifying modules.
+ self.minify_secs = 0.0
+ #: Number of successful LOAD_MODULE messages sent.
+ self.good_load_module_count = 0
+ #: Total bytes in successful LOAD_MODULE payloads.
+ self.good_load_module_size = 0
+ #: Number of negative LOAD_MODULE messages sent.
+ self.bad_load_module_count = 0
+
router.add_handler(
fn=self._on_get_module,
handle=mitogen.core.GET_MODULE,
@@ -585,6 +733,12 @@ class ModuleResponder(object):
def __repr__(self):
return 'ModuleResponder(%r)' % (self._router,)
+ def add_source_override(self, fullname, path, source, is_pkg):
+ """
+ See :meth:`ModuleFinder.add_source_override`.
+ """
+ self._finder.add_source_override(fullname, path, source, is_pkg)
+
MAIN_RE = re.compile(b(r'^if\s+__name__\s*==\s*.__main__.\s*:'), re.M)
main_guard_msg = (
"A child context attempted to import __main__, however the main "
@@ -620,20 +774,39 @@ class ModuleResponder(object):
def _make_negative_response(self, fullname):
return (fullname, None, None, None, ())
- def _build_tuple(self, fullname):
- if mitogen.core.is_blacklisted_import(self, fullname):
- raise ImportError('blacklisted')
+ minify_safe_re = re.compile(b(r'\s+#\s*!mitogen:\s*minify_safe'))
+ def _build_tuple(self, fullname):
if fullname in self._cache:
return self._cache[fullname]
+ if mitogen.core.is_blacklisted_import(self, fullname):
+ raise ImportError('blacklisted')
+
path, source, is_pkg = self._finder.get_module_source(fullname)
+ if path and is_stdlib_path(path):
+ # Prevent loading of 2.x<->3.x stdlib modules! This costs one
+ # RTT per hit, so a client-side solution is also required.
+ LOG.debug('%r: refusing to serve stdlib module %r',
+ self, fullname)
+ tup = self._make_negative_response(fullname)
+ self._cache[fullname] = tup
+ return tup
+
if source is None:
- LOG.error('_build_tuple(%r): could not locate source', fullname)
+ # TODO: make this .warning() or similar again once importer has its
+ # own logging category.
+ LOG.debug('_build_tuple(%r): could not locate source', fullname)
tup = self._make_negative_response(fullname)
self._cache[fullname] = tup
return tup
+ if self.minify_safe_re.search(source):
+ # If the module contains a magic marker, it's safe to minify.
+ t0 = time.time()
+ source = mitogen.minify.minimize_source(source).encode('utf-8')
+ self.minify_secs += time.time() - t0
+
if is_pkg:
pkg_present = get_child_modules(path)
LOG.debug('_build_tuple(%r, %r) -> %r',
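
(A sketch of a module opting in to minification via the magic marker matched by minify_safe_re above; the marker must be preceded by whitespace, e.g. a newline, to satisfy the pattern.)

```python
"""An illustrative module."""

# !mitogen: minify_safe

def greet(name):
    # Comments and docstrings like these are stripped before transfer.
    return 'hello, %s' % (name,)
```
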
@@ -662,38 +835,40 @@ class ModuleResponder(object):
def _send_load_module(self, stream, fullname):
if fullname not in stream.sent_modules:
- LOG.debug('_send_load_module(%r, %r)', stream, fullname)
- self._router._async_route(
- mitogen.core.Message.pickled(
- self._build_tuple(fullname),
- dst_id=stream.remote_id,
- handle=mitogen.core.LOAD_MODULE,
- )
+ tup = self._build_tuple(fullname)
+ msg = mitogen.core.Message.pickled(
+ tup,
+ dst_id=stream.remote_id,
+ handle=mitogen.core.LOAD_MODULE,
)
+ LOG.debug('%s: sending module %s (%.2f KiB)',
+ stream.name, fullname, len(msg.data) / 1024.0)
+ self._router._async_route(msg)
stream.sent_modules.add(fullname)
+ if tup[2] is not None:
+ self.good_load_module_count += 1
+ self.good_load_module_size += len(msg.data)
+ else:
+ self.bad_load_module_count += 1
def _send_module_load_failed(self, stream, fullname):
+ self.bad_load_module_count += 1
stream.send(
mitogen.core.Message.pickled(
- (fullname, None, None, None, ()),
+ self._make_negative_response(fullname),
dst_id=stream.remote_id,
handle=mitogen.core.LOAD_MODULE,
)
)
def _send_module_and_related(self, stream, fullname):
+ if fullname in stream.sent_modules:
+ return
+
try:
tup = self._build_tuple(fullname)
- if tup[2] and is_stdlib_path(tup[2]):
- # Prevent loading of 2.x<->3.x stdlib modules! This costs one
- # RTT per hit, so a client-side solution is also required.
- LOG.warning('%r: refusing to serve stdlib module %r',
- self, fullname)
- self._send_module_load_failed(stream, fullname)
- return
-
for name in tup[4]: # related
- parent, _, _ = name.partition('.')
+ parent, _, _ = str_partition(name, '.')
if parent != fullname and parent not in stream.sent_modules:
# Parent hasn't been sent, so don't load submodule yet.
continue
@@ -709,13 +884,18 @@ class ModuleResponder(object):
return
LOG.debug('%r._on_get_module(%r)', self, msg.data)
+ self.get_module_count += 1
stream = self._router.stream_by_id(msg.src_id)
fullname = msg.data.decode()
if fullname in stream.sent_modules:
LOG.warning('_on_get_module(): dup request for %r from %r',
fullname, stream)
- self._send_module_and_related(stream, fullname)
+ t0 = time.time()
+ try:
+ self._send_module_and_related(stream, fullname)
+ finally:
+ self.get_module_secs += time.time() - t0
def _send_forward_module(self, stream, context, fullname):
if stream.remote_id != context.context_id:
@@ -728,26 +908,59 @@ class ModuleResponder(object):
)
def _forward_one_module(self, context, fullname):
+ forwarded = self._forwarded_by_context.get(context)
+ if forwarded is None:
+ forwarded = set()
+ self._forwarded_by_context[context] = forwarded
+
+ if fullname in forwarded:
+ return
+
path = []
while fullname:
path.append(fullname)
- fullname, _, _ = fullname.rpartition('.')
+ fullname, _, _ = str_rpartition(fullname, u'.')
+
+ stream = self._router.stream_by_id(context.context_id)
+ if stream is None:
+ LOG.debug('%r: dropping forward of %s to no longer existent '
+ '%r', self, path[0], context)
+ return
for fullname in reversed(path):
- stream = self._router.stream_by_id(context.context_id)
self._send_module_and_related(stream, fullname)
self._send_forward_module(stream, context, fullname)
def _forward_modules(self, context, fullnames):
IOLOG.debug('%r._forward_modules(%r, %r)', self, context, fullnames)
for fullname in fullnames:
- self._forward_one_module(context, fullname)
+ self._forward_one_module(context, mitogen.core.to_text(fullname))
def forward_modules(self, context, fullnames):
self._router.broker.defer(self._forward_modules, context, fullnames)
class Broker(mitogen.core.Broker):
+ """
+ .. note::
+
+ You may construct as many brokers as desired, and use the same broker
+ for multiple routers, however usually only one broker need exist.
+ Multiple brokers may be useful when dealing with sets of children with
+ differing lifetimes. For example, a subscription service where
+ non-payment results in termination of a single customer's children.
+
+ :param bool install_watcher:
+ If :data:`True`, an additional thread is started to monitor the
+ lifetime of the main thread, triggering :meth:`shutdown`
+ automatically in case the user forgets to call it, or their code
+ crashes.
+
+ You should not rely on this functionality in your program; it is only
+ intended as a fail-safe and to simplify the API for new users. In
+ particular, alternative Python implementations may not be able to
+ support watching the main thread.
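+
+ A hedged construction sketch, sharing one broker between two routers;
+ the names are illustrative only::
+
+     broker = mitogen.master.Broker()
+     router_a = mitogen.master.Router(broker=broker)
+     router_b = mitogen.master.Router(broker=broker)
+     try:
+         pass  # create contexts, make calls ...
+     finally:
+         broker.shutdown()
+         broker.join()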
+ """
shutdown_timeout = 5.0
_watcher = None
poller_class = mitogen.parent.PREFERRED_POLLER
@@ -767,8 +980,43 @@ class Broker(mitogen.core.Broker):
class Router(mitogen.parent.Router):
+ """
+ Extend :class:`mitogen.core.Router` with functionality useful to masters,
+ and to child contexts that later become masters. Currently, when this
+ class is required, the target context's router is upgraded at runtime.
+
+ .. note::
+
+ You may construct as many routers as desired, and use the same broker
+ for multiple routers, however usually only one broker and router need
+ exist. Multiple routers may be useful when dealing with separate trust
+ domains, for example, manipulating infrastructure belonging to separate
+ customers or projects.
+
+ :param mitogen.master.Broker broker:
+ Broker to use. If not specified, a private :class:`Broker` is created.
+
+ :param int max_message_size:
+ Override the maximum message size this router is willing to receive or
+ transmit. Any value set here is automatically inherited by any children
+ created by the router.
+
+ This has a liberal default of 128 MiB, but may be set much lower.
+ Beware that setting it below 64 KiB may cause unexpected failures, as
+ parents and children can no longer route large Python modules that may
+ be required by your application.
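+
+ A hedged construction sketch; the 16 MiB figure is illustrative::
+
+     router = mitogen.master.Router(max_message_size=16 * 1048576)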
+ """
+
broker_class = Broker
- profiling = False
+
+ #: When :data:`True`, cause the broker thread and any subsequent broker and
+ #: main threads existing in any child to write
+ #: ``/tmp/mitogen.stats.<pid>.<thread>.log`` containing a
+ #: :mod:`cProfile` dump on graceful exit. Must be set prior to construction
+ #: of any :class:`Broker`, e.g. via::
+ #:
+ #: mitogen.master.Router.profiling = True
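+ #:
+ #: As the initialiser below shows, it also defaults to :data:`True` when
+ #: the ``MITOGEN_PROFILING`` environment variable is present.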
+ profiling = os.environ.get('MITOGEN_PROFILING') is not None
def __init__(self, broker=None, max_message_size=None):
if broker is None:
@@ -789,7 +1037,67 @@ class Router(mitogen.parent.Router):
persist=True,
)
+ def _on_broker_exit(self):
+ super(Router, self)._on_broker_exit()
+ dct = self.get_stats()
+ dct['self'] = self
+ dct['minify_ms'] = 1000 * dct['minify_secs']
+ dct['get_module_ms'] = 1000 * dct['get_module_secs']
+ dct['good_load_module_size_kb'] = dct['good_load_module_size'] / 1024.0
+ dct['good_load_module_size_avg'] = (
+ (
+ dct['good_load_module_size'] /
+ (float(dct['good_load_module_count']) or 1.0)
+ ) / 1024.0
+ )
+
+ LOG.debug(
+ '%(self)r: stats: '
+ '%(get_module_count)d module requests in '
+ '%(get_module_ms)d ms, '
+ '%(good_load_module_count)d sent '
+ '(%(minify_ms)d ms minify time), '
+ '%(bad_load_module_count)d negative responses. '
+ 'Sent %(good_load_module_size_kb).01f KiB total, '
+ '%(good_load_module_size_avg).01f KiB avg.'
+ % dct
+ )
+
+ def get_stats(self):
+ """
+ Return performance data for the module responder.
+
+ :returns:
+
+ Dict containing keys:
+
+ * `get_module_count`: Integer count of
+ :data:`mitogen.core.GET_MODULE` messages received.
+ * `get_module_secs`: Floating point total seconds spent servicing
+ :data:`mitogen.core.GET_MODULE` requests.
+ * `good_load_module_count`: Integer count of successful
+ :data:`mitogen.core.LOAD_MODULE` messages sent.
+ * `good_load_module_size`: Integer total bytes sent in
+ :data:`mitogen.core.LOAD_MODULE` message payloads.
+ * `bad_load_module_count`: Integer count of negative
+ :data:`mitogen.core.LOAD_MODULE` messages sent.
+ * `minify_secs`: CPU seconds spent minifying modules marked
+ minify-safe.
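+
+ A hedged usage sketch::
+
+     router = mitogen.master.Router()
+     # ... create contexts and make calls ...
+     stats = router.get_stats()
+     print('%(get_module_count)d module requests, '
+           '%(good_load_module_count)d modules sent' % stats)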
+ """
+ return {
+ 'get_module_count': self.responder.get_module_count,
+ 'get_module_secs': self.responder.get_module_secs,
+ 'good_load_module_count': self.responder.good_load_module_count,
+ 'good_load_module_size': self.responder.good_load_module_size,
+ 'bad_load_module_count': self.responder.bad_load_module_count,
+ 'minify_secs': self.responder.minify_secs,
+ }
+
def enable_debug(self):
+ """
+ Cause this context and any descendant child contexts to write debug
+ logs to ``/tmp/mitogen.<pid>.log``.
+ """
mitogen.core.enable_debug_logging()
self.debug = True
@@ -824,6 +1132,12 @@ class IdAllocator(object):
BLOCK_SIZE = 1000
def allocate(self):
+ """
+ Arrange for a unique context ID to be allocated and associated with a
+ route leading to the active context. In masters, the ID is generated
+ directly; in children it is forwarded to the master via a
+ :data:`mitogen.core.ALLOCATE_ID` message.
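+
+ A minimal usage sketch, assuming a master `router` already exists::
+
+     new_id = router.id_allocator.allocate()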
+ """
self.lock.acquire()
try:
id_ = self.next_id
@@ -849,8 +1163,6 @@ class IdAllocator(object):
id_, last_id = self.allocate_block()
requestee = self.router.context_by_id(msg.src_id)
- allocated = self.router.context_by_id(id_, msg.src_id)
-
LOG.debug('%r: allocating [%r..%r) to %r',
self, id_, last_id, requestee)
msg.reply((id_, last_id))
diff --git a/mitogen/minify.py b/mitogen/minify.py
index a261bf6a..dc9f517c 100644
--- a/mitogen/minify.py
+++ b/mitogen/minify.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
import sys
try:
@@ -40,13 +42,7 @@ if sys.version_info < (2, 7, 11):
else:
import tokenize
-try:
- from functools import lru_cache
-except ImportError:
- from mitogen.compat.functools import lru_cache
-
-@lru_cache()
def minimize_source(source):
"""Remove comments and docstrings from Python `source`, preserving line
numbers and syntax of empty blocks.
diff --git a/mitogen/parent.py b/mitogen/parent.py
index a57ca20b..91a4e5eb 100644
--- a/mitogen/parent.py
+++ b/mitogen/parent.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
"""
This module defines functionality common to master and parent processes. It is
sent to any child context that is due to become a parent, due to recursive
@@ -41,6 +43,7 @@ import logging
import os
import signal
import socket
+import struct
import subprocess
import sys
import termios
@@ -53,29 +56,37 @@ import zlib
select = __import__('select')
try:
- from cStringIO import StringIO
-except ImportError:
- from io import StringIO
-
-try:
- from functools import lru_cache
+ import thread
except ImportError:
- from mitogen.compat.functools import lru_cache
+ import threading as thread
import mitogen.core
from mitogen.core import b
+from mitogen.core import bytes_partition
from mitogen.core import LOG
from mitogen.core import IOLOG
+try:
+ next
+except NameError:
+ # Python 2.4/2.5
+ from mitogen.core import next
+
-IS_WSL = 'Microsoft' in os.uname()[2]
+itervalues = getattr(dict, 'itervalues', dict.values)
if mitogen.core.PY3:
xrange = range
+ closure_attr = '__closure__'
+ IM_SELF_ATTR = '__self__'
+else:
+ closure_attr = 'func_closure'
+ IM_SELF_ATTR = 'im_self'
+
try:
SC_OPEN_MAX = os.sysconf('SC_OPEN_MAX')
-except:
+except ValueError:
SC_OPEN_MAX = 1024
OPENPTY_MSG = (
@@ -93,6 +104,32 @@ SYS_EXECUTABLE_MSG = (
)
_sys_executable_warning_logged = False
+
+def _ioctl_cast(n):
+ """
+ The Linux ioctl() request parameter is unsigned, whereas on BSD/Darwin it
+ is signed. Until 2.5, Python exclusively implemented the BSD behaviour,
+ preventing the use of large unsigned int requests like those the TTY layer
+ uses below. So on 2.4 we cast our unsigned value to look signed for Python.
+ """
+ if sys.version_info < (2, 5):
+ n, = struct.unpack('i', struct.pack('I', n))
+ return n
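+
+
+# Worked example (illustrative): on Python 2.4,
+#   _ioctl_cast(2147767344) == 2147767344 - 2**32 == -2147199952
+# while on 2.5 and newer the value is returned unchanged.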
+
+
+# If not :data:`None`, called prior to exec() of any new child process. Used by
+# :func:`mitogen.utils.reset_affinity` to allow the child to be freely
+# scheduled.
+_preexec_hook = None
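+
+# A hedged sketch of installing a hook; the affinity call is Python 3 only
+# and purely illustrative:
+#
+#   def _allow_any_cpu():
+#       os.sched_setaffinity(0, range(os.cpu_count()))
+#   mitogen.parent._preexec_hook = _allow_any_cpu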
+
+# Get PTY number; asm-generic/ioctls.h
+LINUX_TIOCGPTN = _ioctl_cast(2147767344)
+
+# Lock/unlock PTY; asm-generic/ioctls.h
+LINUX_TIOCSPTLCK = _ioctl_cast(1074025521)
+
+IS_LINUX = os.uname()[0] == 'Linux'
+
SIGNAL_BY_NUM = dict(
(getattr(signal, name), name)
for name in sorted(vars(signal), reverse=True)
@@ -120,7 +157,11 @@ def get_sys_executable():
return '/usr/bin/python'
-def get_core_source():
+_core_source_lock = threading.Lock()
+_core_source_partial = None
+
+
+def _get_core_source():
"""
In non-masters, simply fetch the cached mitogen.core source code via the
import mechanism. In masters, this function is replaced with a version that
@@ -129,6 +170,26 @@ def get_core_source():
return inspect.getsource(mitogen.core)
+def get_core_source_partial():
+ """
+ _get_core_source() is expensive and threads may enter it simultaneously,
+ causing severe slowdowns, so compute the compressed form once under a lock.
+ """
+ global _core_source_partial
+
+ if _core_source_partial is None:
+ _core_source_lock.acquire()
+ try:
+ if _core_source_partial is None:
+ _core_source_partial = PartialZlib(
+ _get_core_source().encode('utf-8')
+ )
+ finally:
+ _core_source_lock.release()
+
+ return _core_source_partial
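+
+
+# A hedged sketch of the intended call pattern (see Stream.get_preamble()
+# below):
+#
+#   partial = get_core_source_partial()
+#   preamble = partial.append(suffix_bytes)  # suffix_bytes is illustrative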
+
+
def get_default_remote_name():
"""
Return the default name appearing in argv[0] of remote machines.
@@ -177,7 +238,7 @@ def disable_echo(fd):
old = termios.tcgetattr(fd)
new = cfmakeraw(old)
flags = getattr(termios, 'TCSASOFT', 0)
- if not IS_WSL:
+ if not mitogen.core.IS_WSL:
# issue #319: Windows Subsystem for Linux as of July 2018 throws EINVAL
# if TCSAFLUSH is specified.
flags |= termios.TCSAFLUSH
@@ -211,7 +272,7 @@ def create_socketpair():
return parentfp, childfp
-def detach_popen(*args, **kwargs):
+def detach_popen(**kwargs):
"""
Use :class:`subprocess.Popen` to construct a child process, then hack the
Popen so that it forgets the child it created, allowing it to survive a
@@ -223,6 +284,8 @@ def detach_popen(*args, **kwargs):
delivered to this process, causing later 'legitimate' calls to fail with
ECHILD.
+ :param list close_on_error:
+ List of integer file descriptors to close on exception.
:returns:
Process ID of the new child.
"""
@@ -230,7 +293,13 @@ def detach_popen(*args, **kwargs):
# handling, without tying the surrounding code into managing a Popen
# object, which isn't possible for at least :mod:`mitogen.fork`. This
# should be replaced by a swappable helper class in a future version.
- proc = subprocess.Popen(*args, **kwargs)
+ real_preexec_fn = kwargs.pop('preexec_fn', None)
+ def preexec_fn():
+ if _preexec_hook:
+ _preexec_hook()
+ if real_preexec_fn:
+ real_preexec_fn()
+ proc = subprocess.Popen(preexec_fn=preexec_fn, **kwargs)
proc._child_created = False
return proc.pid
@@ -271,14 +340,23 @@ def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None):
mitogen.core.set_cloexec(stderr_w)
extra = {'stderr': stderr_w}
- pid = detach_popen(
- args=args,
- stdin=childfp,
- stdout=childfp,
- close_fds=True,
- preexec_fn=preexec_fn,
- **extra
- )
+ try:
+ pid = detach_popen(
+ args=args,
+ stdin=childfp,
+ stdout=childfp,
+ close_fds=True,
+ preexec_fn=preexec_fn,
+ **extra
+ )
+ except Exception:
+ childfp.close()
+ parentfp.close()
+ if stderr_pipe:
+ os.close(stderr_r)
+ os.close(stderr_w)
+ raise
+
if stderr_pipe:
os.close(stderr_w)
childfp.close()
@@ -303,6 +381,48 @@ def _acquire_controlling_tty():
fcntl.ioctl(2, termios.TIOCSCTTY)
+def _linux_broken_devpts_openpty():
+ """
+ #462: On broken Linux hosts with mismatched configuration (e.g. old
+ /etc/fstab template installed), /dev/pts may be mounted without the gid=
+ mount option, causing new slave devices to be created with the group ID of
+ the calling process. This upsets glibc, whose openpty() is required by
+ specification to produce a slave owned by a special group ID (which is
+ always the 'tty' group).
+
+ Glibc attempts to use "pt_chown" to fix ownership. If that fails, it
+ chown()s the PTY directly, which fails because the process is not root,
+ causing openpty() to fail with EPERM ("Operation not permitted"). Since we don't need the
+ magical TTY group to run sudo and su, open the PTY ourselves in this case.
+ """
+ master_fd = None
+ try:
+ # Opening /dev/ptmx causes a PTY pair to be allocated, and the
+ # corresponding slave /dev/pts/* device to be created, owned by UID/GID
+ # matching this process.
+ master_fd = os.open('/dev/ptmx', os.O_RDWR)
+ # Clear the lock bit from the PTY. This is a prehistoric feature from
+ # a time when slave device files were persistent.
+ fcntl.ioctl(master_fd, LINUX_TIOCSPTLCK, struct.pack('i', 0))
+ # Since v4.13 TIOCGPTPEER exists to open the slave in one step, but we
+ # must support older kernels. Ask for the PTY number.
+ pty_num_s = fcntl.ioctl(master_fd, LINUX_TIOCGPTN,
+ struct.pack('i', 0))
+ pty_num, = struct.unpack('i', pty_num_s)
+ pty_name = '/dev/pts/%d' % (pty_num,)
+ # Now open it with O_NOCTTY to ensure it doesn't change our controlling
+ # TTY. Otherwise when we close the FD we get killed by the kernel, and
+ # the child we spawn that should really attach to it will get EPERM
+ # during _acquire_controlling_tty().
+ slave_fd = os.open(pty_name, os.O_RDWR|os.O_NOCTTY)
+ return master_fd, slave_fd
+ except OSError:
+ if master_fd is not None:
+ os.close(master_fd)
+ e = sys.exc_info()[1]
+ raise mitogen.core.StreamError(OPENPTY_MSG, e)
+
+
def openpty():
"""
Call :func:`os.openpty`, raising a descriptive error if the call fails.
@@ -316,6 +436,8 @@ def openpty():
return os.openpty()
except OSError:
e = sys.exc_info()[1]
+ if IS_LINUX and e.args[0] == errno.EPERM:
+ return _linux_broken_devpts_openpty()
raise mitogen.core.StreamError(OPENPTY_MSG, e)
@@ -334,18 +456,23 @@ def tty_create_child(args):
`(pid, tty_fd, None)`
"""
master_fd, slave_fd = openpty()
- mitogen.core.set_block(slave_fd)
- disable_echo(master_fd)
- disable_echo(slave_fd)
-
- pid = detach_popen(
- args=args,
- stdin=slave_fd,
- stdout=slave_fd,
- stderr=slave_fd,
- preexec_fn=_acquire_controlling_tty,
- close_fds=True,
- )
+ try:
+ mitogen.core.set_block(slave_fd)
+ disable_echo(master_fd)
+ disable_echo(slave_fd)
+
+ pid = detach_popen(
+ args=args,
+ stdin=slave_fd,
+ stdout=slave_fd,
+ stderr=slave_fd,
+ preexec_fn=_acquire_controlling_tty,
+ close_fds=True,
+ )
+ except Exception:
+ os.close(master_fd)
+ os.close(slave_fd)
+ raise
os.close(slave_fd)
LOG.debug('tty_create_child() child %d fd %d, parent %d, cmd: %s',
@@ -366,20 +493,31 @@ def hybrid_tty_create_child(args):
`(pid, socketpair_fd, tty_fd)`
"""
master_fd, slave_fd = openpty()
- parentfp, childfp = create_socketpair()
- mitogen.core.set_block(slave_fd)
- mitogen.core.set_block(childfp)
- disable_echo(master_fd)
- disable_echo(slave_fd)
- pid = detach_popen(
- args=args,
- stdin=childfp,
- stdout=childfp,
- stderr=slave_fd,
- preexec_fn=_acquire_controlling_tty,
- close_fds=True,
- )
+ try:
+ disable_echo(master_fd)
+ disable_echo(slave_fd)
+ mitogen.core.set_block(slave_fd)
+
+ parentfp, childfp = create_socketpair()
+ try:
+ mitogen.core.set_block(childfp)
+ pid = detach_popen(
+ args=args,
+ stdin=childfp,
+ stdout=childfp,
+ stderr=slave_fd,
+ preexec_fn=_acquire_controlling_tty,
+ close_fds=True,
+ )
+ except Exception:
+ parentfp.close()
+ childfp.close()
+ raise
+ except Exception:
+ os.close(master_fd)
+ os.close(slave_fd)
+ raise
os.close(slave_fd)
childfp.close()
@@ -406,6 +544,8 @@ def write_all(fd, s, deadline=None):
:raises mitogen.core.TimeoutError:
Bytestring could not be written entirely before deadline was exceeded.
+ :raises mitogen.parent.EofError:
+ Stream indicated EOF, suggesting the child process has exited.
:raises mitogen.core.StreamError:
File descriptor was disconnected before write could complete.
"""
@@ -429,13 +569,94 @@ def write_all(fd, s, deadline=None):
for fd in poller.poll(timeout):
n, disconnected = mitogen.core.io_op(os.write, fd, window)
if disconnected:
- raise mitogen.core.StreamError('EOF on stream during write')
+ raise EofError('EOF on stream during write')
written += n
finally:
poller.close()
+class PartialZlib(object):
+ """
+ Because the mitogen.core source has a line appended to it during bootstrap,
+ it must be recompressed for each connection. This is not a problem for a
+ small number of connections, but it amounts to 30 seconds CPU time by the
+ time 500 targets are in use.
+
+ For that reason, build a compressor containing mitogen.core and flush as
+ much of it as possible into an initial buffer. Then to append the custom
+ line, clone the compressor and compress just that line.
+
+ A full compression costs ~6 ms on a modern machine; this method costs ~35
+ usec.
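+
+ A hedged usage sketch; the names are illustrative::
+
+     pz = PartialZlib(core_src_bytes)        # one-off, expensive
+     for suffix in per_connection_suffixes:  # cheap per connection
+         assert zlib.decompress(pz.append(suffix)) == core_src_bytes + suffix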
+ """
+ def __init__(self, s):
+ self.s = s
+ if sys.version_info > (2, 5):
+ self._compressor = zlib.compressobj(9)
+ self._out = self._compressor.compress(s)
+ self._out += self._compressor.flush(zlib.Z_SYNC_FLUSH)
+ else:
+ self._compressor = None
+
+ def append(self, s):
+ """
+ Append the bytestring `s` to the compressor state and return the
+ final compressed output.
+ """
+ if self._compressor is None:
+ return zlib.compress(self.s + s, 9)
+ else:
+ compressor = self._compressor.copy()
+ out = self._out
+ out += compressor.compress(s)
+ return out + compressor.flush()
+
+
+class IteratingRead(object):
+ def __init__(self, fds, deadline=None):
+ self.deadline = deadline
+ self.timeout = None
+ self.poller = PREFERRED_POLLER()
+ for fd in fds:
+ self.poller.start_receive(fd)
+
+ self.bits = []
+
+ def close(self):
+ self.poller.close()
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ while self.poller.readers:
+ if self.deadline is not None:
+ self.timeout = max(0, self.deadline - time.time())
+ if self.timeout == 0:
+ break
+
+ for fd in self.poller.poll(self.timeout):
+ s, disconnected = mitogen.core.io_op(os.read, fd, 4096)
+ if disconnected or not s:
+ LOG.debug('iter_read(%r) -> disconnected: %s',
+ fd, disconnected)
+ self.poller.stop_receive(fd)
+ else:
+ IOLOG.debug('iter_read(%r) -> %r', fd, s)
+ self.bits.append(s)
+ return s
+
+ if not self.poller.readers:
+ raise EofError(u'EOF on stream; last 300 bytes received: %r' %
+ (b('').join(self.bits)[-300:].decode('latin1'),))
+
+ raise mitogen.core.TimeoutError('read timed out')
+
+ __next__ = next
+
+
def iter_read(fds, deadline=None):
"""Return a generator that arranges for up to 4096-byte chunks to be read
at a time from the file descriptors in `fds` until the generator is destroyed.
@@ -448,40 +669,12 @@ def iter_read(fds, deadline=None):
:raises mitogen.core.TimeoutError:
Attempt to read beyond deadline.
+ :raises mitogen.parent.EofError:
+ All streams indicated EOF, suggesting the child process has exited.
:raises mitogen.core.StreamError:
Attempt to read past end of file.
"""
- poller = PREFERRED_POLLER()
- for fd in fds:
- poller.start_receive(fd)
-
- bits = []
- timeout = None
- try:
- while poller.readers:
- if deadline is not None:
- timeout = max(0, deadline - time.time())
- if timeout == 0:
- break
-
- for fd in poller.poll(timeout):
- s, disconnected = mitogen.core.io_op(os.read, fd, 4096)
- if disconnected or not s:
- IOLOG.debug('iter_read(%r) -> disconnected', fd)
- poller.stop_receive(fd)
- else:
- IOLOG.debug('iter_read(%r) -> %r', fd, s)
- bits.append(s)
- yield s
- finally:
- poller.close()
-
- if not poller.readers:
- raise mitogen.core.StreamError(
- u'EOF on stream; last 300 bytes received: %r' %
- (b('').join(bits)[-300:].decode('latin1'),)
- )
- raise mitogen.core.TimeoutError('read timed out')
+ return IteratingRead(fds=fds, deadline=deadline)
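+
+
+# A hedged usage sketch; mirrors discard_until() below:
+#
+#   it = iter_read([fd], deadline=time.time() + 30.0)
+#   try:
+#       for chunk in it:
+#           pass  # handle each up-to-4096 byte chunk
+#   finally:
+#       it.close()  # ensure Poller.close() is called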
def discard_until(fd, s, deadline):
@@ -500,15 +693,21 @@ def discard_until(fd, s, deadline):
:raises mitogen.core.TimeoutError:
Attempt to read beyond deadline.
+ :raises mitogen.parent.EofError:
+ All streams indicated EOF, suggesting the child process has exited.
:raises mitogen.core.StreamError:
Attempt to read past end of file.
"""
- for buf in iter_read([fd], deadline):
- if IOLOG.level == logging.DEBUG:
- for line in buf.splitlines():
- IOLOG.debug('discard_until: discarding %r', line)
- if buf.endswith(s):
- return
+ it = iter_read([fd], deadline)
+ try:
+ for buf in it:
+ if IOLOG.level == logging.DEBUG:
+ for line in buf.splitlines():
+ IOLOG.debug('discard_until: discarding %r', line)
+ if buf.endswith(s):
+ return
+ finally:
+ it.close() # ensure Poller.close() is called.
def _upgrade_broker(broker):
@@ -543,6 +742,7 @@ def _upgrade_broker(broker):
len(old.readers), len(old.writers))
+@mitogen.core.takes_econtext
def upgrade_router(econtext):
if not isinstance(econtext.router, Router): # TODO
econtext.broker.defer(_upgrade_broker, econtext.broker)
@@ -566,7 +766,20 @@ def stream_by_method_name(name):
@mitogen.core.takes_econtext
def _proxy_connect(name, method_name, kwargs, econtext):
+ """
+ Implements the target portion of Router._proxy_connect() by upgrading the
+ local context to a parent if it is not already one, then calling back into
+ Router._connect() using the arguments passed to the parent's
+ Router.connect().
+
+ :returns:
+ Dict containing:
+ * ``id``: :data:`None`, or integer new context ID.
+ * ``name``: :data:`None`, or string name attribute of new Context.
+ * ``msg``: :data:`None`, or StreamError exception text.
+ """
upgrade_router(econtext)
+
try:
context = econtext.router._connect(
klass=stream_by_method_name(method_name),
@@ -605,6 +818,15 @@ def wstatus_to_str(status):
return 'unknown wait status (%d)' % (status,)
+class EofError(mitogen.core.StreamError):
+ """
+ Raised by :func:`iter_read` and :func:`write_all` when EOF is detected on
+ the stream, usually indicating the child process has exited.
+ """
+ # Inherits from StreamError to maintain compatibility.
+ pass
+
+
class Argv(object):
"""
Wrapper to defer argv formatting when debug logging is disabled.
@@ -643,8 +865,9 @@ class CallSpec(object):
def _get_name(self):
bits = [self.func.__module__]
if inspect.ismethod(self.func):
- bits.append(getattr(self.func.__self__, '__name__', None) or
- getattr(type(self.func.__self__), '__name__', None))
+ im_self = getattr(self.func, IM_SELF_ATTR)
+ bits.append(getattr(im_self, '__name__', None) or
+ getattr(type(im_self), '__name__', None))
bits.append(self.func.__name__)
return u'.'.join(bits)
@@ -668,25 +891,20 @@ class CallSpec(object):
class KqueuePoller(mitogen.core.Poller):
+ """
+ Poller based on the FreeBSD/Darwin kqueue(2) interface.
+ """
_repr = 'KqueuePoller()'
def __init__(self):
+ super(KqueuePoller, self).__init__()
self._kqueue = select.kqueue()
- self._rfds = {}
- self._wfds = {}
self._changelist = []
def close(self):
+ super(KqueuePoller, self).close()
self._kqueue.close()
- @property
- def readers(self):
- return list(self._rfds.items())
-
- @property
- def writers(self):
- return list(self._wfds.items())
-
def _control(self, fd, filters, flags):
mitogen.core._vv and IOLOG.debug(
'%r._control(%r, %r, %r)', self, fd, filters, flags)
@@ -705,7 +923,7 @@ class KqueuePoller(mitogen.core.Poller):
self, fd, data)
if fd not in self._rfds:
self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_ADD)
- self._rfds[fd] = data or fd
+ self._rfds[fd] = (data or fd, self._generation)
def stop_receive(self, fd):
mitogen.core._vv and IOLOG.debug('%r.stop_receive(%r)', self, fd)
@@ -718,7 +936,7 @@ class KqueuePoller(mitogen.core.Poller):
self, fd, data)
if fd not in self._wfds:
self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD)
- self._wfds[fd] = data or fd
+ self._wfds[fd] = (data or fd, self._generation)
def stop_transmit(self, fd):
mitogen.core._vv and IOLOG.debug('%r.stop_transmit(%r)', self, fd)
@@ -726,7 +944,7 @@ class KqueuePoller(mitogen.core.Poller):
self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)
del self._wfds[fd]
- def poll(self, timeout=None):
+ def _poll(self, timeout):
changelist = self._changelist
self._changelist = []
events, _ = mitogen.core.io_op(self._kqueue.control,
@@ -736,35 +954,34 @@ class KqueuePoller(mitogen.core.Poller):
if event.flags & select.KQ_EV_ERROR:
LOG.debug('ignoring stale event for fd %r: errno=%d: %s',
fd, event.data, errno.errorcode.get(event.data))
- elif event.filter == select.KQ_FILTER_READ and fd in self._rfds:
+ elif event.filter == select.KQ_FILTER_READ:
+ data, gen = self._rfds.get(fd, (None, None))
# Events can still be read for an already-discarded fd.
- mitogen.core._vv and IOLOG.debug('%r: POLLIN: %r', self, fd)
- yield self._rfds[fd]
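+ # Only yield data for fds registered before this poll() began;
+ # the generation check suppresses stale events for fds that were
+ # re-registered while the sweep was in progress.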
+ if gen and gen < self._generation:
+ mitogen.core._vv and IOLOG.debug('%r: POLLIN: %r', self, fd)
+ yield data
elif event.filter == select.KQ_FILTER_WRITE and fd in self._wfds:
- mitogen.core._vv and IOLOG.debug('%r: POLLOUT: %r', self, fd)
- yield self._wfds[fd]
+ data, gen = self._wfds.get(fd, (None, None))
+ if gen and gen < self._generation:
+ mitogen.core._vv and IOLOG.debug('%r: POLLOUT: %r', self, fd)
+ yield data
class EpollPoller(mitogen.core.Poller):
+ """
+ Poller based on the Linux epoll(2) interface.
+ """
_repr = 'EpollPoller()'
def __init__(self):
+ super(EpollPoller, self).__init__()
self._epoll = select.epoll(32)
self._registered_fds = set()
- self._rfds = {}
- self._wfds = {}
def close(self):
+ super(EpollPoller, self).close()
self._epoll.close()
- @property
- def readers(self):
- return list(self._rfds.items())
-
- @property
- def writers(self):
- return list(self._wfds.items())
-
def _control(self, fd):
mitogen.core._vv and IOLOG.debug('%r._control(%r)', self, fd)
mask = (((fd in self._rfds) and select.EPOLLIN) |
@@ -782,7 +999,7 @@ class EpollPoller(mitogen.core.Poller):
def start_receive(self, fd, data=None):
mitogen.core._vv and IOLOG.debug('%r.start_receive(%r, %r)',
self, fd, data)
- self._rfds[fd] = data or fd
+ self._rfds[fd] = (data or fd, self._generation)
self._control(fd)
def stop_receive(self, fd):
@@ -793,7 +1010,7 @@ class EpollPoller(mitogen.core.Poller):
def start_transmit(self, fd, data=None):
mitogen.core._vv and IOLOG.debug('%r.start_transmit(%r, %r)',
self, fd, data)
- self._wfds[fd] = data or fd
+ self._wfds[fd] = (data or fd, self._generation)
self._control(fd)
def stop_transmit(self, fd):
@@ -804,38 +1021,41 @@ class EpollPoller(mitogen.core.Poller):
_inmask = (getattr(select, 'EPOLLIN', 0) |
getattr(select, 'EPOLLHUP', 0))
- def poll(self, timeout=None):
+ def _poll(self, timeout):
the_timeout = -1
if timeout is not None:
the_timeout = timeout
events, _ = mitogen.core.io_op(self._epoll.poll, the_timeout, 32)
for fd, event in events:
- if event & self._inmask and fd in self._rfds:
- # Events can still be read for an already-discarded fd.
- mitogen.core._vv and IOLOG.debug('%r: POLLIN: %r', self, fd)
- yield self._rfds[fd]
- if event & select.EPOLLOUT and fd in self._wfds:
- mitogen.core._vv and IOLOG.debug('%r: POLLOUT: %r', self, fd)
- yield self._wfds[fd]
-
-
-POLLER_BY_SYSNAME = {
- 'Darwin': KqueuePoller,
- 'FreeBSD': KqueuePoller,
- 'Linux': EpollPoller,
-}
+ if event & self._inmask:
+ data, gen = self._rfds.get(fd, (None, None))
+ if gen and gen < self._generation:
+ # Events can still be read for an already-discarded fd.
+ mitogen.core._vv and IOLOG.debug('%r: POLLIN: %r', self, fd)
+ yield data
+ if event & select.EPOLLOUT:
+ data, gen = self._wfds.get(fd, (None, None))
+ if gen and gen < self._generation:
+ mitogen.core._vv and IOLOG.debug('%r: POLLOUT: %r', self, fd)
+ yield data
+
+
+if sys.version_info < (2, 6):
+ # 2.4 and 2.5 only had select.select() and select.poll().
+ POLLER_BY_SYSNAME = {}
+else:
+ POLLER_BY_SYSNAME = {
+ 'Darwin': KqueuePoller,
+ 'FreeBSD': KqueuePoller,
+ 'Linux': EpollPoller,
+ }
PREFERRED_POLLER = POLLER_BY_SYSNAME.get(
os.uname()[0],
mitogen.core.Poller,
)
-# For apps that start threads dynamically, it's possible Latch will also get
-# very high-numbered wait fds when there are many connections, and so select()
-# becomes useless there too. So swap in our favourite poller.
-mitogen.core.Latch.poller_class = PREFERRED_POLLER
-
class DiagLogStream(mitogen.core.BasicStream):
"""
@@ -858,7 +1078,7 @@ class DiagLogStream(mitogen.core.BasicStream):
self.buf = ''
def __repr__(self):
- return 'mitogen.parent.DiagLogStream(fd=%r, %r)' % (
+ return "mitogen.parent.DiagLogStream(fd=%r, '%s')" % (
self.receive_side.fd,
self.stream.name,
)
@@ -874,11 +1094,11 @@ class DiagLogStream(mitogen.core.BasicStream):
return self.on_disconnect(broker)
self.buf += buf.decode('utf-8', 'replace')
- while '\n' in self.buf:
+ while u'\n' in self.buf:
lines = self.buf.split('\n')
self.buf = lines[-1]
for line in lines[:-1]:
- LOG.debug('%r: %r', self, line.rstrip())
+ LOG.debug('%s: %s', self.stream.name, line.rstrip())
class Stream(mitogen.core.Stream):
@@ -908,12 +1128,36 @@ class Stream(mitogen.core.Stream):
#: ExternalContext.main().
max_message_size = None
+ #: If :attr:`create_child` supplied a diag_fd, references the corresponding
+ #: :class:`DiagLogStream`, allowing it to be disconnected when this stream
+ #: is disconnected. Set to :data:`None` if no `diag_fd` was present.
+ diag_stream = None
+
+ #: Function with the semantics of :func:`create_child` used to create the
+ #: child process.
+ create_child = staticmethod(create_child)
+
+ #: Dictionary of extra kwargs passed to :attr:`create_child`.
+ create_child_args = {}
+
+ #: :data:`True` if the remote has indicated that it intends to detach, and
+ #: should not be killed on disconnect.
+ detached = False
+
+ #: If :data:`True`, indicates the child should not be killed during
+ #: graceful detachment, as it is the actual process implementing the child
+ #: context. In all other cases, the subprocess is SSH, sudo, or a similar
+ #: tool that should be reminded to quit during disconnection.
+ child_is_immediate_subprocess = True
+
+ #: Prefix given to default names generated by :meth:`connect`.
+ name_prefix = u'local'
+
+ _reaped = False
+
def __init__(self, *args, **kwargs):
super(Stream, self).__init__(*args, **kwargs)
self.sent_modules = set(['mitogen', 'mitogen.core'])
- #: List of contexts reachable via this stream; used to cleanup routes
- #: during disconnection.
- self.routes = set([self.remote_id])
def construct(self, max_message_size, remote_name=None, python_path=None,
debug=False, connect_timeout=None, profiling=False,
@@ -948,15 +1192,6 @@ class Stream(mitogen.core.Stream):
)
)
- #: If :data:`True`, indicates the subprocess managed by us should not be
- #: killed during graceful detachment, as it the actual process implementing
- #: the child context. In all other cases, the subprocess is SSH, sudo, or a
- #: similar tool that should be reminded to quit during disconnection.
- child_is_immediate_subprocess = True
-
- detached = False
- _reaped = False
-
def _reap_child(self):
"""
Reap the child process during disconnection.
@@ -965,6 +1200,10 @@ class Stream(mitogen.core.Stream):
LOG.debug('%r: immediate child is detached, won\'t reap it', self)
return
+ if self.profiling:
+ LOG.info("%r: won't kill child because profiling=True", self)
+ return
+
if self._reaped:
# on_disconnect() may be invoked more than once, for example, if
# there is still a pending message to be sent after the first
@@ -985,19 +1224,22 @@ class Stream(mitogen.core.Stream):
LOG.debug('%r: PID %d %s', self, pid, wstatus_to_str(status))
return
- # For processes like sudo we cannot actually send sudo a signal,
- # because it is setuid, so this is best-effort only.
- LOG.debug('%r: child process still alive, sending SIGTERM', self)
- try:
- os.kill(self.pid, signal.SIGTERM)
- except OSError:
- e = sys.exc_info()[1]
- if e.args[0] != errno.EPERM:
- raise
+ if not self._router.profiling:
+ # For processes like sudo we cannot actually send sudo a signal,
+ # because it is setuid, so this is best-effort only.
+ LOG.debug('%r: child process still alive, sending SIGTERM', self)
+ try:
+ os.kill(self.pid, signal.SIGTERM)
+ except OSError:
+ e = sys.exc_info()[1]
+ if e.args[0] != errno.EPERM:
+ raise
def on_disconnect(self, broker):
- self._reap_child()
super(Stream, self).on_disconnect(broker)
+ if self.diag_stream is not None:
+ self.diag_stream.on_disconnect(broker)
+ self._reap_child()
# Minimised, gzipped, base64'd and passed to 'python -c'. It forks, dups
# file descriptor 0 as 100, creates a pipe, then execs a new interpreter
@@ -1017,6 +1259,9 @@ class Stream(mitogen.core.Stream):
# r: read side of core_src FD.
# w: write side of core_src FD.
# C: the decompressed core source.
+
+ # Final os.close(2) prevents a --py-debug build from corrupting the stream
+ # with "[1234 refs]" during exit.
@staticmethod
def _first_stage():
R,W=os.pipe()
@@ -1042,6 +1287,7 @@ class Stream(mitogen.core.Stream):
fp.write(C)
fp.close()
os.write(1,'MITO001\n'.encode())
+ os.close(2)
def get_python_argv(self):
"""
@@ -1095,15 +1341,12 @@ class Stream(mitogen.core.Stream):
}
def get_preamble(self):
- source = get_core_source()
- source += '\nExternalContext(%r).main()\n' % (
- self.get_econtext_config(),
+ suffix = (
+ '\nExternalContext(%r).main()\n' %
+ (self.get_econtext_config(),)
)
- return zlib.compress(source.encode('utf-8'), 9)
-
- create_child = staticmethod(create_child)
- create_child_args = {}
- name_prefix = u'local'
+ partial = get_core_source_partial()
+ return partial.append(suffix.encode('utf-8'))
def start_child(self):
args = self.get_boot_command()
@@ -1114,18 +1357,48 @@ class Stream(mitogen.core.Stream):
msg = 'Child start failed: %s. Command was: %s' % (e, Argv(args))
raise mitogen.core.StreamError(msg)
+ eof_error_hint = None
+
+ def _adorn_eof_error(self, e):
+ """
+ Used by subclasses to provide additional information in the case of a
+ failed connection.
+ """
+ if self.eof_error_hint:
+ e.args = ('%s\n\n%s' % (e.args[0], self.eof_error_hint),)
+
+ def _get_name(self):
+ """
+ Called by :meth:`connect` after :attr:`pid` is known. Subclasses can
+ override it to specify a default stream name, or set
+ :attr:`name_prefix` to generate a default format.
+ """
+ return u'%s.%s' % (self.name_prefix, self.pid)
+
def connect(self):
LOG.debug('%r.connect()', self)
- self.pid, fd, extra_fd = self.start_child()
- self.name = u'%s.%s' % (self.name_prefix, self.pid)
+ self.pid, fd, diag_fd = self.start_child()
+ self.name = self._get_name()
self.receive_side = mitogen.core.Side(self, fd)
self.transmit_side = mitogen.core.Side(self, os.dup(fd))
- LOG.debug('%r.connect(): child process stdin/stdout=%r',
- self, self.receive_side.fd)
+ if diag_fd is not None:
+ self.diag_stream = DiagLogStream(diag_fd, self)
+ else:
+ self.diag_stream = None
+
+ LOG.debug('%r.connect(): pid:%r stdin:%r, stdout:%r, diag:%r',
+ self, self.pid, self.receive_side.fd, self.transmit_side.fd,
+ self.diag_stream and self.diag_stream.receive_side.fd)
try:
- self._connect_bootstrap(extra_fd)
+ self._connect_bootstrap()
+ except EofError:
+ self.on_disconnect(self._router.broker)
+ e = sys.exc_info()[1]
+ self._adorn_eof_error(e)
+ raise
except Exception:
+ self.on_disconnect(self._router.broker)
self._reap_child()
raise
@@ -1140,8 +1413,10 @@ class Stream(mitogen.core.Stream):
write_all(self.transmit_side.fd, self.get_preamble())
discard_until(self.receive_side.fd, self.EC1_MARKER,
self.connect_deadline)
+ if self.diag_stream:
+ self._router.broker.start_receive(self.diag_stream)
- def _connect_bootstrap(self, extra_fd):
+ def _connect_bootstrap(self):
discard_until(self.receive_side.fd, self.EC0_MARKER,
self.connect_deadline)
self._ec0_received()
@@ -1246,7 +1521,7 @@ class CallChain(object):
return '%s-%s-%x-%x' % (
socket.gethostname(),
os.getpid(),
- threading.currentThread().ident,
+ thread.get_ident(),
int(1e6 * time.time()),
)
@@ -1272,9 +1547,33 @@ class CallChain(object):
finally:
self.chain_id = saved
+ closures_msg = (
+ 'Mitogen cannot invoke closures, as doing so would require '
+ 'serializing arbitrary program state, and no universal '
+ 'method exists to recover a reference to them.'
+ )
+
+ lambda_msg = (
+ 'Mitogen cannot invoke anonymous functions, as no universal method '
+ 'exists to recover a reference to an anonymous function.'
+ )
+
+ method_msg = (
+ 'Mitogen cannot invoke instance methods, as doing so would require '
+ 'serializing arbitrary program state.'
+ )
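+
+ # A hedged illustration of the checks in make_msg() below; `ctx`, `obj`,
+ # `Klass` and the functions are hypothetical:
+ #
+ #   ctx.call(lambda: 42)         # TypeError: lambda_msg
+ #   ctx.call(obj.method)         # TypeError: method_msg (instance-bound)
+ #   ctx.call(func_with_closure)  # TypeError: closures_msg
+ #   ctx.call(Klass.clsmethod)    # OK: classmethods resolve via their class
+ #   ctx.call(module_function)    # OK: plain module-level function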
+
def make_msg(self, fn, *args, **kwargs):
- if inspect.ismethod(fn) and inspect.isclass(fn.__self__):
- klass = mitogen.core.to_text(fn.__self__.__name__)
+ if getattr(fn, closure_attr, None) is not None:
+ raise TypeError(self.closures_msg)
+ if fn.__name__ == '<lambda>':
+ raise TypeError(self.lambda_msg)
+
+ if inspect.ismethod(fn):
+ im_self = getattr(fn, IM_SELF_ATTR)
+ if not inspect.isclass(im_self):
+ raise TypeError(self.method_msg)
+ klass = mitogen.core.to_text(im_self.__name__)
else:
klass = None
@@ -1372,13 +1671,25 @@ class CallChain(object):
class Context(mitogen.core.Context):
+ """
+ Extend :class:`mitogen.core.Context` with functionality useful to masters,
+ and to child contexts that later become parents. Currently, when this class
+ is required, the target context's router is upgraded at runtime.
+ """
+ #: A :class:`CallChain` instance constructed by default, with pipelining
+ #: disabled. :meth:`call`, :meth:`call_async` and :meth:`call_no_reply` use
+ #: this instance.
call_chain_class = CallChain
+
via = None
def __init__(self, *args, **kwargs):
super(Context, self).__init__(*args, **kwargs)
self.default_call_chain = self.call_chain_class(self)
+ def __ne__(self, other):
+ return not (self == other)
+
def __eq__(self, other):
return (isinstance(other, mitogen.core.Context) and
(other.context_id == self.context_id) and
@@ -1388,15 +1699,41 @@ class Context(mitogen.core.Context):
return hash((self.router, self.context_id))
def call_async(self, fn, *args, **kwargs):
+ """
+ See :meth:`CallChain.call_async`.
+ """
return self.default_call_chain.call_async(fn, *args, **kwargs)
def call(self, fn, *args, **kwargs):
+ """
+ See :meth:`CallChain.call`.
+ """
return self.default_call_chain.call(fn, *args, **kwargs)
def call_no_reply(self, fn, *args, **kwargs):
+ """
+ See :meth:`CallChain.call_no_reply`.
+ """
self.default_call_chain.call_no_reply(fn, *args, **kwargs)
def shutdown(self, wait=False):
+ """
+ Arrange for the context to receive a ``SHUTDOWN`` message, triggering
+ graceful shutdown.
+
+ Due to a lack of support for timers, no attempt is made yet to forcefully
+ terminate a hung context using this method. This will be fixed shortly.
+
+ :param bool wait:
+ If :data:`True`, block the calling thread until the context has
+ completely terminated.
+
+ :returns:
+ If `wait` is :data:`False`, returns a :class:`mitogen.core.Latch`
+ whose :meth:`get() <mitogen.core.Latch.get>` method returns
+ :data:`None` when shutdown completes. The `timeout` parameter may
+ be used to implement graceful timeouts.
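+
+ A hedged sketch of a graceful timeout::
+
+     latch = context.shutdown(wait=False)
+     try:
+         latch.get(timeout=10.0)
+     except mitogen.core.TimeoutError:
+         pass  # context is hung; escalate as appropriate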
+ """
LOG.debug('%r.shutdown() sending SHUTDOWN', self)
latch = mitogen.core.Latch()
mitogen.core.listen(self, 'disconnect', lambda: latch.put(None))
@@ -1413,70 +1750,177 @@ class Context(mitogen.core.Context):
class RouteMonitor(object):
+ """
+ Generate and respond to :data:`mitogen.core.ADD_ROUTE` and
+ :data:`mitogen.core.DEL_ROUTE` messages sent to the local context by
+ maintaining a table of available routes, and propagating messages towards
+ parents and siblings as appropriate.
+
+ :class:`RouteMonitor` is responsible for generating routing messages for
+ directly attached children. It learns of new children via
+ :meth:`notice_stream` called by :class:`Router`, and subscribes to their
+ ``disconnect`` event to learn when they disappear.
+
+ In children, constructing this class overwrites the stub
+ :data:`mitogen.core.DEL_ROUTE` handler installed by
+ :class:`mitogen.core.ExternalContext`, which is expected behaviour when a
+ child is being upgraded in preparation to become a parent of children of
+ its own.
+
+ By virtue of only being active while responding to messages from a handler,
+ RouteMonitor lives entirely on the broker thread, so its data requires no
+ locking.
+
+ :param Router router:
+ Router to install handlers on.
+ :param Context parent:
+ :data:`None` in the master process, or reference to the parent context
+ we should propagate route updates towards.
+ """
def __init__(self, router, parent=None):
self.router = router
self.parent = parent
+ #: Mapping of Stream instance to integer context IDs reachable via the
+ #: stream; used to cleanup routes during disconnection.
+ self._routes_by_stream = {}
self.router.add_handler(
fn=self._on_add_route,
handle=mitogen.core.ADD_ROUTE,
persist=True,
policy=is_immediate_child,
+ overwrite=True,
)
self.router.add_handler(
fn=self._on_del_route,
handle=mitogen.core.DEL_ROUTE,
persist=True,
policy=is_immediate_child,
+ overwrite=True,
)
- def propagate(self, handle, target_id, name=None):
- # self.parent is None in the master.
- if not self.parent:
+ def __repr__(self):
+ return 'RouteMonitor()'
+
+ def _send_one(self, stream, handle, target_id, name):
+ """
+ Compose and send an update message on a stream.
+
+ :param mitogen.core.Stream stream:
+ Stream to send it on.
+ :param int handle:
+ :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
+ :param int target_id:
+ ID of the connecting or disconnecting context.
+ :param str name:
+ Context name or :data:`None`.
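+
+ The payload is the decimal target ID, optionally followed by a colon and
+ the context name, e.g. ``b'4002:ssh.host'`` (values illustrative).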
+ """
+ if not stream:
+ # We may not have a stream during shutdown.
return
data = str(target_id)
if name:
- data = '%s:%s' % (target_id, mitogen.core.b(name))
- self.parent.send(
+ data = '%s:%s' % (target_id, name)
+ stream.send(
mitogen.core.Message(
handle=handle,
data=data.encode('utf-8'),
+ dst_id=stream.remote_id,
)
)
+ def _propagate_up(self, handle, target_id, name=None):
+ """
+ In a non-master context, propagate an update towards the master.
+
+ :param int handle:
+ :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
+ :param int target_id:
+ ID of the connecting or disconnecting context.
+ :param str name:
+ For :data:`mitogen.core.ADD_ROUTE`, the name of the new context
+ assigned by its parent. This is used by parents to assign the
+ :attr:`mitogen.core.Context.name` attribute.
+ """
+ if self.parent:
+ stream = self.router.stream_by_id(self.parent.context_id)
+ self._send_one(stream, handle, target_id, name)
+
+ def _propagate_down(self, handle, target_id):
+ """
+ For DEL_ROUTE, we additionally want to broadcast the message to any
+ stream that has ever communicated with the disconnecting ID, so
+ core.py's :meth:`mitogen.core.Router._on_del_route` can turn the
+ message into a disconnect event.
+
+ :param int handle:
+ :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
+ :param int target_id:
+ ID of the connecting or disconnecting context.
+ """
+ for stream in self.router.get_streams():
+ if target_id in stream.egress_ids and (
+ (self.parent is None) or
+ (self.parent.context_id != stream.remote_id)
+ ):
+ self._send_one(stream, mitogen.core.DEL_ROUTE, target_id, None)
+
def notice_stream(self, stream):
"""
When this parent is responsible for a new directly connected child
stream, we're also responsible for broadcasting DEL_ROUTE upstream
if/when that child disconnects.
"""
- self.propagate(mitogen.core.ADD_ROUTE, stream.remote_id,
- stream.name)
+ self._routes_by_stream[stream] = set([stream.remote_id])
+ self._propagate_up(mitogen.core.ADD_ROUTE, stream.remote_id,
+ stream.name)
mitogen.core.listen(
obj=stream,
name='disconnect',
func=lambda: self._on_stream_disconnect(stream),
)
+ def get_routes(self, stream):
+ """
+ Return the set of context IDs reachable on a stream.
+
+ :param mitogen.core.Stream stream:
+ :returns: set([int])
+ """
+ return self._routes_by_stream.get(stream) or set()
+
def _on_stream_disconnect(self, stream):
"""
- Respond to disconnection of a local stream by
+ Respond to disconnection of a local stream by propagating DEL_ROUTE for
+ any contexts we know were attached to it.
"""
- LOG.debug('%r is gone; propagating DEL_ROUTE for %r',
- stream, stream.routes)
- for target_id in stream.routes:
+ # During a stream crash it is possible for the disconnect signal to
+ # fire twice, in which case ignore the second instance.
+ routes = self._routes_by_stream.pop(stream, None)
+ if routes is None:
+ return
+
+ LOG.debug('%r: %r is gone; propagating DEL_ROUTE for %r',
+ self, stream, routes)
+ for target_id in routes:
self.router.del_route(target_id)
- self.propagate(mitogen.core.DEL_ROUTE, target_id)
+ self._propagate_up(mitogen.core.DEL_ROUTE, target_id)
+ self._propagate_down(mitogen.core.DEL_ROUTE, target_id)
context = self.router.context_by_id(target_id, create=False)
if context:
mitogen.core.fire(context, 'disconnect')
def _on_add_route(self, msg):
+ """
+ Respond to :data:`mitogen.core.ADD_ROUTE` by validating the source of
+ the message, updating the local table, and propagating the message
+ upwards.
+ """
if msg.is_dead:
return
- target_id_s, _, target_name = msg.data.partition(b(':'))
+ target_id_s, _, target_name = bytes_partition(msg.data, b(':'))
target_name = target_name.decode()
target_id = int(target_id_s)
self.router.context_by_id(target_id).name = target_name
@@ -1489,30 +1933,46 @@ class RouteMonitor(object):
return
LOG.debug('Adding route to %d via %r', target_id, stream)
- stream.routes.add(target_id)
+ self._routes_by_stream[stream].add(target_id)
self.router.add_route(target_id, stream)
- self.propagate(mitogen.core.ADD_ROUTE, target_id, target_name)
+ self._propagate_up(mitogen.core.ADD_ROUTE, target_id, target_name)
def _on_del_route(self, msg):
+ """
+ Respond to :data:`mitogen.core.DEL_ROUTE` by validating the source of
+ the message, updating the local table, propagating the message
+ upwards, and downwards towards any stream that ever had a message
+ forwarded from it towards the disconnecting context.
+ """
if msg.is_dead:
return
target_id = int(msg.data)
registered_stream = self.router.stream_by_id(target_id)
+ if registered_stream is None:
+ return
+
stream = self.router.stream_by_id(msg.auth_id)
if registered_stream != stream:
- LOG.error('Received DEL_ROUTE for %d from %r, expected %r',
- target_id, stream, registered_stream)
+ LOG.error('%r: received DEL_ROUTE for %d from %r, expected %r',
+ self, target_id, stream, registered_stream)
return
- LOG.debug('Deleting route to %d via %r', target_id, stream)
- stream.routes.discard(target_id)
- self.router.del_route(target_id)
- self.propagate(mitogen.core.DEL_ROUTE, target_id)
context = self.router.context_by_id(target_id, create=False)
if context:
+ LOG.debug('%r: firing local disconnect for %r', self, context)
mitogen.core.fire(context, 'disconnect')
+ LOG.debug('%r: deleting route to %d via %r', self, target_id, stream)
+ routes = self._routes_by_stream.get(stream)
+ if routes:
+ routes.discard(target_id)
+
+ self.router.del_route(target_id)
+ if stream.remote_id != mitogen.parent_id:
+ self._propagate_up(mitogen.core.DEL_ROUTE, target_id)
+ self._propagate_down(mitogen.core.DEL_ROUTE, target_id)
+
class Router(mitogen.core.Router):
context_class = Context
@@ -1550,23 +2010,46 @@ class Router(mitogen.core.Router):
stream.detached = True
msg.reply(None)
+ def get_streams(self):
+ """
+ Return a snapshot of all streams in existence at time of call.
+ """
+ self._write_lock.acquire()
+ try:
+ return list(itervalues(self._stream_by_id))
+ finally:
+ self._write_lock.release()
+
def add_route(self, target_id, stream):
+ """
+ Arrange for messages whose `dst_id` is `target_id` to be forwarded on
+ the directly connected `stream`. This method is called
+ automatically in response to :data:`mitogen.core.ADD_ROUTE` messages,
+ but remains public while the design has not yet settled, and situations
+ may arise where routing is not fully automatic.
+ """
LOG.debug('%r.add_route(%r, %r)', self, target_id, stream)
assert isinstance(target_id, int)
assert isinstance(stream, Stream)
+
+ self._write_lock.acquire()
try:
self._stream_by_id[target_id] = stream
- except KeyError:
- LOG.error('%r: cant add route to %r via %r: no such stream',
- self, target_id, stream)
+ finally:
+ self._write_lock.release()
def del_route(self, target_id):
LOG.debug('%r.del_route(%r)', self, target_id)
+ # DEL_ROUTE may be sent by a parent if it knows this context sent
+ # messages to a peer that has now disconnected, to let us raise the
+ # 'disconnect' event on the appropriate Context instance. In that case
+ # we won't have a matching _stream_by_id entry for the disappearing
+ # route, so don't raise an error for a missing key here.
+ self._write_lock.acquire()
try:
- del self._stream_by_id[target_id]
- except KeyError:
- LOG.error('%r: cant delete route to %r: no such stream',
- self, target_id)
+ self._stream_by_id.pop(target_id, None)
+ finally:
+ self._write_lock.release()
def get_module_blacklist(self):
if mitogen.context_id == 0:
@@ -1581,15 +2064,6 @@ class Router(mitogen.core.Router):
def allocate_id(self):
return self.id_allocator.allocate()
- def context_by_id(self, context_id, via_id=None, create=True):
- context = self._context_by_id.get(context_id)
- if create and not context:
- context = self.context_class(self, context_id)
- if via_id is not None:
- context.via = self.context_by_id(via_id)
- self._context_by_id[context_id] = context
- return context
-
connection_timeout_msg = u"Connection timed out."
def _connect(self, klass, name=None, **kwargs):
@@ -1617,8 +2091,10 @@ class Router(mitogen.core.Router):
via = kwargs.pop(u'via', None)
if via is not None:
- return self.proxy_connect(via, method_name, name=name, **kwargs)
- return self._connect(klass, name=name, **kwargs)
+ return self.proxy_connect(via, method_name, name=name,
+ **mitogen.core.Kwargs(kwargs))
+ return self._connect(klass, name=name,
+ **mitogen.core.Kwargs(kwargs))
def proxy_connect(self, via_context, method_name, name=None, **kwargs):
resp = via_context.call(_proxy_connect,
@@ -1632,7 +2108,11 @@ class Router(mitogen.core.Router):
name = u'%s.%s' % (via_context.name, resp['name'])
context = self.context_class(self, resp['id'], name=name)
context.via = via_context
- self._context_by_id[context.context_id] = context
+ self._write_lock.acquire()
+ try:
+ self._context_by_id[context.context_id] = context
+ finally:
+ self._write_lock.release()
return context
def doas(self, **kwargs):
@@ -1673,6 +2153,10 @@ class Router(mitogen.core.Router):
class ProcessMonitor(object):
+ """
+ Install a :data:`signal.SIGCHLD` handler that generates callbacks when a
+ specific child process has exited. This class is obsolete; do not use it.
+ """
def __init__(self):
# pid -> callback()
self.callback_by_pid = {}
@@ -1686,6 +2170,16 @@ class ProcessMonitor(object):
del self.callback_by_pid[pid]
def add(self, pid, callback):
+ """
+ Add a callback function to be notified of the exit status of a process.
+
+ :param int pid:
+ Process ID to be notified of.
+
+ :param callback:
+ Function invoked as `callback(status)`, where `status` is the raw
+ exit status of the child process.
+ """
self.callback_by_pid[pid] = callback
_instance = None
@@ -1726,7 +2220,7 @@ class ModuleForwarder(object):
if msg.is_dead:
return
- context_id_s, _, fullname = msg.data.partition(b('\x00'))
+ context_id_s, _, fullname = bytes_partition(msg.data, b('\x00'))
fullname = mitogen.core.to_text(fullname)
context_id = int(context_id_s)
stream = self.router.stream_by_id(context_id)
@@ -1759,15 +2253,6 @@ class ModuleForwarder(object):
callback = lambda: self._on_cache_callback(msg, fullname)
self.importer._request_module(fullname, callback)
- def _send_one_module(self, msg, tup):
- self.router._async_route(
- mitogen.core.Message.pickled(
- tup,
- dst_id=msg.src_id,
- handle=mitogen.core.LOAD_MODULE,
- )
- )
-
def _on_cache_callback(self, msg, fullname):
LOG.debug('%r._on_get_module(): sending %r', self, fullname)
stream = self.router.stream_by_id(msg.src_id)
diff --git a/mitogen/profiler.py b/mitogen/profiler.py
new file mode 100644
index 00000000..10ec6086
--- /dev/null
+++ b/mitogen/profiler.py
@@ -0,0 +1,166 @@
+# Copyright 2017, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""mitogen.profiler
+ Record and report cProfile statistics from a run. Creates one aggregated
+ output file, one aggregate containing only workers, and one for the
+ top-level process.
+
+Usage:
+ mitogen.profiler record <dest_path> <tool> [args ..]
+ mitogen.profiler report <dest_path> [sort_mode]
+ mitogen.profiler stat <sort_mode> <tool> [args ..]
+
+Mode:
+ record: Record a trace.
+ report: Report on a previously recorded trace.
+ stat: Record and report in a single step.
+
+Where:
+ dest_path: Filesystem prefix to write .pstats files to.
+ sort_mode: Sorting mode; defaults to "cumulative". See:
+ https://docs.python.org/2/library/profile.html#pstats.Stats.sort_stats
+
+Example:
+ mitogen.profiler record /tmp/mypatch ansible-playbook foo.yml
+ mitogen.profiler report /tmp/mypatch-worker.pstats
+"""
+
+from __future__ import print_function
+import os
+import pstats
+import cProfile
+import shutil
+import subprocess
+import sys
+import tempfile
+import time
+
+import mitogen.core
+
+
+def try_merge(stats, path):
+ try:
+ stats.add(path)
+ return True
+ except Exception as e:
+ print('Failed. Race? Will retry. %s' % (e,))
+ return False
+
+
+def merge_stats(outpath, inpaths):
+ first, rest = inpaths[0], inpaths[1:]
+ for x in range(5):
+ try:
+ stats = pstats.Stats(first)
+ except EOFError:
+ time.sleep(0.2)
+ continue
+
+ print("Writing %r..." % (outpath,))
+ for path in rest:
+ #print("Merging %r into %r.." % (os.path.basename(path), outpath))
+ for x in range(5):
+ if try_merge(stats, path):
+ break
+ time.sleep(0.2)
+
+ stats.dump_stats(outpath)
+
+
+def generate_stats(outpath, tmpdir):
+ print('Generating stats..')
+ all_paths = []
+ paths_by_ident = {}
+
+ for name in os.listdir(tmpdir):
+ if name.endswith('-dump.pstats'):
+ ident, _, pid = name.partition('-')
+ path = os.path.join(tmpdir, name)
+ all_paths.append(path)
+ paths_by_ident.setdefault(ident, []).append(path)
+
+ merge_stats('%s-all.pstat' % (outpath,), all_paths)
+ for ident, paths in paths_by_ident.items():
+ merge_stats('%s-%s.pstat' % (outpath, ident), paths)
+
+
+def do_record(tmpdir, path, *args):
+ env = os.environ.copy()
+ fmt = '%(identity)s-%(pid)s.%(now)s-dump.%(ext)s'
+ env['MITOGEN_PROFILING'] = '1'
+ env['MITOGEN_PROFILE_FMT'] = os.path.join(tmpdir, fmt)
+ rc = subprocess.call(args, env=env)
+ generate_stats(path, tmpdir)
+ return rc
+
+
+def do_report(tmpdir, path, sort='cumulative'):
+ stats = pstats.Stats(path).sort_stats(sort)
+ stats.print_stats(100)
+
+
+def do_stat(tmpdir, sort, *args):
+ valid_sorts = pstats.Stats.sort_arg_dict_default
+ if sort not in valid_sorts:
+ sys.stderr.write('Invalid sort %r, must be one of %s\n' %
+ (sort, ', '.join(sorted(valid_sorts))))
+ sys.exit(1)
+
+ outfile = os.path.join(tmpdir, 'combined')
+ do_record(tmpdir, outfile, *args)
+ aggs = ('app.main', 'mitogen.broker', 'mitogen.child_main',
+ 'mitogen.service.pool', 'Strategy', 'WorkerProcess',
+ 'all')
+ for agg in aggs:
+ path = '%s-%s.pstat' % (outfile, agg)
+ if os.path.exists(path):
+ print()
+ print()
+ print('------ Aggregation %r ------' % (agg,))
+ print()
+ do_report(tmpdir, path, sort)
+ print()
+
+
+def main():
+ if len(sys.argv) < 2 or sys.argv[1] not in ('record', 'report', 'stat'):
+ sys.stderr.write(__doc__)
+ sys.exit(1)
+
+ func = globals()['do_' + sys.argv[1]]
+ tmpdir = tempfile.mkdtemp(prefix='mitogen.profiler')
+ try:
+ sys.exit(func(tmpdir, *sys.argv[2:]) or 0)
+ finally:
+ shutil.rmtree(tmpdir)
+
+if __name__ == '__main__':
+ main()
diff --git a/mitogen/select.py b/mitogen/select.py
index ce4023a9..6b87e671 100644
--- a/mitogen/select.py
+++ b/mitogen/select.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
import mitogen.core
@@ -34,11 +36,57 @@ class Error(mitogen.core.Error):
class Select(object):
- notify = None
+ """
+ Support scatter/gather asynchronous calls and waiting on multiple
+ receivers, channels, and sub-Selects. Accepts a sequence of
+ :class:`mitogen.core.Receiver` or :class:`mitogen.select.Select` instances
+ and returns the first value posted to any receiver or select.
- @classmethod
- def all(cls, receivers):
- return list(msg.unpickle() for msg in cls(receivers))
+ If `oneshot` is :data:`True`, then remove each receiver as it yields a
+ result; since :meth:`__iter__` terminates once the final receiver is
+ removed, this makes it convenient to respond to calls made in parallel:
+
+ .. code-block:: python
+
+ total = 0
+ recvs = [c.call_async(long_running_operation) for c in contexts]
+
+ for msg in mitogen.select.Select(recvs):
+ print('Got %s from %s' % (msg, msg.receiver))
+ total += msg.unpickle()
+
+ # Iteration ends when last Receiver yields a result.
+ print('Received total %s from %s receivers' % (total, len(recvs)))
+
+ :class:`Select` may drive a long-running scheduler:
+
+ .. code-block:: python
+
+ with mitogen.select.Select(oneshot=False) as select:
+ while running():
+ for msg in select:
+ process_result(msg.receiver.context, msg.unpickle())
+ for context, workfunc in get_new_work():
+ select.add(context.call_async(workfunc))
+
+ :class:`Select` may be nested:
+
+ .. code-block:: python
+
+ subselects = [
+ mitogen.select.Select(get_some_work()),
+ mitogen.select.Select(get_some_work()),
+ mitogen.select.Select([
+ mitogen.select.Select(get_some_work()),
+ mitogen.select.Select(get_some_work())
+ ])
+ ]
+
+ for msg in mitogen.select.Select(selects):
+ print(msg.unpickle())
+ """
+
+ notify = None
def __init__(self, receivers=(), oneshot=True):
self._receivers = []
@@ -47,14 +95,50 @@ class Select(object):
for recv in receivers:
self.add(recv)
+ @classmethod
+ def all(cls, receivers):
+ """
+ Take an iterable of receivers and retrieve a :class:`Message` from
+ each, returning the result of calling `msg.unpickle()` on each in turn.
+ Results are returned in the order they arrived.
+
+        This is sugar for handling batch :meth:`Context.call_async
+        <mitogen.parent.Context.call_async>` invocations:
+
+ .. code-block:: python
+
+ print('Total disk usage: %.02fMiB' % (sum(
+ mitogen.select.Select.all(
+ context.call_async(get_disk_usage)
+ for context in contexts
+                )
+            ) / 1048576.0,))
+
+        However, unlike with a naive comprehension such as:
+
+ .. code-block:: python
+
+ recvs = [c.call_async(get_disk_usage) for c in contexts]
+ sum(recv.get().unpickle() for recv in recvs)
+
+        results are processed in the order they arrive rather than the order
+        the requests were issued, so :meth:`all` should always be faster.
+ """
+ return list(msg.unpickle() for msg in cls(receivers))
+
def _put(self, value):
self._latch.put(value)
if self.notify:
self.notify(self)
def __bool__(self):
+ """
+ Return :data:`True` if any receivers are registered with this select.
+ """
return bool(self._receivers)
+ __nonzero__ = __bool__
+
def __enter__(self):
return self
@@ -62,6 +146,11 @@ class Select(object):
self.close()
def __iter__(self):
+ """
+ Yield the result of :meth:`get` until no receivers remain in the
+ select, either because `oneshot` is :data:`True`, or each receiver was
+ explicitly removed via :meth:`remove`.
+ """
while self._receivers:
yield self.get()
@@ -80,6 +169,14 @@ class Select(object):
owned_msg = 'Cannot add: Receiver is already owned by another Select'
def add(self, recv):
+ """
+ Add the :class:`mitogen.core.Receiver` or :class:`Select` `recv` to the
+ select.
+
+ :raises mitogen.select.Error:
+            An attempt was made to add a :class:`Select` of which this select
+            is already an indirect member.
+ """
if isinstance(recv, Select):
recv._check_no_loop(self)
@@ -95,6 +192,12 @@ class Select(object):
not_present_msg = 'Instance is not a member of this Select'
def remove(self, recv):
+ """
+ Remove the :class:`mitogen.core.Receiver` or :class:`Select` `recv`
+ from the select. Note that if the receiver has notified prior to
+ :meth:`remove`, it will still be returned by a subsequent :meth:`get`.
+ This may change in a future version.
+ """
try:
if recv.notify != self._put:
raise ValueError
@@ -104,16 +207,59 @@ class Select(object):
raise Error(self.not_present_msg)
def close(self):
+ """
+ Remove the select's notifier function from each registered receiver,
+ mark the associated latch as closed, and cause any thread currently
+ sleeping in :meth:`get` to be woken with
+ :class:`mitogen.core.LatchError`.
+
+ This is necessary to prevent memory leaks in long-running receivers. It
+ is called automatically when the Python :keyword:`with` statement is
+ used.
+ """
for recv in self._receivers[:]:
self.remove(recv)
self._latch.close()
def empty(self):
+ """
+ Return :data:`True` if calling :meth:`get` would block.
+
+ As with :class:`Queue.Queue`, :data:`True` may be returned even though
+ a subsequent call to :meth:`get` will succeed, since a message may be
+ posted at any moment between :meth:`empty` and :meth:`get`.
+
+ :meth:`empty` may return :data:`False` even when :meth:`get` would
+ block if another thread has drained a receiver added to this select.
+ This can be avoided by only consuming each receiver from a single
+ thread.
+ """
return self._latch.empty()
empty_msg = 'Cannot get(), Select instance is empty'
def get(self, timeout=None, block=True):
+ """
+ Fetch the next available value from any receiver, or raise
+ :class:`mitogen.core.TimeoutError` if no value is available within
+ `timeout` seconds.
+
+        On success, the message's :attr:`receiver
+        <mitogen.core.Message.receiver>` attribute is set to the receiver.
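+
+        A short sketch, assuming `select` was built from `call_async`
+        receivers:
+
+        .. code-block:: python
+
+            msg = select.get(timeout=10.0)
+            print(msg.receiver.context, msg.unpickle())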
+
+ :param float timeout:
+ Timeout in seconds.
+ :param bool block:
+ If :data:`False`, immediately raise
+ :class:`mitogen.core.TimeoutError` if the select is empty.
+ :return:
+ :class:`mitogen.core.Message`
+ :raises mitogen.core.TimeoutError:
+ Timeout was reached.
+ :raises mitogen.core.LatchError:
+ :meth:`close` has been called, and the underlying latch is no
+ longer valid.
+ """
if not self._receivers:
raise Error(self.empty_msg)
diff --git a/mitogen/service.py b/mitogen/service.py
index ffb7308e..c67b35e8 100644
--- a/mitogen/service.py
+++ b/mitogen/service.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
import grp
import os
import os.path
@@ -40,6 +42,16 @@ import mitogen.core
import mitogen.select
from mitogen.core import b
from mitogen.core import LOG
+from mitogen.core import str_rpartition
+
+try:
+ all
+except NameError:
+ def all(it):
+ for elem in it:
+ if not elem:
+ return False
+ return True
DEFAULT_POOL_SIZE = 16
@@ -52,9 +64,13 @@ _pool_lock = threading.Lock()
if mitogen.core.PY3:
def func_code(func):
return func.__code__
+ def func_name(func):
+ return func.__name__
else:
def func_code(func):
return func.func_code
+ def func_name(func):
+ return func.func_name
@mitogen.core.takes_router
@@ -64,7 +80,12 @@ def get_or_create_pool(size=None, router=None):
_pool_lock.acquire()
try:
if _pool_pid != os.getpid():
- _pool = Pool(router, [], size=size or DEFAULT_POOL_SIZE)
+ _pool = Pool(router, [], size=size or DEFAULT_POOL_SIZE,
+ overwrite=True)
+            # Listen for Broker shutdown, otherwise an unstopped Pool can
+            # leave 'zombie' processes behind after a crash.
+ mitogen.core.listen(router.broker, 'shutdown',
+ lambda: _pool.stop(join=False))
_pool_pid = os.getpid()
return _pool
finally:
@@ -187,7 +208,7 @@ class Activator(object):
)
def activate(self, pool, service_name, msg):
- mod_name, _, class_name = service_name.rpartition('.')
+ mod_name, _, class_name = str_rpartition(service_name, '.')
if msg and not self.is_permitted(mod_name, class_name, msg):
raise mitogen.core.CallError(self.not_active_msg, service_name)
@@ -244,7 +265,7 @@ class Invoker(object):
if no_reply:
LOG.exception('While calling no-reply method %s.%s',
type(self.service).__name__,
- method.func_name)
+ func_name(method))
else:
raise
@@ -395,11 +416,13 @@ class Service(object):
Called when a message arrives on any of :attr:`select`'s registered
receivers.
"""
+ pass
def on_shutdown(self):
"""
Called by Pool.shutdown() once the last worker thread has exitted.
"""
+ pass
class Pool(object):
@@ -426,12 +449,13 @@ class Pool(object):
"""
activator_class = Activator
- def __init__(self, router, services, size=1):
+ def __init__(self, router, services, size=1, overwrite=False):
self.router = router
self._activator = self.activator_class()
self._receiver = mitogen.core.Receiver(
router=router,
handle=mitogen.core.CALL_SERVICE,
+ overwrite=overwrite,
)
self._select = mitogen.select.Select(oneshot=False)
@@ -449,7 +473,7 @@ class Pool(object):
thread = threading.Thread(
name=name,
target=mitogen.core._profile_hook,
- args=(name, self._worker_main),
+ args=('mitogen.service.pool', self._worker_main),
)
thread.start()
self._threads.append(thread)
@@ -473,6 +497,7 @@ class Pool(object):
def stop(self, join=True):
self.closed = True
+ self._receiver.close()
self._select.close()
if join:
self.join()
@@ -533,7 +558,7 @@ class Pool(object):
msg = self._select.get()
except (mitogen.core.ChannelError, mitogen.core.LatchError):
e = sys.exc_info()[1]
- LOG.info('%r: channel or latch closed, exitting: %s', self, e)
+        LOG.debug('%r: channel or latch closed, exiting: %s', self, e)
return
func = self._func_by_recv[msg.receiver]
@@ -547,7 +572,7 @@ class Pool(object):
self._worker_run()
except Exception:
th = threading.currentThread()
- LOG.exception('%r: worker %r crashed', self, th.name)
+ LOG.exception('%r: worker %r crashed', self, th.getName())
raise
def __repr__(self):
@@ -555,7 +580,7 @@ class Pool(object):
return 'mitogen.service.Pool(%#x, size=%d, th=%r)' % (
id(self),
len(self._threads),
- th.name,
+ th.getName(),
)
@@ -588,6 +613,7 @@ class PushFileService(Service):
self._sent_by_stream = {}
def get(self, path):
+ assert isinstance(path, mitogen.core.UnicodeType)
self._lock.acquire()
try:
if path in self._cache:
@@ -614,7 +640,7 @@ class PushFileService(Service):
path=path,
context=context
).close()
- else:
+ elif path not in sent:
child.call_service_async(
service_name=self.name(),
method_name='store_and_forward',
@@ -622,6 +648,7 @@ class PushFileService(Service):
data=self._cache[path],
context=context
).close()
+ sent.add(path)
@expose(policy=AllowParents())
@arg_spec({
@@ -635,7 +662,7 @@ class PushFileService(Service):
with a set of small files and Python modules.
"""
for path in paths:
- self.propagate_to(context, path)
+ self.propagate_to(context, mitogen.core.to_text(path))
self.router.responder.forward_modules(context, modules)
@expose(policy=AllowParents())
@@ -664,7 +691,7 @@ class PushFileService(Service):
@expose(policy=AllowParents())
@no_reply()
@arg_spec({
- 'path': mitogen.core.FsPathTypes,
+ 'path': mitogen.core.UnicodeType,
'data': mitogen.core.Blob,
'context': mitogen.core.Context,
})
@@ -688,7 +715,7 @@ class PushFileService(Service):
if path not in self._cache:
LOG.error('%r: %r is not in local cache', self, path)
return
- self._forward(path, context)
+ self._forward(context, path)
class FileService(Service):
@@ -743,7 +770,7 @@ class FileService(Service):
proceed normally, without the associated thread needing to be
forcefully killed.
"""
- unregistered_msg = 'Path is not registered with FileService.'
+ unregistered_msg = 'Path %r is not registered with FileService.'
context_mismatch_msg = 'sender= kwarg context must match requestee context'
#: Burst size. With 1MiB and 10ms RTT max throughput is 100MiB/sec, which
@@ -752,8 +779,10 @@ class FileService(Service):
def __init__(self, router):
super(FileService, self).__init__(router)
- #: Mapping of registered path -> file size.
- self._metadata_by_path = {}
+ #: Set of registered paths.
+ self._paths = set()
+ #: Set of registered directory prefixes.
+ self._prefixes = set()
#: Mapping of Stream->FileStreamState.
self._state_by_stream = {}
@@ -770,26 +799,43 @@ class FileService(Service):
def register(self, path):
"""
Authorize a path for access by children. Repeat calls with the same
- path is harmless.
+        path have no effect.
:param str path:
File path.
"""
- if path in self._metadata_by_path:
- return
+ if path not in self._paths:
+ LOG.debug('%r: registering %r', self, path)
+ self._paths.add(path)
+
+ @expose(policy=AllowParents())
+ @arg_spec({
+ 'path': mitogen.core.FsPathTypes,
+ })
+ def register_prefix(self, path):
+ """
+ Authorize a path and any subpaths for access by children. Repeat calls
+        with the same path have no effect.
+
+ :param str path:
+ File path.
+ """
+ if path not in self._prefixes:
+ LOG.debug('%r: registering prefix %r', self, path)
+ self._prefixes.add(path)
+ def _generate_stat(self, path):
st = os.stat(path)
if not stat.S_ISREG(st.st_mode):
raise IOError('%r is not a regular file.' % (path,))
- LOG.debug('%r: registering %r', self, path)
- self._metadata_by_path[path] = {
- 'size': st.st_size,
- 'mode': st.st_mode,
- 'owner': self._name_or_none(pwd.getpwuid, 0, 'pw_name'),
- 'group': self._name_or_none(grp.getgrgid, 0, 'gr_name'),
- 'mtime': st.st_mtime,
- 'atime': st.st_atime,
+ return {
+ u'size': st.st_size,
+ u'mode': st.st_mode,
+            u'owner': self._name_or_none(pwd.getpwuid, st.st_uid, 'pw_name'),
+            u'group': self._name_or_none(grp.getgrgid, st.st_gid, 'gr_name'),
+ u'mtime': float(st.st_mtime), # Python 2.4 uses int.
+ u'atime': float(st.st_atime), # Python 2.4 uses int.
}
def on_shutdown(self):
@@ -841,6 +887,24 @@ class FileService(Service):
fp.close()
state.jobs.pop(0)
+ def _prefix_is_authorized(self, path):
+ """
+        Return :data:`True` if a directory prefix of `path` was registered
+        via :meth:`register_prefix`. :func:`os.path.abspath` is used to
+        ensure the path is absolute before walking its prefixes.
+
+        :param str path:
+            The path.
+        :returns: :data:`True` if a registered prefix matches `path`.
+ """
+ path = os.path.abspath(path)
+ while True:
+ if path in self._prefixes:
+ return True
+ if path == '/':
+ break
+ path = os.path.dirname(path)
+ return False
+
@expose(policy=AllowAny())
@no_reply()
@arg_spec({
@@ -867,26 +931,33 @@ class FileService(Service):
:raises Error:
Unregistered path, or Sender did not match requestee context.
"""
- if path not in self._metadata_by_path:
- raise Error(self.unregistered_msg)
+ if path not in self._paths and not self._prefix_is_authorized(path):
+ msg.reply(mitogen.core.CallError(
+ Error(self.unregistered_msg % (path,))
+ ))
+ return
+
if msg.src_id != sender.context.context_id:
- raise Error(self.context_mismatch_msg)
+ msg.reply(mitogen.core.CallError(
+ Error(self.context_mismatch_msg)
+ ))
+ return
LOG.debug('Serving %r', path)
+
+ # Response must arrive first so requestee can begin receive loop,
+ # otherwise first ack won't arrive until all pending chunks were
+ # delivered. In that case max BDP would always be 128KiB, aka. max
+ # ~10Mbit/sec over a 100ms link.
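+        # (Worked example: 128KiB per 100ms round trip = 1.25MiB/sec,
+        # i.e. roughly 10Mbit/sec.)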
try:
fp = open(path, 'rb', self.IO_SIZE)
+ msg.reply(self._generate_stat(path))
except IOError:
msg.reply(mitogen.core.CallError(
sys.exc_info()[1]
))
return
- # Response must arrive first so requestee can begin receive loop,
- # otherwise first ack won't arrive until all pending chunks were
- # delivered. In that case max BDP would always be 128KiB, aka. max
- # ~10Mbit/sec over a 100ms link.
- msg.reply(self._metadata_by_path[path])
-
stream = self.router.stream_by_id(sender.context.context_id)
state = self._state_by_stream.setdefault(stream, FileStreamState())
state.lock.acquire()
@@ -934,8 +1005,12 @@ class FileService(Service):
:param bytes out_path:
Name of the output path on the local disk.
:returns:
- :data:`True` on success, or :data:`False` if the transfer was
- interrupted and the output should be discarded.
+ Tuple of (`ok`, `metadata`), where `ok` is :data:`True` on success,
+ or :data:`False` if the transfer was interrupted and the output
+ should be discarded.
+
+ `metadata` is a dictionary of file metadata as documented in
+ :meth:`fetch`.
"""
LOG.debug('get_file(): fetching %r from %r', path, context)
t0 = time.time()
@@ -947,6 +1022,7 @@ class FileService(Service):
sender=recv.to_sender(),
)
+ received_bytes = 0
for chunk in recv:
s = chunk.unpickle()
LOG.debug('get_file(%r): received %d bytes', path, len(s))
@@ -956,11 +1032,19 @@ class FileService(Service):
size=len(s),
).close()
out_fp.write(s)
+ received_bytes += len(s)
- ok = out_fp.tell() == metadata['size']
- if not ok:
+ ok = received_bytes == metadata['size']
+ if received_bytes < metadata['size']:
LOG.error('get_file(%r): receiver was closed early, controller '
- 'is likely shutting down.', path)
+ 'may be shutting down, or the file was truncated '
+ 'during transfer. Expected %d bytes, received %d.',
+ path, metadata['size'], received_bytes)
+ elif received_bytes > metadata['size']:
+ LOG.error('get_file(%r): the file appears to have grown '
+ 'while transfer was in progress. Expected %d '
+ 'bytes, received %d.',
+ path, metadata['size'], received_bytes)
LOG.debug('target.get_file(): fetched %d bytes of %r from %r in %dms',
metadata['size'], path, context, 1000 * (time.time() - t0))
diff --git a/mitogen/setns.py b/mitogen/setns.py
index be87e063..d38aa092 100644
--- a/mitogen/setns.py
+++ b/mitogen/setns.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
import ctypes
import grp
import logging
@@ -69,7 +71,7 @@ def _run_command(args):
output, _ = proc.communicate()
if not proc.returncode:
- return output
+ return output.decode('utf-8', 'replace')
raise Error("%s exitted with status %d: %s",
mitogen.parent.Argv(args), proc.returncode, output)
@@ -223,11 +225,14 @@ class Stream(mitogen.parent.Stream):
def create_child(self, args):
return mitogen.parent.create_child(args, preexec_fn=self.preexec_fn)
+ def _get_name(self):
+ return u'setns.' + self.container
+
def connect(self):
+ self.name = self._get_name()
attr, func = self.GET_LEADER_BY_KIND[self.kind]
tool_path = getattr(self, attr)
self.leader_pid = func(tool_path, self.container)
LOG.debug('Leader PID for %s container %r: %d',
self.kind, self.container, self.leader_pid)
super(Stream, self).connect()
- self.name = u'setns.' + self.container
diff --git a/mitogen/ssh.py b/mitogen/ssh.py
index ee97425b..47c90fff 100644
--- a/mitogen/ssh.py
+++ b/mitogen/ssh.py
@@ -26,12 +26,14 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
"""
Functionality to allow establishing new slave contexts over an SSH connection.
"""
import logging
-import time
+import re
try:
from shlex import quote as shlex_quote
@@ -40,16 +42,28 @@ except ImportError:
import mitogen.parent
from mitogen.core import b
+from mitogen.core import bytes_partition
+
+try:
+ any
+except NameError:
+ from mitogen.core import any
LOG = logging.getLogger('mitogen')
# sshpass uses 'assword' because it doesn't lowercase the input.
PASSWORD_PROMPT = b('password')
-PERMDENIED_PROMPT = b('permission denied')
HOSTKEY_REQ_PROMPT = b('are you sure you want to continue connecting (yes/no)?')
HOSTKEY_FAIL = b('host key verification failed.')
+# [user@host: ] permission denied
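+# Matches at the start of a line, e.g. b'Permission denied (publickey).' or
+# b'someuser@somehost: Permission denied (publickey,password).'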
+PERMDENIED_RE = re.compile(
+ ('(?:[^@]+@[^:]+: )?' # Absent in OpenSSH <7.5
+ 'Permission denied').encode(),
+ re.I
+)
+
DEBUG_PREFIXES = (b('debug1:'), b('debug2:'), b('debug3:'))
@@ -85,18 +99,19 @@ def filter_debug(stream, it):
# interesting token from above or the bootstrap
# ('password', 'MITO000\n').
break
- elif buf.startswith(DEBUG_PREFIXES):
+ elif any(buf.startswith(p) for p in DEBUG_PREFIXES):
state = 'in_debug'
else:
state = 'in_plain'
elif state == 'in_debug':
if b('\n') not in buf:
break
- line, _, buf = buf.partition(b('\n'))
- LOG.debug('%r: %s', stream, line.rstrip())
+ line, _, buf = bytes_partition(buf, b('\n'))
+ LOG.debug('%s: %s', stream.name,
+ mitogen.core.to_text(line.rstrip()))
state = 'start_of_line'
elif state == 'in_plain':
- line, nl, buf = buf.partition(b('\n'))
+ line, nl, buf = bytes_partition(buf, b('\n'))
yield line + nl, not (nl or buf)
if nl:
state = 'start_of_line'
@@ -120,10 +135,6 @@ class Stream(mitogen.parent.Stream):
#: Number of -v invocations to pass on command line.
ssh_debug_level = 0
- #: If batch_mode=False, points to the corresponding DiagLogStream, allowing
- #: it to be disconnected at the same time this stream is being torn down.
- tty_stream = None
-
#: The path to the SSH binary.
ssh_path = 'ssh'
@@ -188,11 +199,6 @@ class Stream(mitogen.parent.Stream):
'stderr_pipe': True,
}
- def on_disconnect(self, broker):
- if self.tty_stream is not None:
- self.tty_stream.on_disconnect(broker)
- super(Stream, self).on_disconnect(broker)
-
def get_boot_command(self):
bits = [self.ssh_path]
if self.ssh_debug_level:
@@ -235,11 +241,11 @@ class Stream(mitogen.parent.Stream):
base = super(Stream, self).get_boot_command()
return bits + [shlex_quote(s).strip() for s in base]
- def connect(self):
- super(Stream, self).connect()
- self.name = u'ssh.' + mitogen.core.to_text(self.hostname)
+ def _get_name(self):
+ s = u'ssh.' + mitogen.core.to_text(self.hostname)
if self.port:
- self.name += u':%s' % (self.port,)
+ s += u':%s' % (self.port,)
+ return s
auth_incorrect_msg = 'SSH authentication is incorrect'
password_incorrect_msg = 'SSH password is incorrect'
@@ -257,8 +263,8 @@ class Stream(mitogen.parent.Stream):
def _host_key_prompt(self):
if self.check_host_keys == 'accept':
- LOG.debug('%r: accepting host key', self)
- self.tty_stream.transmit_side.write(b('y\n'))
+ LOG.debug('%s: accepting host key', self.name)
+ self.diag_stream.transmit_side.write(b('yes\n'))
return
# _host_key_prompt() should never be reached with ignore or enforce
@@ -266,22 +272,10 @@ class Stream(mitogen.parent.Stream):
# with ours.
raise HostKeyError(self.hostkey_config_msg)
- def _ec0_received(self):
- if self.tty_stream is not None:
- self._router.broker.start_receive(self.tty_stream)
- return super(Stream, self)._ec0_received()
-
- def _connect_bootstrap(self, extra_fd):
- fds = [self.receive_side.fd]
- if extra_fd is not None:
- self.tty_stream = mitogen.parent.DiagLogStream(extra_fd, self)
- fds.append(extra_fd)
-
- it = mitogen.parent.iter_read(fds=fds, deadline=self.connect_deadline)
-
+ def _connect_input_loop(self, it):
password_sent = False
for buf, partial in filter_debug(self, it):
- LOG.debug('%r: received %r', self, buf)
+ LOG.debug('%s: stdout: %s', self.name, buf.rstrip())
if buf.endswith(self.EC0_MARKER):
self._ec0_received()
return
@@ -289,11 +283,7 @@ class Stream(mitogen.parent.Stream):
self._host_key_prompt()
elif HOSTKEY_FAIL in buf.lower():
raise HostKeyError(self.hostkey_failed_msg)
- elif buf.lower().startswith((
- PERMDENIED_PROMPT,
- b("%s@%s: " % (self.username, self.hostname))
- + PERMDENIED_PROMPT,
- )):
+ elif PERMDENIED_RE.match(buf):
# issue #271: work around conflict with user shell reporting
# 'permission denied' e.g. during chdir($HOME) by only matching
# it at the start of the line.
@@ -307,10 +297,21 @@ class Stream(mitogen.parent.Stream):
elif partial and PASSWORD_PROMPT in buf.lower():
if self.password is None:
raise PasswordError(self.password_required_msg)
- LOG.debug('%r: sending password', self)
- self.tty_stream.transmit_side.write(
+ LOG.debug('%s: sending password', self.name)
+ self.diag_stream.transmit_side.write(
(self.password + '\n').encode()
)
password_sent = True
raise mitogen.core.StreamError('bootstrap failed')
+
+ def _connect_bootstrap(self):
+ fds = [self.receive_side.fd]
+ if self.diag_stream is not None:
+ fds.append(self.diag_stream.receive_side.fd)
+
+ it = mitogen.parent.iter_read(fds=fds, deadline=self.connect_deadline)
+ try:
+ self._connect_input_loop(it)
+ finally:
+ it.close()
diff --git a/mitogen/su.py b/mitogen/su.py
index 7e2e5f08..7eff60a6 100644
--- a/mitogen/su.py
+++ b/mitogen/su.py
@@ -26,13 +26,19 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
import logging
-import os
import mitogen.core
import mitogen.parent
from mitogen.core import b
+try:
+ any
+except NameError:
+ from mitogen.core import any
+
LOG = logging.getLogger(__name__)
@@ -60,6 +66,7 @@ class Stream(mitogen.parent.Stream):
b('su: sorry'), # BSD
b('su: authentication failure'), # Linux
b('su: incorrect password'), # CentOS 6
+ b('authentication is denied'), # AIX
)
def construct(self, username=None, password=None, su_path=None,
@@ -76,12 +83,8 @@ class Stream(mitogen.parent.Stream):
if incorrect_prompts is not None:
self.incorrect_prompts = map(str.lower, incorrect_prompts)
- def connect(self):
- super(Stream, self).connect()
- self.name = u'su.' + mitogen.core.to_text(self.username)
-
- def on_disconnect(self, broker):
- super(Stream, self).on_disconnect(broker)
+ def _get_name(self):
+ return u'su.' + mitogen.core.to_text(self.username)
def get_boot_command(self):
argv = mitogen.parent.Argv(super(Stream, self).get_boot_command())
@@ -90,12 +93,8 @@ class Stream(mitogen.parent.Stream):
password_incorrect_msg = 'su password is incorrect'
password_required_msg = 'su password is required'
- def _connect_bootstrap(self, extra_fd):
+ def _connect_input_loop(self, it):
password_sent = False
- it = mitogen.parent.iter_read(
- fds=[self.receive_side.fd],
- deadline=self.connect_deadline,
- )
for buf in it:
LOG.debug('%r: received %r', self, buf)
@@ -115,4 +114,15 @@ class Stream(mitogen.parent.Stream):
mitogen.core.to_text(self.password + '\n').encode('utf-8')
)
password_sent = True
+
raise mitogen.core.StreamError('bootstrap failed')
+
+ def _connect_bootstrap(self):
+ it = mitogen.parent.iter_read(
+ fds=[self.receive_side.fd],
+ deadline=self.connect_deadline,
+ )
+ try:
+ self._connect_input_loop(it)
+ finally:
+ it.close()
diff --git a/mitogen/sudo.py b/mitogen/sudo.py
index c410dac9..05a04989 100644
--- a/mitogen/sudo.py
+++ b/mitogen/sudo.py
@@ -26,10 +26,12 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
+import base64
import logging
import optparse
-import os
-import time
+import re
import mitogen.core
import mitogen.parent
@@ -37,6 +39,73 @@ from mitogen.core import b
LOG = logging.getLogger(__name__)
+
+# These are base64-encoded UTF-8 as our existing minifier/module server
+# struggles with Unicode Python source in some (forgotten) circumstances.
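+# For example, base64.b64decode('cGFzc3dvcmQ=').decode('utf-8') == u'password'.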
+PASSWORD_PROMPTS = [
+ 'cGFzc3dvcmQ=', # english
+ 'bG96aW5rYQ==', # sr@latin.po
+ '44OR44K544Ov44O844OJ', # ja.po
+ '4Kaq4Ka+4Ka44KaT4Kef4Ka+4Kaw4KeN4Kah', # bn.po
+ '2YPZhNmF2Kkg2KfZhNiz2LE=', # ar.po
+ 'cGFzYWhpdHph', # eu.po
+ '0L/QsNGA0L7Qu9GM', # uk.po
+ 'cGFyb29s', # et.po
+ 'c2FsYXNhbmE=', # fi.po
+ '4Kiq4Ki+4Ki44Ki14Kiw4Kih', # pa.po
+ 'Y29udHJhc2lnbm8=', # ia.po
+ 'Zm9jYWwgZmFpcmU=', # ga.po
+ '16HXodee15Q=', # he.po
+ '4Kqq4Kq+4Kq44Kq14Kqw4KuN4Kqh', # gu.po
+ '0L/QsNGA0L7Qu9Cw', # bg.po
+ '4Kyq4K2N4Kyw4Kys4K2H4Ky2IOCsuOCsmeCtjeCsleCth+CspA==', # or.po
+ '4K6V4K6f4K614K+B4K6a4K+N4K6a4K+K4K6y4K+N', # ta.po
+ 'cGFzc3dvcnQ=', # de.po
+ '7JWU7Zi4', # ko.po
+ '0LvQvtC30LjQvdC60LA=', # sr.po
+ 'beG6rXQga2jhuql1', # vi.po
+ 'c2VuaGE=', # pt_BR.po
+ 'cGFzc3dvcmQ=', # it.po
+ 'aGVzbG8=', # cs.po
+ '5a+G56K877ya', # zh_TW.po
+ 'aGVzbG8=', # sk.po
+ '4LC44LCC4LCV4LGH4LCk4LCq4LCm4LCu4LGB', # te.po
+ '0L/QsNGA0L7Qu9GM', # kk.po
+ 'aGFzxYJv', # pl.po
+ 'Y29udHJhc2VueWE=', # ca.po
+ 'Y29udHJhc2XDsWE=', # es.po
+ '4LSF4LSf4LSv4LS+4LSz4LS14LS+4LSV4LWN4LSV4LWN', # ml.po
+ 'c2VuaGE=', # pt.po
+ '5a+G56CB77ya', # zh_CN.po
+ '4KSX4KWB4KSq4KWN4KSk4KS24KSs4KWN4KSm', # mr.po
+ 'bMO2c2Vub3Jk', # sv.po
+ '4YOe4YOQ4YOg4YOd4YOa4YOY', # ka.po
+ '4KS24KSs4KWN4KSm4KSV4KWC4KSf', # hi.po
+ 'YWRnYW5nc2tvZGU=', # da.po
+ '4La74LeE4LeD4LeK4La04Lav4La6', # si.po
+ 'cGFzc29yZA==', # nb.po
+ 'd2FjaHR3b29yZA==', # nl.po
+ '4Kaq4Ka+4Ka44KaT4Kef4Ka+4Kaw4KeN4Kah', # bn_IN.po
+ 'cGFyb2xh', # tr.po
+ '4LKX4LOB4LKq4LON4LKk4LKq4LKm', # kn.po
+ 'c2FuZGk=', # id.po
+ '0L/QsNGA0L7Qu9GM', # ru.po
+ 'amVsc3rDsw==', # hu.po
+ 'bW90IGRlIHBhc3Nl', # fr.po
+ 'aXBoYXNpd2VkaQ==', # zu.po
+ '4Z6W4Z624Z6A4Z+S4Z6Z4Z6f4Z6Y4Z+S4Z6E4Z624Z6P4Z+LwqDhn5Y=', # km.po
+ '4KaX4KeB4Kaq4KeN4Kak4Ka24Kas4KeN4Kam', # as.po
+]
+
+
+PASSWORD_PROMPT_RE = re.compile(
+ u'|'.join(
+ base64.b64decode(s).decode('utf-8')
+ for s in PASSWORD_PROMPTS
+ )
+)
+
+
PASSWORD_PROMPT = b('password')
SUDO_OPTIONS = [
#(False, 'bool', '--askpass', '-A')
@@ -55,7 +124,10 @@ SUDO_OPTIONS = [
#(False, 'bool', '--list', '-l')
#(False, 'bool', '--preserve-groups', '-P')
#(False, 'str', '--prompt', '-p')
- #(False, 'str', '--role', '-r')
+
+ # SELinux options. Passed through as-is.
+ (False, 'str', '--role', '-r'),
+ (False, 'str', '--type', '-t'),
# These options are supplied by default by Ansible, but are ignored, as
# sudo always runs under a TTY with Mitogen.
@@ -63,9 +135,8 @@ SUDO_OPTIONS = [
(True, 'bool', '--non-interactive', '-n'),
#(False, 'str', '--shell', '-s')
- #(False, 'str', '--type', '-t')
#(False, 'str', '--other-user', '-U')
- #(False, 'str', '--user', '-u')
+ (False, 'str', '--user', '-u'),
#(False, 'bool', '--version', '-V')
#(False, 'bool', '--validate', '-v')
]
@@ -103,14 +174,17 @@ class PasswordError(mitogen.core.StreamError):
pass
+def option(default, *args):
+ for arg in args:
+ if arg is not None:
+ return arg
+ return default
+
+
class Stream(mitogen.parent.Stream):
create_child = staticmethod(mitogen.parent.hybrid_tty_create_child)
child_is_immediate_subprocess = False
- #: Once connected, points to the corresponding DiagLogStream, allowing it to
- #: be disconnected at the same time this stream is being torn down.
- tty_stream = None
-
sudo_path = 'sudo'
username = 'root'
password = None
@@ -118,32 +192,27 @@ class Stream(mitogen.parent.Stream):
set_home = False
login = False
+ selinux_role = None
+ selinux_type = None
+
def construct(self, username=None, sudo_path=None, password=None,
preserve_env=None, set_home=None, sudo_args=None,
- login=None, **kwargs):
+ login=None, selinux_role=None, selinux_type=None, **kwargs):
super(Stream, self).construct(**kwargs)
opts = parse_sudo_flags(sudo_args or [])
- if username is not None:
- self.username = username
- if sudo_path is not None:
- self.sudo_path = sudo_path
- if password is not None:
- self.password = password
- if (preserve_env or opts.preserve_env) is not None:
- self.preserve_env = preserve_env or opts.preserve_env
- if (set_home or opts.set_home) is not None:
- self.set_home = set_home or opts.set_home
- if (login or opts.login) is not None:
- self.login = True
-
- def connect(self):
- super(Stream, self).connect()
- self.name = u'sudo.' + mitogen.core.to_text(self.username)
-
- def on_disconnect(self, broker):
- self.tty_stream.on_disconnect(broker)
- super(Stream, self).on_disconnect(broker)
+ self.username = option(self.username, username, opts.user)
+ self.sudo_path = option(self.sudo_path, sudo_path)
+ self.password = password or None
+ self.preserve_env = option(self.preserve_env,
+ preserve_env, opts.preserve_env)
+ self.set_home = option(self.set_home, set_home, opts.set_home)
+ self.login = option(self.login, login, opts.login)
+ self.selinux_role = option(self.selinux_role, selinux_role, opts.role)
+ self.selinux_type = option(self.selinux_type, selinux_type, opts.type)
+
+ def _get_name(self):
+ return u'sudo.' + mitogen.core.to_text(self.username)
def get_boot_command(self):
# Note: sudo did not introduce long-format option processing until July
@@ -156,35 +225,53 @@ class Stream(mitogen.parent.Stream):
bits += ['-H']
if self.login:
bits += ['-i']
+ if self.selinux_role:
+ bits += ['-r', self.selinux_role]
+ if self.selinux_type:
+ bits += ['-t', self.selinux_type]
- bits = bits + super(Stream, self).get_boot_command()
+ bits = bits + ['--'] + super(Stream, self).get_boot_command()
LOG.debug('sudo command line: %r', bits)
return bits
password_incorrect_msg = 'sudo password is incorrect'
password_required_msg = 'sudo password is required'
- def _connect_bootstrap(self, extra_fd):
- self.tty_stream = mitogen.parent.DiagLogStream(extra_fd, self)
-
+ def _connect_input_loop(self, it):
password_sent = False
- it = mitogen.parent.iter_read(
- fds=[self.receive_side.fd, extra_fd],
- deadline=self.connect_deadline,
- )
for buf in it:
- LOG.debug('%r: received %r', self, buf)
+ LOG.debug('%s: received %r', self.name, buf)
if buf.endswith(self.EC0_MARKER):
self._ec0_received()
return
- elif PASSWORD_PROMPT in buf.lower():
+
+            match = PASSWORD_PROMPT_RE.search(
+                buf.decode('utf-8', 'replace').lower()
+            )
+ if match is not None:
+ LOG.debug('%s: matched password prompt %r',
+ self.name, match.group(0))
if self.password is None:
raise PasswordError(self.password_required_msg)
if password_sent:
raise PasswordError(self.password_incorrect_msg)
- self.tty_stream.transmit_side.write(
- mitogen.core.to_text(self.password + '\n').encode('utf-8')
+ self.diag_stream.transmit_side.write(
+ (mitogen.core.to_text(self.password) + '\n').encode('utf-8')
)
password_sent = True
+
raise mitogen.core.StreamError('bootstrap failed')
+
+ def _connect_bootstrap(self):
+ fds = [self.receive_side.fd]
+ if self.diag_stream is not None:
+ fds.append(self.diag_stream.receive_side.fd)
+
+ it = mitogen.parent.iter_read(
+ fds=fds,
+ deadline=self.connect_deadline,
+ )
+
+ try:
+ self._connect_input_loop(it)
+ finally:
+ it.close()
diff --git a/mitogen/unix.py b/mitogen/unix.py
index 4a4dfb65..3e315d6f 100644
--- a/mitogen/unix.py
+++ b/mitogen/unix.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
"""
Permit connection of additional contexts that may act with the authority of
this context. For now, the UNIX socket is always mode 0600, i.e. can only be
@@ -49,20 +51,30 @@ from mitogen.core import LOG
def is_path_dead(path):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
- s.connect(path)
- except socket.error:
- e = sys.exc_info()[1]
- return e[0] in (errno.ECONNREFUSED, errno.ENOENT)
+ try:
+ s.connect(path)
+ except socket.error:
+ e = sys.exc_info()[1]
+ return e.args[0] in (errno.ECONNREFUSED, errno.ENOENT)
+ finally:
+ s.close()
return False
def make_socket_path():
- return tempfile.mktemp(prefix='mitogen_unix_')
+ return tempfile.mktemp(prefix='mitogen_unix_', suffix='.sock')
class Listener(mitogen.core.BasicStream):
keep_alive = True
+ def __repr__(self):
+ return '%s.%s(%r)' % (
+ __name__,
+ self.__class__.__name__,
+ self.path,
+ )
+
def __init__(self, router, path=None, backlog=100):
self._router = router
self.path = path or make_socket_path()
@@ -78,11 +90,26 @@ class Listener(mitogen.core.BasicStream):
self.receive_side = mitogen.core.Side(self, self._sock.fileno())
router.broker.start_receive(self)
+ def _unlink_socket(self):
+ try:
+ os.unlink(self.path)
+ except OSError:
+ e = sys.exc_info()[1]
+ # Prevent a shutdown race with the parent process.
+ if e.args[0] != errno.ENOENT:
+ raise
+
+ def on_shutdown(self, broker):
+ broker.stop_receive(self)
+ self._unlink_socket()
+ self._sock.close()
+ self.receive_side.closed = True
+
def _accept_client(self, sock):
sock.setblocking(True)
try:
pid, = struct.unpack('>L', sock.recv(4))
- except socket.error:
+ except (struct.error, socket.error):
LOG.error('%r: failed to read remote identity: %s',
self, sys.exc_info()[1])
return
@@ -102,6 +129,7 @@ class Listener(mitogen.core.BasicStream):
self, pid, sys.exc_info()[1])
return
+ LOG.debug('%r: accepted %r', self, stream)
stream.accept(sock.fileno(), sock.fileno())
self._router.register(context, stream)
diff --git a/mitogen/utils.py b/mitogen/utils.py
index 4fd80aa1..6c56d6d5 100644
--- a/mitogen/utils.py
+++ b/mitogen/utils.py
@@ -26,6 +26,8 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+# !mitogen: minify_safe
+
import datetime
import logging
import os
@@ -34,6 +36,7 @@ import sys
import mitogen
import mitogen.core
import mitogen.master
+import mitogen.parent
LOG = logging.getLogger('mitogen')
@@ -45,7 +48,33 @@ else:
iteritems = dict.iteritems
+def setup_gil():
+ """
+ Set extremely long GIL release interval to let threads naturally progress
+ through CPU-heavy sequences without forcing the wake of another thread that
+ may contend trying to run the same CPU-heavy code. For the new-style
+ Ansible work, this drops runtime ~33% and involuntary context switches by
+ >80%, essentially making threads cooperatively scheduled.
+ """
+ try:
+ # Python 2.
+ sys.setcheckinterval(100000)
+ except AttributeError:
+ pass
+
+ try:
+ # Python 3.
+ sys.setswitchinterval(10)
+ except AttributeError:
+ pass
+
+
def disable_site_packages():
+ """
+ Remove all entries mentioning ``site-packages`` or ``Extras`` from
+ :attr:sys.path. Used primarily for testing on OS X within a virtualenv,
+ where OS X bundles some ancient version of the :mod:`six` module.
+ """
for entry in sys.path[:]:
if 'site-packages' in entry or 'Extras' in entry:
sys.path.remove(entry)
@@ -57,7 +86,9 @@ def _formatTime(record, datefmt=None):
def log_get_formatter():
- datefmt = '%H:%M:%S.%f'
+ datefmt = '%H:%M:%S'
+ if sys.version_info > (2, 6):
+ datefmt += '.%f'
fmt = '%(asctime)s %(levelname).1s %(name)s: %(message)s'
formatter = logging.Formatter(fmt, datefmt)
formatter.formatTime = _formatTime
@@ -65,6 +96,26 @@ def log_get_formatter():
def log_to_file(path=None, io=False, level='INFO'):
+ """
+    Install a new :class:`logging.Handler` writing application logs to the
+ filesystem. Useful when debugging slave IO problems.
+
+ Parameters to this function may be overridden at runtime using environment
+ variables. See :ref:`logging-env-vars`.
+
+ :param str path:
+ If not :data:`None`, a filesystem path to write logs to. Otherwise,
+ logs are written to :data:`sys.stderr`.
+
+ :param bool io:
+ If :data:`True`, include extremely verbose IO logs in the output.
+ Useful for debugging hangs, less useful for debugging application code.
+
+ :param str level:
+ Name of the :mod:`logging` package constant that is the minimum level
+ to log at. Useful levels are ``DEBUG``, ``INFO``, ``WARNING``, and
+ ``ERROR``.
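+
+    For example:
+
+    .. code-block:: python
+
+        mitogen.utils.log_to_file('/tmp/mitogen.log', io=True, level='DEBUG')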
+ """
log = logging.getLogger('')
if path:
fp = open(path, 'w', 1)
@@ -94,6 +145,14 @@ def log_to_file(path=None, io=False, level='INFO'):
def run_with_router(func, *args, **kwargs):
+ """
+ Arrange for `func(router, *args, **kwargs)` to run with a temporary
+ :class:`mitogen.master.Router`, ensuring the Router and Broker are
+ correctly shut down during normal or exceptional return.
+
+ :returns:
+ `func`'s return value.
+ """
broker = mitogen.master.Broker()
router = mitogen.master.Router(broker)
try:
@@ -104,6 +163,17 @@ def run_with_router(func, *args, **kwargs):
def with_router(func):
+ """
+ Decorator version of :func:`run_with_router`. Example:
+
+ .. code-block:: python
+
+ @with_router
+ def do_stuff(router, arg):
+ pass
+
+ do_stuff(blah, 123)
+ """
def wrapper(*args, **kwargs):
return run_with_router(func, *args, **kwargs)
if mitogen.core.PY3:
@@ -122,7 +192,27 @@ PASSTHROUGH = (
mitogen.core.Secret,
)
+
def cast(obj):
+ """
+ Many tools love to subclass built-in types in order to implement useful
+ functionality, such as annotating the safety of a Unicode string, or adding
+ additional methods to a dict. However, cPickle loves to preserve those
+    subtypes during serialization, resulting in CallError during :meth:`call
+    <mitogen.parent.Context.call>` in the target when it tries to deserialize
+ the data.
+
+ This function walks the object graph `obj`, producing a copy with any
+    custom sub-types removed. This functionality is not the default since the
+    walk may be computationally expensive given a large enough graph.
+
+ See :ref:`serialization-rules` for a list of supported types.
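+
+    A minimal sketch, using a hypothetical ``TaintedStr`` subclass to stand
+    in for types like Ansible's ``AnsibleUnsafeText``:
+
+    .. code-block:: python
+
+        class TaintedStr(str):
+            pass
+
+        # cast() strips the subtype, leaving plain builtins that pickle
+        # cleanly on the remote side.
+        clean = mitogen.utils.cast({TaintedStr('k'): [TaintedStr('v')]})
+        assert clean == {'k': ['v']}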
+
+ :param obj:
+ Object to undecorate.
+ :returns:
+ Undecorated object.
+ """
if isinstance(obj, dict):
return dict((cast(k), cast(v)) for k, v in iteritems(obj))
if isinstance(obj, (list, tuple)):
diff --git a/preamble_size.py b/preamble_size.py
index bf3b5950..ead5af85 100644
--- a/preamble_size.py
+++ b/preamble_size.py
@@ -16,6 +16,9 @@ import mitogen.service
import mitogen.ssh
import mitogen.sudo
+import ansible_mitogen.runner
+import ansible_mitogen.target
+
router = mitogen.master.Router()
context = mitogen.parent.Context(router, 0)
stream = mitogen.ssh.Stream(router, 0, max_message_size=0, hostname='foo')
@@ -31,7 +34,7 @@ if '--dump' in sys.argv:
print(
- ' '
+ ' '
' '
' Original '
' '
@@ -42,6 +45,9 @@ print(
for mod in (
mitogen.parent,
+ mitogen.fork,
+ ansible_mitogen.target,
+ ansible_mitogen.runner,
mitogen.ssh,
mitogen.sudo,
mitogen.select,
@@ -56,7 +62,7 @@ for mod in (
compressed = zlib.compress(minimized, 9)
compressed_size = len(compressed)
print(
- '%-15s'
+ '%-25s'
' '
'%5i %4.1fKiB'
' '
diff --git a/run_tests b/run_tests
index 65bf1fef..b583af3b 100755
--- a/run_tests
+++ b/run_tests
@@ -1,23 +1,55 @@
-#/usr/bin/env bash
+#!/usr/bin/env bash
+
+# From https://unix.stackexchange.com/a/432145
+# Return the maximum of one or more integer arguments
+max() {
+ local max number
+
+ max="$1"
+
+ for number in "${@:2}"; do
+ if ((number > max)); then
+ max="$number"
+ fi
+ done
+
+ printf '%d\n' "$max"
+}
echo '----- ulimits -----'
ulimit -a
echo '-------------------'
echo
-set -o errexit
+# Don't use errexit, so coverage report is still generated when tests fail
set -o pipefail
-UNIT2="$(which unit2)"
+NOCOVERAGE="${NOCOVERAGE:-}"
+NOCOVERAGE_ERASE="${NOCOVERAGE_ERASE:-$NOCOVERAGE}"
+NOCOVERAGE_REPORT="${NOCOVERAGE_REPORT:-$NOCOVERAGE}"
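+
+# e.g. "NOCOVERAGE=1 ./run_tests" runs both suites with coverage collection,
+# erasure, and reporting all disabled.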
+
+if [ ! "$UNIT2" ]; then
+ UNIT2="$(which unit2)"
+fi
-coverage erase
+if [ ! "$NOCOVERAGE_ERASE" ]; then
+ coverage erase
+fi
# First run overwites coverage output.
[ "$SKIP_MITOGEN" ] || {
- coverage run "${UNIT2}" discover \
- --start-directory "tests" \
- --pattern '*_test.py' \
- "$@"
+ if [ ! "$NOCOVERAGE" ]; then
+ coverage run -a "${UNIT2}" discover \
+ --start-directory "tests" \
+ --pattern '*_test.py' \
+ "$@"
+ else
+ "${UNIT2}" discover \
+ --start-directory "tests" \
+ --pattern '*_test.py' \
+ "$@"
+ fi
+ MITOGEN_TEST_STATUS=$?
}
# Second run appends. This is since 'discover' treats subdirs as packages and
@@ -27,11 +59,24 @@ coverage erase
# mess of Git history.
[ "$SKIP_ANSIBLE" ] || {
export PYTHONPATH=`pwd`/tests:$PYTHONPATH
- coverage run -a "${UNIT2}" discover \
- --start-directory "tests/ansible" \
- --pattern '*_test.py' \
- "$@"
+ if [ ! "$NOCOVERAGE" ]; then
+ coverage run -a "${UNIT2}" discover \
+ --start-directory "tests/ansible" \
+ --pattern '*_test.py' \
+ "$@"
+ else
+ "${UNIT2}" discover \
+ --start-directory "tests/ansible" \
+ --pattern '*_test.py' \
+ "$@"
+ fi
+ ANSIBLE_TEST_STATUS=$?
}
-coverage html
-echo coverage report is at "file://$(pwd)/htmlcov/index.html"
+if [ ! "$NOCOVERAGE_REPORT" ]; then
+ coverage html
+ echo "coverage report is at file://$(pwd)/htmlcov/index.html"
+fi
+
+# Exit with a non-zero status if any test run did so
+exit "$(max ${MITOGEN_TEST_STATUS:-0} ${ANSIBLE_TEST_STATUS:-0})"
diff --git a/scripts/pogrep.py b/scripts/pogrep.py
new file mode 100644
index 00000000..b837bcfd
--- /dev/null
+++ b/scripts/pogrep.py
@@ -0,0 +1,40 @@
+
+# issue #429: tool for extracting keys out of message catalogs and turning them
+# into the big gob of base64 as used in mitogen/sudo.py
+#
+# Usage:
+# - apt-get source libpam0g
+# - cd */po/
+# - python ~/pogrep.py "Password: "
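+#
+# NB: Python 2 only: relies on the 'base64' string codec and the print
+# statement.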
+
+import sys
+import shlex
+import glob
+
+
+last_word = None
+
+for path in glob.glob('*.po'):
+ for line in open(path):
+ bits = shlex.split(line, comments=True)
+ if not bits:
+ continue
+
+ word = bits[0]
+ if len(bits) < 2 or not word:
+ continue
+
+ rest = bits[1]
+ if not rest:
+ continue
+
+ if last_word == 'msgid' and word == 'msgstr':
+ if last_rest == sys.argv[1]:
+ thing = rest.rstrip(': ').decode('utf-8').lower().encode('utf-8').encode('base64').rstrip()
+ print ' %-60s # %s' % (repr(thing)+',', path)
+
+ last_word = word
+ last_rest = rest
+
+#ag -A 1 'msgid "Password: "'|less | grep msgstr | grep -v '""'|cut -d'"' -f2|cut -d'"' -f1| tr -d :
+
diff --git a/setup.py b/setup.py
index 2547e6ed..6f31133d 100644
--- a/setup.py
+++ b/setup.py
@@ -50,7 +50,6 @@ setup(
packages = find_packages(exclude=['tests', 'examples']),
zip_safe = False,
classifiers = [
- 'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
diff --git a/tests/ansible/README.md b/tests/ansible/README.md
index 46320951..50e747fe 100644
--- a/tests/ansible/README.md
+++ b/tests/ansible/README.md
@@ -13,7 +13,7 @@ demonstrator for what does and doesn't work.
See `../image_prep/README.md`.
-## `run_ansible_playbook.sh`
+## `run_ansible_playbook.py`
This is necessary to set some environment variables used by future tests, as
there appears to be no better way to inject them into the top-level process
@@ -22,7 +22,7 @@ environment before the Mitogen connection process forks.
## Running Everything
-`ANSIBLE_STRATEGY=mitogen_linear ./run_ansible_playbook.sh all.yml`
+`ANSIBLE_STRATEGY=mitogen_linear ./run_ansible_playbook.py all.yml`
## `hosts/` and `common-hosts`
diff --git a/tests/ansible/all.yml b/tests/ansible/all.yml
index a68831f7..e074a384 100644
--- a/tests/ansible/all.yml
+++ b/tests/ansible/all.yml
@@ -1,3 +1,3 @@
-- import_playbook: regression/all.yml
-- import_playbook: integration/all.yml
+- include: regression/all.yml
+- include: integration/all.yml
diff --git a/tests/ansible/ansible.cfg b/tests/ansible/ansible.cfg
index 3897519b..a968f84a 100644
--- a/tests/ansible/ansible.cfg
+++ b/tests/ansible/ansible.cfg
@@ -7,11 +7,18 @@ callback_plugins = lib/callback
stdout_callback = nice_stdout
vars_plugins = lib/vars
library = lib/modules
+filter_plugins = lib/filters
module_utils = lib/module_utils
retry_files_enabled = False
display_args_to_stdout = True
forks = 100
+# issue #434; hosts/delegate_to; integration/delegate_to
+remote_user = ansible-cfg-remote-user
+
+# On MacOS, "smart" with a password set causes Ansible to use paramiko.
+transport = ssh
+
no_target_syslog = True
# Required by integration/ssh/timeouts.yml
@@ -21,5 +28,5 @@ timeout = 10
host_key_checking = False
[ssh_connection]
-ssh_args = -o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s
+ssh_args = -o UserKnownHostsFile=/dev/null -o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s
pipelining = True
diff --git a/tests/ansible/ara_env.py b/tests/ansible/ara_env.py
new file mode 100755
index 00000000..ab2b726e
--- /dev/null
+++ b/tests/ansible/ara_env.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+"""
+Print shell environment exports adding ARA plugins to the list of plugins
+from ansible.cfg in the CWD.
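+
+The output is intended to be eval'd by the invoking shell, e.g.:
+
+    eval "$(tests/ansible/ara_env.py)"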
+"""
+
+import os
+
+import ara.setup
+import ansible.constants as C
+
+os.chdir(os.path.dirname(__file__))
+
+print('export ANSIBLE_ACTION_PLUGINS=%s:%s' % (
+ ':'.join(C.DEFAULT_ACTION_PLUGIN_PATH),
+ ara.setup.action_plugins,
+))
+
+print('export ANSIBLE_CALLBACK_PLUGINS=%s:%s' % (
+ ':'.join(C.DEFAULT_CALLBACK_PLUGIN_PATH),
+ ara.setup.callback_plugins,
+))
+
+print('export ANSIBLE_LIBRARY=%s:%s' % (
+ ':'.join(C.DEFAULT_MODULE_PATH),
+ ara.setup.library,
+))
diff --git a/tests/ansible/bench/file_transfer.yml b/tests/ansible/bench/file_transfer.yml
index d0ac727d..2ca46f1c 100644
--- a/tests/ansible/bench/file_transfer.yml
+++ b/tests/ansible/bench/file_transfer.yml
@@ -5,11 +5,11 @@
tasks:
- name: Make 32MiB file
- connection: local
+ delegate_to: localhost
shell: openssl rand 33554432 > /tmp/bigfile.in
- name: Make 320MiB file
- connection: local
+ delegate_to: localhost
shell: >
cat
/tmp/bigfile.in
@@ -47,21 +47,21 @@
file:
path: "{{item}}"
state: absent
- connection: local
+ delegate_to: localhost
become: true
with_items:
- /tmp/bigfile.out
- /tmp/bigbigfile.out
- name: Copy 32MiB file via localhost sudo
- connection: local
+ delegate_to: localhost
become: true
copy:
src: /tmp/bigfile.in
dest: /tmp/bigfile.out
- name: Copy 320MiB file via localhost sudo
- connection: local
+ delegate_to: localhost
become: true
copy:
src: /tmp/bigbigfile.in
diff --git a/tests/ansible/bench/loop-100-copies.yml b/tests/ansible/bench/loop-100-copies.yml
new file mode 100644
index 00000000..231bf4a1
--- /dev/null
+++ b/tests/ansible/bench/loop-100-copies.yml
@@ -0,0 +1,25 @@
+
+- hosts: all
+ any_errors_fatal: true
+ tasks:
+
+ - name: Create file tree
+ connection: local
+ shell: >
+ mkdir -p /tmp/filetree.in;
+ for i in `seq -f /tmp/filetree.in/%g 1 100`; do echo $RANDOM > $i; done;
+
+ - name: Delete remote file tree
+ file: path=/tmp/filetree.out state=absent
+ when: 0
+
+ - file:
+ state: directory
+ path: /tmp/filetree.out
+
+ - name: Trigger nasty process pileup
+ copy:
+ src: "{{item.src}}"
+ dest: "/tmp/filetree.out/{{item.path}}"
+ with_filetree: /tmp/filetree.in
+ when: item.state == 'file'
diff --git a/tests/ansible/bench/loop-100-items.yml b/tests/ansible/bench/loop-100-items.yml
index 0feb57c5..c071c100 100644
--- a/tests/ansible/bench/loop-100-items.yml
+++ b/tests/ansible/bench/loop-100-items.yml
@@ -7,4 +7,4 @@
- hosts: all
tasks:
- command: hostname
- with_sequence: start=1 end=100
+ with_sequence: start=1 end="{{end|default(100)}}"
diff --git a/tests/ansible/bench/loop-20-templates.yml b/tests/ansible/bench/loop-20-templates.yml
index df994bd8..17dc7777 100644
--- a/tests/ansible/bench/loop-20-templates.yml
+++ b/tests/ansible/bench/loop-20-templates.yml
@@ -11,4 +11,4 @@
mode: 0755
content:
Hello from {{item}}
- with_sequence: start=1 end=20
+ with_sequence: start=1 end={{end|default(20)}}
diff --git a/tests/ansible/debug_run_ansible_playbook.sh b/tests/ansible/debug_run_ansible_playbook.sh
deleted file mode 100755
index ab2c9385..00000000
--- a/tests/ansible/debug_run_ansible_playbook.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-# Wrap ansible-playbook, setting up some test of the test environment.
-
-# Used by delegate_to.yml to ensure "sudo -E" preserves environment.
-export I_WAS_PRESERVED=1
-export MITOGEN_MAX_INTERPRETERS=3
-
-if [ "${ANSIBLE_STRATEGY:0:7}" = "mitogen" ]
-then
- EXTRA='{"is_mitogen": true}'
-else
- EXTRA='{"is_mitogen": false}'
-fi
-
-exec ~/src/cpython/venv/bin/ansible-playbook -e "$EXTRA" -e ansible_python_interpreter=/Users/dmw/src/cpython/venv/bin/python2.7 "$@"
diff --git a/tests/ansible/gcloud/controller.yml b/tests/ansible/gcloud/controller.yml
index 494c2164..3c7f9ea0 100644
--- a/tests/ansible/gcloud/controller.yml
+++ b/tests/ansible/gcloud/controller.yml
@@ -24,10 +24,13 @@
src: ssh_config.j2
- lineinfile:
- line: "net.ipv4.ip_forward=1"
+ line: "{{item}}"
path: /etc/sysctl.conf
- register: sysctl_conf
become: true
+ with_items:
+ - net.ipv4.ip_forward=1
+ - kernel.perf_event_paranoid=-1
+ register: sysctl_conf
- shell: /sbin/sysctl -p
when: sysctl_conf.changed
@@ -46,6 +49,7 @@
- python-virtualenv
- strace
- libldap2-dev
+ - linux-perf
- libsasl2-dev
- build-essential
- git
@@ -66,8 +70,8 @@
- git:
dest: ~/ansible
- repo: https://github.com/dw/ansible.git
- version: dmw
+ repo: https://github.com/ansible/ansible.git
+ #version: dmw
- pip:
virtualenv: ~/venv
diff --git a/tests/ansible/gcloud/requirements.txt b/tests/ansible/gcloud/requirements.txt
new file mode 100644
index 00000000..0df6728c
--- /dev/null
+++ b/tests/ansible/gcloud/requirements.txt
@@ -0,0 +1 @@
+google-api-python-client==1.6.5
diff --git a/tests/ansible/hosts/become_same_user.hosts b/tests/ansible/hosts/become_same_user.hosts
new file mode 100644
index 00000000..a18b90d2
--- /dev/null
+++ b/tests/ansible/hosts/become_same_user.hosts
@@ -0,0 +1,4 @@
+
+# become_same_user.yml
+bsu-joe ansible_user=joe
+
diff --git a/tests/ansible/hosts/connection-delegation b/tests/ansible/hosts/connection-delegation
deleted file mode 100644
index 2fb87455..00000000
--- a/tests/ansible/hosts/connection-delegation
+++ /dev/null
@@ -1,12 +0,0 @@
-[connection-delegation-test]
-cd-bastion
-cd-rack11 mitogen_via=ssh-user@cd-bastion
-cd-rack11a mitogen_via=root@cd-rack11
-cd-rack11a-docker mitogen_via=docker-admin@cd-rack11a ansible_connection=docker
-
-[connection-delegation-cycle]
-# Create cycle with Docker container.
-cdc-bastion mitogen_via=cdc-rack11a-docker
-cdc-rack11 mitogen_via=ssh-user@cdc-bastion
-cdc-rack11a mitogen_via=root@cdc-rack11
-cdc-rack11a-docker mitogen_via=docker-admin@cdc-rack11a ansible_connection=docker
diff --git a/tests/ansible/hosts/common-hosts b/tests/ansible/hosts/connection_delegation.hosts
similarity index 71%
rename from tests/ansible/hosts/common-hosts
rename to tests/ansible/hosts/connection_delegation.hosts
index cf84d2d1..a22bd5df 100644
--- a/tests/ansible/hosts/common-hosts
+++ b/tests/ansible/hosts/connection_delegation.hosts
@@ -1,38 +1,18 @@
# vim: syntax=dosini
+# Connection delegation scenarios. It's impossible to connect to them, but their would-be
+# config can be inspected using the "mitogen_get_stack" action.
-# This must be defined explicitly, otherwise _create_implicit_localhost()
-# generates its own copy, which includes an ansible_python_interpreter that
-# varies according to host machine.
-localhost
-[connection-delegation-test]
-cd-bastion
-cd-rack11 mitogen_via=ssh-user@cd-bastion
-cd-rack11a mitogen_via=root@cd-rack11
-cd-rack11a-docker mitogen_via=docker-admin@cd-rack11a ansible_connection=docker
-
-[connection-delegation-cycle]
-# Create cycle with Docker container.
-cdc-bastion mitogen_via=cdc-rack11a-docker
-cdc-rack11 mitogen_via=ssh-user@cdc-bastion
-cdc-rack11a mitogen_via=root@cdc-rack11
-cdc-rack11a-docker mitogen_via=docker-admin@cdc-rack11a ansible_connection=docker
-
-[conn-delegation]
-cd-user1 ansible_user=mitogen__user1 ansible_connection=mitogen_sudo mitogen_via=target
-
-
-# Connection delegation scenarios. It's impossible to connection to them, but
-# you can inspect the would-be config via "mitogen_get_stack" action.
-[cd-no-connect]
# Normal inventory host, no aliasing.
cd-normal ansible_connection=mitogen_doas ansible_user=normal-user
+
# Inventory host that is really a different host.
cd-alias ansible_connection=ssh ansible_user=alias-user ansible_host=alias-host
# Via one normal host.
cd-normal-normal mitogen_via=cd-normal
+
# Via one aliased host.
cd-normal-alias mitogen_via=cd-alias
@@ -41,3 +21,19 @@ cd-newuser-normal-normal mitogen_via=cd-normal ansible_user=newuser-normal-norma
# doas:newuser via host.
cd-newuser-doas-normal mitogen_via=cd-normal ansible_connection=mitogen_doas ansible_user=newuser-doas-normal-user
+
+
+[connection-delegation-test]
+cd-bastion
+cd-rack11 mitogen_via=ssh-user@cd-bastion
+cd-rack11a mitogen_via=root@cd-rack11
+cd-rack11a-docker mitogen_via=docker-admin@cd-rack11a ansible_connection=docker
+
+
+[connection-delegation-cycle]
+# Create cycle with Docker container.
+cdc-bastion mitogen_via=cdc-rack11a-docker
+cdc-rack11 mitogen_via=ssh-user@cdc-bastion
+cdc-rack11a mitogen_via=root@cdc-rack11
+cdc-rack11a-docker mitogen_via=docker-admin@cdc-rack11a ansible_connection=docker
+
diff --git a/tests/ansible/hosts/default.hosts b/tests/ansible/hosts/default.hosts
new file mode 100644
index 00000000..02f3c614
--- /dev/null
+++ b/tests/ansible/hosts/default.hosts
@@ -0,0 +1,8 @@
+# vim: syntax=dosini
+
+# When running the tests outside CI, make a single 'target' host which is the
+# local machine.
+target ansible_host=localhost
+
+[test-targets]
+target
diff --git a/tests/ansible/hosts/k3 b/tests/ansible/hosts/k3.hosts
similarity index 79%
rename from tests/ansible/hosts/k3
rename to tests/ansible/hosts/k3.hosts
index 1a7190d8..34e1ff95 100644
--- a/tests/ansible/hosts/k3
+++ b/tests/ansible/hosts/k3.hosts
@@ -1,3 +1,6 @@
+# vim: syntax=dosini
+
+# Used for manual testing.
k3
[k3-x10]
diff --git a/tests/ansible/hosts/localhost b/tests/ansible/hosts/localhost
deleted file mode 100644
index f4dab2ab..00000000
--- a/tests/ansible/hosts/localhost
+++ /dev/null
@@ -1,8 +0,0 @@
-localhost
-target ansible_host=localhost
-
-[test-targets]
-target
-
-[localhost-x10]
-localhost-[01:10]
diff --git a/tests/ansible/hosts/localhost.hosts b/tests/ansible/hosts/localhost.hosts
new file mode 100644
index 00000000..89bf7b38
--- /dev/null
+++ b/tests/ansible/hosts/localhost.hosts
@@ -0,0 +1,37 @@
+# vim: syntax=dosini
+
+# This must be defined explicitly, otherwise _create_implicit_localhost()
+# generates its own copy, which includes an ansible_python_interpreter that
+# varies according to host machine.
+localhost
+
+# This is only used for manual testing.
+[localhost-x10]
+localhost-[001:010]
+
+[localhost-x20]
+localhost-[001:020]
+
+[localhost-x30]
+localhost-[001:030]
+
+[localhost-x40]
+localhost-[001:040]
+
+[localhost-x50]
+localhost-[001:050]
+
+[localhost-x60]
+localhost-[001:060]
+
+[localhost-x70]
+localhost-[001:070]
+
+[localhost-x80]
+localhost-[001:080]
+
+[localhost-x90]
+localhost-[001:090]
+
+[localhost-x100]
+localhost-[001:100]
diff --git a/tests/ansible/hosts/nessy b/tests/ansible/hosts/nessy
deleted file mode 100644
index 5cdef123..00000000
--- a/tests/ansible/hosts/nessy
+++ /dev/null
@@ -1,10 +0,0 @@
-nessy
-
-[nessy-x10]
-nessy-[00:10]
-
-[nessy-x20]
-nessy-[00:20]
-
-[nessy-x50]
-nessy-[00:50]
diff --git a/tests/ansible/integration/action/all.yml b/tests/ansible/integration/action/all.yml
index 018973a9..461c742b 100644
--- a/tests/ansible/integration/action/all.yml
+++ b/tests/ansible/integration/action/all.yml
@@ -1,9 +1,9 @@
-- import_playbook: copy.yml
-- import_playbook: fixup_perms2__copy.yml
-- import_playbook: low_level_execute_command.yml
-- import_playbook: make_tmp_path.yml
-- import_playbook: remote_expand_user.yml
-- import_playbook: remote_file_exists.yml
-- import_playbook: remove_tmp_path.yml
-- import_playbook: synchronize.yml
-- import_playbook: transfer_data.yml
+- include: copy.yml
+- include: fixup_perms2__copy.yml
+- include: low_level_execute_command.yml
+- include: make_tmp_path.yml
+- include: remote_expand_user.yml
+- include: remote_file_exists.yml
+- include: remove_tmp_path.yml
+- include: synchronize.yml
+- include: transfer_data.yml
diff --git a/tests/ansible/integration/action/copy.yml b/tests/ansible/integration/action/copy.yml
index d799be90..b34b9831 100644
--- a/tests/ansible/integration/action/copy.yml
+++ b/tests/ansible/integration/action/copy.yml
@@ -1,6 +1,6 @@
# Verify copy module for small and large files, and inline content.
-- name: integration/action/synchronize.yml
+- name: integration/action/copy.yml
hosts: test-targets
any_errors_fatal: true
tasks:
@@ -8,13 +8,13 @@
dest: /tmp/copy-tiny-file
content:
this is a tiny file.
- connection: local
+ delegate_to: localhost
- copy:
dest: /tmp/copy-large-file
# Must be larger than Connection.SMALL_SIZE_LIMIT.
content: "{% for x in range(200000) %}x{% endfor %}"
- connection: local
+ delegate_to: localhost
# end of making files
diff --git a/tests/ansible/integration/action/fixup_perms2__copy.yml b/tests/ansible/integration/action/fixup_perms2__copy.yml
index c92b158e..280267e6 100644
--- a/tests/ansible/integration/action/fixup_perms2__copy.yml
+++ b/tests/ansible/integration/action/fixup_perms2__copy.yml
@@ -53,7 +53,7 @@
state: absent
- name: Create local test file.
- connection: local
+ delegate_to: localhost
copy:
content: "weird mode"
dest: "/tmp/weird-mode"
diff --git a/tests/ansible/integration/action/low_level_execute_command.yml b/tests/ansible/integration/action/low_level_execute_command.yml
index a42fa877..7c14cb22 100644
--- a/tests/ansible/integration/action/low_level_execute_command.yml
+++ b/tests/ansible/integration/action/low_level_execute_command.yml
@@ -1,6 +1,6 @@
# Verify the behaviour of _low_level_execute_command().
-- name: integration/action__low_level_execute_command.yml
+- name: integration/action/low_level_execute_command.yml
hosts: test-targets
any_errors_fatal: true
tasks:
@@ -14,19 +14,28 @@
assert:
that:
- 'raw.rc == 0'
- - 'raw.stdout_lines == ["2"]'
- - 'raw.stdout == "2"'
+ - 'raw.stdout_lines[-1]|to_text == "2"'
+ - 'raw.stdout[-1]|to_text == "2"'
- name: Run raw module with sudo
become: true
raw: 'whoami'
register: raw
+ - debug: msg="x{{raw}}x"
+
  # Can't test stdout because the TTY inserts \r characters in some Ansible versions.
- name: Verify raw module output.
assert:
that:
- raw.rc == 0
- # WHY DOES VANILLA ANSIBLE INSERT NEWLINES HERE!?!?!?!?!?!ONE
- - raw.stdout in ("\r\nroot\r\n", "root\r\n")
- - raw.stdout_lines in (["", "root"], ["root"])
+ # WHY DOES VANILLA INSERT NEWLINES HERE!?!?!?!?!?!ONE
+ #- raw.stdout in ("\r\nroot\r\n", "root\r\n")
+ - '(raw.stdout|to_text).endswith("root\r\n")'
+ - |
+ raw.stdout_lines|to_text in (
+ ["\r\n"],
+ ["", "root"],
+ ["root\r\n"],
+ ["root"],
+ )
diff --git a/tests/ansible/integration/action/remote_expand_user.yml b/tests/ansible/integration/action/remote_expand_user.yml
index 85990264..37fc5ebe 100644
--- a/tests/ansible/integration/action/remote_expand_user.yml
+++ b/tests/ansible/integration/action/remote_expand_user.yml
@@ -8,12 +8,12 @@
- name: "Find out root's homedir."
# Runs first because it blats regular Ansible facts with junk, so
# non-become run fixes that up.
- setup: gather_subset=min
+ setup:
become: true
register: root_facts
- name: "Find regular homedir"
- setup: gather_subset=min
+ setup:
register: user_facts
# ------------------------
@@ -36,8 +36,9 @@
sudoable: false
register: out
become: true
- - assert:
- that: out.result == '{{user_facts.ansible_facts.ansible_user_dir}}/foo'
+ - assert_equal:
+ left: out.result
+ right: user_facts.ansible_facts.ansible_user_dir + '/foo'
- name: "Expand ~user/foo"
action_passthrough:
@@ -80,8 +81,9 @@
register: out
become: true
- - assert:
- that: out.result == '{{root_facts.ansible_facts.ansible_user_dir}}/foo'
+ - assert_equal:
+ left: out.result
+ right: root_facts.ansible_facts.ansible_user_dir + '/foo'
- name: "sudoable; Expand ~user/foo"
action_passthrough:
diff --git a/tests/ansible/integration/action/synchronize.yml b/tests/ansible/integration/action/synchronize.yml
index 25649fbf..25f86d6d 100644
--- a/tests/ansible/integration/action/synchronize.yml
+++ b/tests/ansible/integration/action/synchronize.yml
@@ -5,6 +5,7 @@
any_errors_fatal: true
vars:
ansible_user: mitogen__has_sudo_pubkey
+ ansible_become_pass: has_sudo_pubkey_password
ansible_ssh_private_key_file: /tmp/synchronize-action-key
tasks:
# must copy git file to set proper file mode.
@@ -12,22 +13,22 @@
dest: /tmp/synchronize-action-key
src: ../../../data/docker/mitogen__has_sudo_pubkey.key
mode: u=rw,go=
- connection: local
+ delegate_to: localhost
- file:
path: /tmp/sync-test
state: absent
- connection: local
+ delegate_to: localhost
- file:
path: /tmp/sync-test
state: directory
- connection: local
+ delegate_to: localhost
- copy:
dest: /tmp/sync-test/item
content: "item!"
- connection: local
+ delegate_to: localhost
- file:
path: /tmp/sync-test.out
diff --git a/tests/ansible/integration/all.yml b/tests/ansible/integration/all.yml
index e9a12ec8..bd68b4ab 100644
--- a/tests/ansible/integration/all.yml
+++ b/tests/ansible/integration/all.yml
@@ -3,17 +3,19 @@
# This playbook imports all tests that are known to work at present.
#
-- import_playbook: action/all.yml
-- import_playbook: async/all.yml
-- import_playbook: become/all.yml
-- import_playbook: connection/all.yml
-- import_playbook: connection_loader/all.yml
-- import_playbook: context_service/all.yml
-- import_playbook: delegation/all.yml
-- import_playbook: glibc_caches/all.yml
-- import_playbook: local/all.yml
-- import_playbook: module_utils/all.yml
-- import_playbook: playbook_semantics/all.yml
-- import_playbook: runner/all.yml
-- import_playbook: ssh/all.yml
-- import_playbook: strategy/all.yml
+- include: action/all.yml
+- include: async/all.yml
+- include: become/all.yml
+- include: connection/all.yml
+- include: connection_delegation/all.yml
+- include: connection_loader/all.yml
+- include: context_service/all.yml
+- include: glibc_caches/all.yml
+- include: local/all.yml
+- include: module_utils/all.yml
+- include: playbook_semantics/all.yml
+- include: process/all.yml
+- include: runner/all.yml
+- include: ssh/all.yml
+- include: strategy/all.yml
+- include: stub_connections/all.yml
diff --git a/tests/ansible/integration/async/all.yml b/tests/ansible/integration/async/all.yml
index 17969ead..f14537ed 100644
--- a/tests/ansible/integration/async/all.yml
+++ b/tests/ansible/integration/async/all.yml
@@ -1,8 +1,9 @@
-- import_playbook: result_binary_producing_json.yml
-- import_playbook: result_binary_producing_junk.yml
-- import_playbook: result_shell_echo_hi.yml
-- import_playbook: runner_new_process.yml
-- import_playbook: runner_one_job.yml
-- import_playbook: runner_timeout_then_polling.yml
-- import_playbook: runner_with_polling_and_timeout.yml
-- import_playbook: runner_two_simultaneous_jobs.yml
+- include: multiple_items_loop.yml
+- include: result_binary_producing_json.yml
+- include: result_binary_producing_junk.yml
+- include: result_shell_echo_hi.yml
+- include: runner_new_process.yml
+- include: runner_one_job.yml
+- include: runner_timeout_then_polling.yml
+- include: runner_two_simultaneous_jobs.yml
+- include: runner_with_polling_and_timeout.yml
diff --git a/tests/ansible/integration/async/multiple_items_loop.yml b/tests/ansible/integration/async/multiple_items_loop.yml
new file mode 100644
index 00000000..9a9b1192
--- /dev/null
+++ b/tests/ansible/integration/async/multiple_items_loop.yml
@@ -0,0 +1,36 @@
+# issue #414: verify behaviour of async tasks created in a loop.
+
+- name: integration/async/multiple_items_loop.yml
+ hosts: test-targets
+ any_errors_fatal: true
+ tasks:
+
+ - name: start long running ops
+ become: true
+ shell: "{{item}}"
+ async: 15
+ poll: 0
+ register: jobs
+ with_items:
+ - "sleep 3; echo hi-from-job-1"
+ - "sleep 5; echo hi-from-job-2"
+
+ - name: Ensure static files are collected and compressed
+ async_status:
+ jid: "{{ item.ansible_job_id }}"
+ become: yes
+ register: out
+ until: out.finished
+ retries: 30
+ with_items:
+ - "{{ jobs.results }}"
+
+ - assert:
+ that:
+ - out.results[0].stdout == 'hi-from-job-1'
+ - out.results[0].rc == 0
+ - out.results[0].delta > '0:00:03'
+
+ - out.results[1].stdout == 'hi-from-job-2'
+ - out.results[1].rc == 0
+ - out.results[1].delta > '0:00:05'
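+
+# Note: `delta` is rendered as a "H:MM:SS.ffffff" string (runner_one_job.yml
+# asserts delta|length == 14), so the comparisons above are lexicographic
+# string comparisons; they hold because both operands share the same format.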
diff --git a/tests/ansible/integration/async/result_shell_echo_hi.yml b/tests/ansible/integration/async/result_shell_echo_hi.yml
index 8858037a..c2d2dc42 100644
--- a/tests/ansible/integration/async/result_shell_echo_hi.yml
+++ b/tests/ansible/integration/async/result_shell_echo_hi.yml
@@ -16,15 +16,13 @@
src: "{{ansible_user_dir}}/.ansible_async/{{job.ansible_job_id}}"
register: result
- #- debug: msg={{async_out}}
- #vars:
- #async_out: "{{result.content|b64decode|from_json}}"
+ #- debug: msg="{{result.content|b64decode|from_json}}"
- assert:
that:
- async_out.changed == True
- async_out.cmd == "echo hi"
- - 'async_out.delta.startswith("0:00:00")'
+ - 'async_out.delta.startswith("0:00:")'
- async_out.end.startswith("20")
- async_out.invocation.module_args._raw_params == "echo hi"
- async_out.invocation.module_args._uses_shell == True
@@ -32,7 +30,6 @@
- async_out.invocation.module_args.creates == None
- async_out.invocation.module_args.executable == None
- async_out.invocation.module_args.removes == None
- - async_out.invocation.module_args.stdin == None
- async_out.invocation.module_args.warn == True
- async_out.rc == 0
- async_out.start.startswith("20")
@@ -40,3 +37,10 @@
- async_out.stdout == "hi"
vars:
async_out: "{{result.content|b64decode|from_json}}"
+
+ - assert:
+ that:
+ - async_out.invocation.module_args.stdin == None
+ when: ansible_version.full > '2.4'
+ vars:
+ async_out: "{{result.content|b64decode|from_json}}"
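+
+# The `ansible_version.full > '2.4'` guard is a plain string comparison. That
+# is adequate for the 2.x minor versions exercised here, though it would
+# mis-order a hypothetical "2.10".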
diff --git a/tests/ansible/integration/async/runner_one_job.yml b/tests/ansible/integration/async/runner_one_job.yml
index 04ffc5ea..ca798a7f 100644
--- a/tests/ansible/integration/async/runner_one_job.yml
+++ b/tests/ansible/integration/async/runner_one_job.yml
@@ -40,11 +40,20 @@
- result1.cmd == "sleep 1;\n echo alldone"
- result1.delta|length == 14
- result1.start|length == 26
- - result1.failed == False
- result1.finished == 1
- result1.rc == 0
- result1.start|length == 26
+
+ - assert:
+ that:
- result1.stderr == ""
- result1.stderr_lines == []
- result1.stdout == "alldone"
- result1.stdout_lines == ["alldone"]
+ when: ansible_version.full > '2.8' # ansible#51393
+
+ - assert:
+ that:
+ - result1.failed == False
+ when: ansible_version.full > '2.4'
+
diff --git a/tests/ansible/integration/async/runner_two_simultaneous_jobs.yml b/tests/ansible/integration/async/runner_two_simultaneous_jobs.yml
index 9474263b..fdde0463 100644
--- a/tests/ansible/integration/async/runner_two_simultaneous_jobs.yml
+++ b/tests/ansible/integration/async/runner_two_simultaneous_jobs.yml
@@ -56,4 +56,8 @@
that:
- result1.rc == 0
- result2.rc == 0
+
+ - assert:
+ that:
- result2.stdout == 'im_alive'
+ when: ansible_version.full > '2.8' # ansible#51393
diff --git a/tests/ansible/integration/become/all.yml b/tests/ansible/integration/become/all.yml
index c9c331dd..5fa030d1 100644
--- a/tests/ansible/integration/become/all.yml
+++ b/tests/ansible/integration/become/all.yml
@@ -1,7 +1,7 @@
-- import_playbook: su_password.yml
-- import_playbook: sudo_flags_failure.yml
-- import_playbook: sudo_nonexistent.yml
-- import_playbook: sudo_nopassword.yml
-- import_playbook: sudo_password.yml
-- import_playbook: sudo_requiretty.yml
+- include: su_password.yml
+- include: sudo_flags_failure.yml
+- include: sudo_nonexistent.yml
+- include: sudo_nopassword.yml
+- include: sudo_password.yml
+- include: sudo_requiretty.yml
diff --git a/tests/ansible/integration/connection/_disconnect_during_module.yml b/tests/ansible/integration/connection/_disconnect_during_module.yml
new file mode 100644
index 00000000..6bd8cd50
--- /dev/null
+++ b/tests/ansible/integration/connection/_disconnect_during_module.yml
@@ -0,0 +1,13 @@
+# issue 352: test the ability to notice disconnection during a module invocation.
+---
+
+- name: integration/connection/_disconnect_during_module.yml
+ hosts: test-targets
+ gather_facts: no
+ any_errors_fatal: false
+ tasks:
+ - run_once: true # don't run against localhost
+ shell: |
+ kill -9 $PPID
+ register: out
+ ignore_errors: true
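+
+# Killing $PPID terminates the process that spawned the shell -- under
+# Mitogen, the remote interpreter servicing this task -- simulating an abrupt
+# disconnect while the module call is still in flight.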
diff --git a/tests/ansible/integration/connection/_put_file.yml b/tests/ansible/integration/connection/_put_file.yml
index a0fea4ed..5b661d9f 100644
--- a/tests/ansible/integration/connection/_put_file.yml
+++ b/tests/ansible/integration/connection/_put_file.yml
@@ -3,7 +3,7 @@
- shell: dd if=/dev/urandom of=/tmp/{{file_name}} bs=1024 count={{file_size}}
args:
creates: /tmp/{{file_name}}
- connection: local
+ delegate_to: localhost
- copy:
dest: /tmp/{{file_name}}.out
@@ -11,7 +11,7 @@
- stat: path=/tmp/{{file_name}}
register: original
- connection: local
+ delegate_to: localhost
- stat: path=/tmp/{{file_name}}.out
register: copied
@@ -19,4 +19,5 @@
- assert:
that:
- original.stat.checksum == copied.stat.checksum
- - original.stat.mtime|int == copied.stat.mtime|int
+      # Upstream does not preserve timestamps at all.
+ #- (not is_mitogen) or (original.stat.mtime|int == copied.stat.mtime|int)
diff --git a/tests/ansible/integration/connection/all.yml b/tests/ansible/integration/connection/all.yml
index 123e11c4..4211f1b3 100644
--- a/tests/ansible/integration/connection/all.yml
+++ b/tests/ansible/integration/connection/all.yml
@@ -1,5 +1,10 @@
---
-- import_playbook: exec_command.yml
-- import_playbook: put_small_file.yml
-- import_playbook: put_large_file.yml
+- include: become_same_user.yml
+- include: disconnect_during_module.yml
+- include: disconnect_resets_connection.yml
+- include: exec_command.yml
+- include: home_dir.yml
+- include: put_large_file.yml
+- include: put_small_file.yml
+- include: reset.yml
diff --git a/tests/ansible/integration/connection/become_same_user.yml b/tests/ansible/integration/connection/become_same_user.yml
new file mode 100644
index 00000000..d73eca86
--- /dev/null
+++ b/tests/ansible/integration/connection/become_same_user.yml
@@ -0,0 +1,39 @@
+# issue #499: ensure C.BECOME_ALLOW_SAME_USER is respected.
+---
+
+- name: integration/connection/become_same_user.yml
+ hosts: bsu-joe
+ gather_facts: no
+ any_errors_fatal: true
+ tasks:
+
+ # bsu-joe's login user is joe, so become should be ignored.
+ - mitogen_get_stack:
+ become: true
+ become_user: joe
+ register: out
+ when: is_mitogen
+
+ - assert:
+ that:
+ - out.result[0].method == "ssh"
+ - out.result[0].kwargs.username == "joe"
+ - out.result|length == 1 # no sudo
+ when: is_mitogen
+
+
+ # Now try with a different account.
+ - mitogen_get_stack:
+ become: true
+ become_user: james
+ register: out
+ when: is_mitogen
+
+ - assert:
+ that:
+ - out.result[0].method == "ssh"
+ - out.result[0].kwargs.username == "joe"
+ - out.result[1].method == "sudo"
+ - out.result[1].kwargs.username == "james"
+      - out.result|length == 2  # ssh + sudo
+ when: is_mitogen
diff --git a/tests/ansible/integration/connection/disconnect_during_module.yml b/tests/ansible/integration/connection/disconnect_during_module.yml
new file mode 100644
index 00000000..e628e68e
--- /dev/null
+++ b/tests/ansible/integration/connection/disconnect_during_module.yml
@@ -0,0 +1,27 @@
+# issue 352: test the ability to notice disconnection during a module invocation.
+---
+
+- name: integration/connection/disconnect_during_module.yml
+ hosts: test-targets
+ gather_facts: no
+ any_errors_fatal: false
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - delegate_to: localhost
+ command: |
+ ansible-playbook
+ -i "{{MITOGEN_INVENTORY_FILE}}"
+ integration/connection/_disconnect_during_module.yml
+ args:
+ chdir: ../..
+ register: out
+ ignore_errors: true
+
+ - debug: var=out
+
+ - assert:
+ that:
+ - out.rc == 4
+ - "'Mitogen was disconnected from the remote environment while a call was in-progress.' in out.stdout"
diff --git a/tests/ansible/integration/connection/disconnect_resets_connection.yml b/tests/ansible/integration/connection/disconnect_resets_connection.yml
new file mode 100644
index 00000000..5f02a8d5
--- /dev/null
+++ b/tests/ansible/integration/connection/disconnect_resets_connection.yml
@@ -0,0 +1,47 @@
+# issue 370: Connection should reset to 'disconnected' state when a
+# disconnect is detected.
+#
+# Previously the 'Mitogen was disconnected' error would fail the first task,
+# but the Connection instance would still think it had a valid
+# connection.
+#
+# See also disconnect_during_module.yml
+
+---
+
+- name: integration/connection/disconnect_resets_connection.yml
+ hosts: test-targets
+ gather_facts: no
+ any_errors_fatal: true
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - mitogen_action_script:
+ script: |
+ import sys
+ from ansible.errors import AnsibleConnectionFailure
+
+ assert not self._connection.connected, \
+ "Connection was not initially disconnected."
+
+ self._low_level_execute_command('echo')
+ assert self._connection.connected, \
+ "Connection was not connected after good command."
+
+ try:
+ self._low_level_execute_command('kill -9 $PPID')
+ assert 0, 'AnsibleConnectionFailure was not raised'
+ except AnsibleConnectionFailure:
+ e = sys.exc_info()[1]
+ assert str(e).startswith('Mitogen was disconnected')
+
+ assert not self._connection.connected, \
+ "Connection did not reset."
+
+ try:
+ self._low_level_execute_command('kill -9 $PPID')
+ assert 0, 'AnsibleConnectionFailure was not raised'
+ except AnsibleConnectionFailure:
+ e = sys.exc_info()[1]
+ assert str(e).startswith('Mitogen was disconnected')
diff --git a/tests/ansible/integration/connection/exec_command.yml b/tests/ansible/integration/connection/exec_command.yml
index 6a632961..105505d1 100644
--- a/tests/ansible/integration/connection/exec_command.yml
+++ b/tests/ansible/integration/connection/exec_command.yml
@@ -15,5 +15,5 @@
- assert:
that:
- out.result[0] == 0
- - out.result[1] == "hello, world\r\n"
- - out.result[2].startswith("Shared connection to ")
+ - out.result[1].decode() == "hello, world\r\n"
+ - out.result[2].decode().startswith("Shared connection to ")
diff --git a/tests/ansible/integration/connection/home_dir.yml b/tests/ansible/integration/connection/home_dir.yml
new file mode 100644
index 00000000..10154450
--- /dev/null
+++ b/tests/ansible/integration/connection/home_dir.yml
@@ -0,0 +1,39 @@
+# Verify the value of the Connection.homedir attribute is as expected.
+
+- name: integration/connection/home_dir.yml
+ hosts: test-targets
+ any_errors_fatal: true
+ tasks:
+ - name: "Find out root's homedir."
+ # Runs first because it blats regular Ansible facts with junk, so
+ # non-become run fixes that up.
+ setup:
+ become: true
+ register: root_facts
+ when: is_mitogen
+
+ - name: "Find regular homedir"
+ setup:
+ register: user_facts
+ when: is_mitogen
+
+ - name: "Verify Connection.homedir correct when become:false"
+ mitogen_action_script:
+ script: |
+ self._connection._connect()
+ assert self._connection.homedir == "{{user_facts.ansible_facts.ansible_user_dir}}", {
+ "connection homedir": self._connection.homedir,
+ "homedir from facts": "{{user_facts.ansible_facts.ansible_user_dir}}"
+ }
+ when: is_mitogen
+
+ - name: "Verify Connection.homedir correct when become:true"
+ become: true
+ mitogen_action_script:
+ script: |
+ self._connection._connect()
+ assert self._connection.homedir == "{{root_facts.ansible_facts.ansible_user_dir}}", {
+ "connection homedir": self._connection.homedir,
+ "homedir from facts": "{{root_facts.ansible_facts.ansible_user_dir}}"
+ }
+ when: is_mitogen
diff --git a/tests/ansible/integration/connection/put_large_file.yml b/tests/ansible/integration/connection/put_large_file.yml
index 210c5d6a..392731df 100644
--- a/tests/ansible/integration/connection/put_large_file.yml
+++ b/tests/ansible/integration/connection/put_large_file.yml
@@ -9,4 +9,4 @@
file_name: large-file
file_size: 512
tasks:
- - include_tasks: _put_file.yml
+ - include: _put_file.yml
diff --git a/tests/ansible/integration/connection/put_small_file.yml b/tests/ansible/integration/connection/put_small_file.yml
index aa6cc0d7..d9423f75 100644
--- a/tests/ansible/integration/connection/put_small_file.yml
+++ b/tests/ansible/integration/connection/put_small_file.yml
@@ -9,4 +9,4 @@
file_name: small-file
file_size: 123
tasks:
- - include_tasks: _put_file.yml
+ - include: _put_file.yml
diff --git a/tests/ansible/integration/connection/reset.yml b/tests/ansible/integration/connection/reset.yml
new file mode 100644
index 00000000..768cd2d5
--- /dev/null
+++ b/tests/ansible/integration/connection/reset.yml
@@ -0,0 +1,45 @@
+# issue #369: Connection.reset() should cause destruction of the remote
+# interpreter and any children.
+
+---
+
+- name: integration/connection/reset.yml
+ hosts: test-targets
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - debug: msg="reset.yml skipped on Ansible<2.5.6"
+ when: ansible_version.full < '2.5.6'
+
+ - meta: end_play
+ when: ansible_version.full < '2.5.6'
+
+ - custom_python_detect_environment:
+ register: out
+
+ - custom_python_detect_environment:
+ become: true
+ register: out_become
+
+ - meta: reset_connection
+
+ - custom_python_detect_environment:
+ register: out2
+
+ - custom_python_detect_environment:
+ register: out_become2
+
+ - assert:
+ that:
+ # Interpreter PID has changed.
+ - out.pid != out2.pid
+
+ # SSH PID has changed.
+ - out.ppid != out2.ppid
+
+ # Interpreter PID has changed.
+ - out_become.pid != out_become2.pid
+
+ # sudo PID has changed.
+ - out_become.ppid != out_become2.ppid
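+
+# The 2.5.6 guard above reflects that `meta: reset_connection` only became
+# usable with third-party connection plugins around Ansible 2.5.6.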
diff --git a/tests/ansible/integration/connection_delegation/all.yml b/tests/ansible/integration/connection_delegation/all.yml
new file mode 100644
index 00000000..c9b09687
--- /dev/null
+++ b/tests/ansible/integration/connection_delegation/all.yml
@@ -0,0 +1,5 @@
+- include: delegate_to_template.yml
+- include: local_action.yml
+- include: osa_container_standalone.yml
+- include: osa_delegate_to_self.yml
+- include: stack_construction.yml
diff --git a/tests/ansible/integration/connection_delegation/delegate_to_template.yml b/tests/ansible/integration/connection_delegation/delegate_to_template.yml
new file mode 100644
index 00000000..a5c0216c
--- /dev/null
+++ b/tests/ansible/integration/connection_delegation/delegate_to_template.yml
@@ -0,0 +1,86 @@
+# issue #340: Ensure templated delegate_to field works.
+#
+# Here we delegate from the "test-targets" group to a templated
+# "{{physical_host}}" variable, which contains "cd-normal-alias", which has
+# "mitogen_via=cd-alias", which in turn has "ansible_host=alias-host".
+#
+# So the full stack should be:
+# - First hop: hostname "alias-host", username "alias-user"
+# - Second hop: hostname "cd-normal-alias"
+
+- name: integration/connection_delegation/delegate_to_template.yml
+ vars:
+ physical_host: "cd-normal-alias"
+ physical_hosts: ["cd-normal-alias", "cd-normal-normal"]
+ hosts: test-targets
+ gather_facts: no
+ any_errors_fatal: true
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - meta: end_play
+ when: ansible_version.full < '2.4'
+
+ - mitogen_get_stack:
+ delegate_to: "{{ physical_host }}"
+ register: out
+
+ - assert_equal:
+ left: out.result
+ right: [
+ {
+ 'kwargs': {
+ 'check_host_keys': 'ignore',
+ 'compression': True,
+ 'connect_timeout': 10,
+ 'hostname': 'alias-host',
+ 'identities_only': False,
+ 'identity_file': null,
+ 'password': null,
+ 'port': null,
+ 'python_path': null,
+ 'ssh_args': [
+ '-o',
+ 'UserKnownHostsFile=/dev/null',
+ '-o',
+ 'ForwardAgent=yes',
+ '-o',
+ 'ControlMaster=auto',
+ '-o',
+ 'ControlPersist=60s',
+ ],
+ 'ssh_debug_level': null,
+ 'ssh_path': 'ssh',
+ 'username': 'alias-user',
+ },
+ 'method': 'ssh',
+ },
+ {
+ 'kwargs': {
+ 'check_host_keys': 'ignore',
+ 'compression': True,
+ 'connect_timeout': 10,
+ 'hostname': 'cd-normal-alias',
+ 'identities_only': False,
+ 'identity_file': null,
+ 'password': null,
+ 'port': null,
+ 'python_path': null,
+ 'ssh_args': [
+ '-o',
+ 'UserKnownHostsFile=/dev/null',
+ '-o',
+ 'ForwardAgent=yes',
+ '-o',
+ 'ControlMaster=auto',
+ '-o',
+ 'ControlPersist=60s',
+ ],
+ 'ssh_debug_level': null,
+ 'ssh_path': 'ssh',
+ 'username': 'ansible-cfg-remote-user',
+ },
+ 'method': 'ssh',
+ }
+ ]
diff --git a/tests/ansible/integration/connection_delegation/local_action.yml b/tests/ansible/integration/connection_delegation/local_action.yml
new file mode 100644
index 00000000..d166c0d9
--- /dev/null
+++ b/tests/ansible/integration/connection_delegation/local_action.yml
@@ -0,0 +1,34 @@
+
+# issue #251: local_action with mitogen_via= builds wrong stack.
+
+- hosts: cd-newuser-normal-normal
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - local_action: mitogen_get_stack
+ become: true
+ register: out
+
+ - assert_equal:
+ left: out.result
+ right: [
+ {
+ 'kwargs': {
+ 'python_path': null
+ },
+ 'method': 'local',
+ },
+ {
+ 'enable_lru': true,
+ 'kwargs': {
+ 'connect_timeout': 10,
+ 'python_path': null,
+ 'password': null,
+ 'username': 'root',
+ 'sudo_path': null,
+ 'sudo_args': ['-H', '-S', '-n'],
+ },
+ 'method': 'sudo',
+ }
+ ]
diff --git a/tests/ansible/integration/connection_delegation/osa_container_standalone.yml b/tests/ansible/integration/connection_delegation/osa_container_standalone.yml
new file mode 100644
index 00000000..d6483bd6
--- /dev/null
+++ b/tests/ansible/integration/connection_delegation/osa_container_standalone.yml
@@ -0,0 +1,28 @@
+# Verify one OSA-style container has the correct config.
+
+- name: integration/connection_delegation/osa_container_standalone.yml
+ hosts: dtc-container-1
+ gather_facts: false
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - mitogen_get_stack:
+ register: out
+
+ - assert_equal:
+ left: out.result
+ right: [
+ {
+ 'kwargs': {
+ 'container': 'dtc-container-1',
+ 'docker_path': null,
+ 'kind': 'lxc',
+ 'lxc_info_path': null,
+ 'machinectl_path': null,
+ 'python_path': ['/usr/bin/python'],
+ 'username': null,
+ },
+ 'method': 'setns',
+ },
+ ]
diff --git a/tests/ansible/integration/connection_delegation/osa_delegate_to_self.yml b/tests/ansible/integration/connection_delegation/osa_delegate_to_self.yml
new file mode 100644
index 00000000..a761c432
--- /dev/null
+++ b/tests/ansible/integration/connection_delegation/osa_delegate_to_self.yml
@@ -0,0 +1,32 @@
+# OSA: Verify delegating the connection back to the container succeeds.
+
+- name: integration/connection_delegation/osa_delegate_to_self.yml
+ hosts: osa-container-1
+ vars:
+ target: osa-container-1
+ gather_facts: false
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - mitogen_get_stack:
+ delegate_to: "{{target}}"
+ register: out
+
+ - assert_equal:
+ left: out.result
+ right: [
+ {
+ 'kwargs': {
+ 'container': 'osa-container-1',
+ 'docker_path': null,
+ 'kind': 'lxc',
+ 'lxc_info_path': null,
+ 'lxc_path': null,
+ 'machinectl_path': null,
+ 'python_path': null,
+ 'username': 'ansible-cfg-remote-user',
+ },
+ 'method': 'setns',
+ },
+ ]
diff --git a/tests/ansible/integration/delegation/stack_construction.yml b/tests/ansible/integration/connection_delegation/stack_construction.yml
similarity index 63%
rename from tests/ansible/integration/delegation/stack_construction.yml
rename to tests/ansible/integration/connection_delegation/stack_construction.yml
index 4d9c75f4..0c48be3f 100644
--- a/tests/ansible/integration/delegation/stack_construction.yml
+++ b/tests/ansible/integration/connection_delegation/stack_construction.yml
@@ -4,7 +4,7 @@
# 'connection stack' -- this is just a list of dictionaries specifying a
# sequence of proxied Router connection methods and their kwargs used to
# establish the connection. That list is passed to ContextService, which loops
-# over the stack specifying via=(None or previous entry) for each connection
+# over the stack specifying via=(null or previous entry) for each connection
# method.
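+#
+# As an illustrative sketch (field values abridged), a two-hop stack might
+# look like:
+#   [{"method": "ssh",  "kwargs": {"hostname": "bastion", ...}},
+#    {"method": "sudo", "kwargs": {"username": "root", ...}}]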
# mitogen_get_stack is a magic action that returns the stack, so we can test
@@ -16,7 +16,7 @@
# the result list element, it seems to cause assert to silently succeed!
-- name: integration/delegation/stack_construction.yml
+- name: integration/connection_delegation/stack_construction.yml
hosts: cd-normal
tasks:
- meta: end_play
@@ -35,20 +35,20 @@
- mitogen_get_stack:
register: out
- - assert:
- that: |
- out.result == [
- {
- "kwargs": {
- "connect_timeout": 10,
- "doas_path": None,
- "password": None,
- "python_path": ["/usr/bin/python"],
- "username": "normal-user",
- },
- "method": "doas",
- }
- ]
+ - assert_equal:
+ left: out.result
+ right: [
+ {
+ "kwargs": {
+ "connect_timeout": 10,
+ "doas_path": null,
+ "password": null,
+ "python_path": null,
+ "username": "normal-user",
+ },
+ "method": "doas",
+ }
+ ]
- hosts: cd-normal
@@ -59,20 +59,23 @@
- mitogen_get_stack:
delegate_to: cd-alias
register: out
- - assert:
- that: |
- out.result == [
+ - assert_equal:
+ left: out.result
+ right: [
{
'kwargs': {
'check_host_keys': 'ignore',
+ 'compression': True,
'connect_timeout': 10,
'hostname': 'alias-host',
'identities_only': False,
- 'identity_file': None,
- 'password': None,
- 'port': None,
- 'python_path': None,
+ 'identity_file': null,
+ 'password': null,
+ 'port': null,
+ 'python_path': null,
'ssh_args': [
+ '-o',
+ 'UserKnownHostsFile=/dev/null',
'-o',
'ForwardAgent=yes',
'-o',
@@ -80,7 +83,7 @@
'-o',
'ControlPersist=60s',
],
- 'ssh_debug_level': None,
+ 'ssh_debug_level': null,
'ssh_path': 'ssh',
'username': 'alias-user',
},
@@ -96,20 +99,23 @@
- mitogen_get_stack:
register: out
- - assert:
- that: |
- out.result == [
+ - assert_equal:
+ left: out.result
+ right: [
{
'kwargs': {
'check_host_keys': 'ignore',
+ 'compression': True,
'connect_timeout': 10,
'hostname': 'alias-host',
'identities_only': False,
- 'identity_file': None,
- 'password': None,
- 'port': None,
- 'python_path': ['/usr/bin/python'],
+ 'identity_file': null,
+ 'password': null,
+ 'port': null,
+ 'python_path': null,
'ssh_args': [
+ '-o',
+ 'UserKnownHostsFile=/dev/null',
'-o',
'ForwardAgent=yes',
'-o',
@@ -117,7 +123,7 @@
'-o',
'ControlPersist=60s',
],
- 'ssh_debug_level': None,
+ 'ssh_debug_level': null,
'ssh_path': 'ssh',
'username': 'alias-user',
},
@@ -133,15 +139,15 @@
- mitogen_get_stack:
register: out
- - assert:
- that: |
- out.result == [
+ - assert_equal:
+ left: out.result
+ right: [
{
'kwargs': {
'connect_timeout': 10,
- 'doas_path': None,
- 'password': None,
- 'python_path': None,
+ 'doas_path': null,
+ 'password': null,
+ 'python_path': null,
'username': 'normal-user',
},
'method': 'doas',
@@ -149,14 +155,17 @@
{
'kwargs': {
'check_host_keys': 'ignore',
+ 'compression': True,
'connect_timeout': 10,
'hostname': 'cd-normal-normal',
'identities_only': False,
- 'identity_file': None,
- 'password': None,
- 'port': None,
- 'python_path': ['/usr/bin/python'],
+ 'identity_file': null,
+ 'password': null,
+ 'port': null,
+ 'python_path': null,
'ssh_args': [
+ '-o',
+ 'UserKnownHostsFile=/dev/null',
'-o',
'ForwardAgent=yes',
'-o',
@@ -164,9 +173,9 @@
'-o',
'ControlPersist=60s',
],
- 'ssh_debug_level': None,
+ 'ssh_debug_level': null,
'ssh_path': 'ssh',
- 'username': None,
+ 'username': 'ansible-cfg-remote-user',
},
'method': 'ssh',
},
@@ -180,20 +189,23 @@
- mitogen_get_stack:
register: out
- - assert:
- that: |
- out.result == [
+ - assert_equal:
+ left: out.result
+ right: [
{
'kwargs': {
'check_host_keys': 'ignore',
+ 'compression': True,
'connect_timeout': 10,
'hostname': 'alias-host',
'identities_only': False,
- 'identity_file': None,
- 'password': None,
- 'port': None,
- 'python_path': None,
+ 'identity_file': null,
+ 'password': null,
+ 'port': null,
+ 'python_path': null,
'ssh_args': [
+ '-o',
+ 'UserKnownHostsFile=/dev/null',
'-o',
'ForwardAgent=yes',
'-o',
@@ -201,7 +213,7 @@
'-o',
'ControlPersist=60s',
],
- 'ssh_debug_level': None,
+ 'ssh_debug_level': null,
'ssh_path': 'ssh',
'username': 'alias-user',
},
@@ -210,14 +222,17 @@
{
'kwargs': {
'check_host_keys': 'ignore',
+ 'compression': True,
'connect_timeout': 10,
'hostname': 'cd-normal-alias',
'identities_only': False,
- 'identity_file': None,
- 'password': None,
- 'port': None,
- 'python_path': ['/usr/bin/python'],
+ 'identity_file': null,
+ 'password': null,
+ 'port': null,
+ 'python_path': null,
'ssh_args': [
+ '-o',
+ 'UserKnownHostsFile=/dev/null',
'-o',
'ForwardAgent=yes',
'-o',
@@ -225,9 +240,9 @@
'-o',
'ControlPersist=60s',
],
- 'ssh_debug_level': None,
+ 'ssh_debug_level': null,
'ssh_path': 'ssh',
- 'username': None,
+ 'username': 'ansible-cfg-remote-user',
},
'method': 'ssh',
},
@@ -241,15 +256,15 @@
- mitogen_get_stack:
register: out
- - assert:
- that: |
- out.result == [
+ - assert_equal:
+ left: out.result
+ right: [
{
'kwargs': {
'connect_timeout': 10,
- 'doas_path': None,
- 'password': None,
- 'python_path': None,
+ 'doas_path': null,
+ 'password': null,
+ 'python_path': null,
'username': 'normal-user',
},
'method': 'doas',
@@ -257,14 +272,17 @@
{
'kwargs': {
'check_host_keys': 'ignore',
+ 'compression': True,
'connect_timeout': 10,
'hostname': 'cd-newuser-normal-normal',
'identities_only': False,
- 'identity_file': None,
- 'password': None,
- 'port': None,
- 'python_path': ['/usr/bin/python'],
+ 'identity_file': null,
+ 'password': null,
+ 'port': null,
+ 'python_path': null,
'ssh_args': [
+ '-o',
+ 'UserKnownHostsFile=/dev/null',
'-o',
'ForwardAgent=yes',
'-o',
@@ -272,7 +290,7 @@
'-o',
'ControlPersist=60s',
],
- 'ssh_debug_level': None,
+ 'ssh_debug_level': null,
'ssh_path': 'ssh',
'username': 'newuser-normal-normal-user',
},
@@ -289,20 +307,23 @@
- mitogen_get_stack:
delegate_to: cd-alias
register: out
- - assert:
- that: |
- out.result == [
+ - assert_equal:
+ left: out.result
+ right: [
{
'kwargs': {
'check_host_keys': 'ignore',
+ 'compression': True,
'connect_timeout': 10,
'hostname': 'alias-host',
'identities_only': False,
- 'identity_file': None,
- 'password': None,
- 'port': None,
- 'python_path': None,
+ 'identity_file': null,
+ 'password': null,
+ 'port': null,
+ 'python_path': null,
'ssh_args': [
+ '-o',
+ 'UserKnownHostsFile=/dev/null',
'-o',
'ForwardAgent=yes',
'-o',
@@ -310,7 +331,7 @@
'-o',
'ControlPersist=60s',
],
- 'ssh_debug_level': None,
+ 'ssh_debug_level': null,
'ssh_path': 'ssh',
'username': 'alias-user',
},
@@ -326,16 +347,16 @@
- local_action: mitogen_get_stack
register: out
- - assert:
- that: |
- out.result == [
- {
- 'kwargs': {
- 'python_path': None
- },
- 'method': 'local',
- },
- ]
+ - assert_equal:
+ left: out.result
+ right: [
+ {
+ 'kwargs': {
+ 'python_path': null
+ },
+ 'method': 'local',
+ },
+ ]
- hosts: cd-newuser-doas-normal
@@ -345,27 +366,27 @@
- mitogen_get_stack:
register: out
- - assert:
- that: |
- out.result == [
- {
- 'kwargs': {
- 'connect_timeout': 10,
- 'doas_path': None,
- 'password': None,
- 'python_path': None,
- 'username': 'normal-user',
- },
- 'method': 'doas',
- },
- {
- 'kwargs': {
- 'connect_timeout': 10,
- 'doas_path': None,
- 'password': None,
- 'python_path': ['/usr/bin/python'],
- 'username': 'newuser-doas-normal-user',
- },
- 'method': 'doas',
- },
- ]
+ - assert_equal:
+ left: out.result
+ right: [
+ {
+ 'kwargs': {
+ 'connect_timeout': 10,
+ 'doas_path': null,
+ 'password': null,
+ 'python_path': null,
+ 'username': 'normal-user',
+ },
+ 'method': 'doas',
+ },
+ {
+ 'kwargs': {
+ 'connect_timeout': 10,
+ 'doas_path': null,
+ 'password': null,
+ 'python_path': null,
+ 'username': 'newuser-doas-normal-user',
+ },
+ 'method': 'doas',
+ },
+ ]
diff --git a/tests/ansible/integration/connection_loader/all.yml b/tests/ansible/integration/connection_loader/all.yml
index 7a44bb2f..76ffe8f4 100644
--- a/tests/ansible/integration/connection_loader/all.yml
+++ b/tests/ansible/integration/connection_loader/all.yml
@@ -1,3 +1,3 @@
-- import_playbook: local_blemished.yml
-- import_playbook: paramiko_unblemished.yml
-- import_playbook: ssh_blemished.yml
+- include: local_blemished.yml
+- include: paramiko_unblemished.yml
+- include: ssh_blemished.yml
diff --git a/tests/ansible/integration/connection_loader/local_blemished.yml b/tests/ansible/integration/connection_loader/local_blemished.yml
index f0a6d4de..d0fcabba 100644
--- a/tests/ansible/integration/connection_loader/local_blemished.yml
+++ b/tests/ansible/integration/connection_loader/local_blemished.yml
@@ -1,13 +1,13 @@
# Ensure 'local' connections are grabbed.
-- name: integration/connection_loader__local_blemished.yml
+- name: integration/connection_loader/local_blemished.yml
hosts: test-targets
any_errors_fatal: true
tasks:
- determine_strategy:
- custom_python_detect_environment:
- connection: local
+ delegate_to: localhost
register: out
- assert:
diff --git a/tests/ansible/integration/connection_loader/paramiko_unblemished.yml b/tests/ansible/integration/connection_loader/paramiko_unblemished.yml
index a71af868..de8de4b0 100644
--- a/tests/ansible/integration/connection_loader/paramiko_unblemished.yml
+++ b/tests/ansible/integration/connection_loader/paramiko_unblemished.yml
@@ -1,6 +1,6 @@
# Ensure paramiko connections aren't grabbed.
-- name: integration/connection_loader__paramiko_unblemished.yml
+- name: integration/connection_loader/paramiko_unblemished.yml
hosts: test-targets
any_errors_fatal: true
tasks:
diff --git a/tests/ansible/integration/context_service/all.yml b/tests/ansible/integration/context_service/all.yml
index e70199f8..79148f7a 100644
--- a/tests/ansible/integration/context_service/all.yml
+++ b/tests/ansible/integration/context_service/all.yml
@@ -1,2 +1,3 @@
-- import_playbook: lru_one_target.yml
-- import_playbook: reconnection.yml
+- include: disconnect_cleanup.yml
+- include: lru_one_target.yml
+- include: reconnection.yml
diff --git a/tests/ansible/integration/context_service/disconnect_cleanup.yml b/tests/ansible/integration/context_service/disconnect_cleanup.yml
new file mode 100644
index 00000000..575358f6
--- /dev/null
+++ b/tests/ansible/integration/context_service/disconnect_cleanup.yml
@@ -0,0 +1,49 @@
+# issue #76, #370: ensure context state is forgotten on disconnect, including
+# the state of dependent contexts (e.g. sudo, connection delegation, ...).
+
+- name: integration/context_service/disconnect_cleanup.yml
+ hosts: test-targets[0]
+ any_errors_fatal: true
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - meta: end_play
+ when: ansible_version.full < '2.5.6'
+
+ # Start with a clean slate.
+ - mitogen_shutdown_all:
+
+ # Connect a few users.
+ - shell: "true"
+ become: true
+ become_user: "mitogen__user{{item}}"
+ with_items: [1, 2, 3]
+
+ # Verify current state.
+ - mitogen_action_script:
+ script: |
+ self._connection._connect()
+ result['dump'] = self._connection.parent.call_service(
+ service_name='ansible_mitogen.services.ContextService',
+ method_name='dump'
+ )
+ register: out
+
+ - assert:
+ that: out.dump|length == (play_hosts|length) * 4 # ssh account + 3 sudo accounts
+
+ - meta: reset_connection
+
+    # Verify the state was forgotten after the reset.
+ - mitogen_action_script:
+ script: |
+ self._connection._connect()
+ result['dump'] = self._connection.parent.call_service(
+ service_name='ansible_mitogen.services.ContextService',
+ method_name='dump'
+ )
+ register: out
+
+ - assert:
+ that: out.dump|length == play_hosts|length # just the ssh account
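+
+# dump() evidently returns one entry per live context: after the reset, only
+# each host's SSH account context should remain.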
diff --git a/tests/ansible/integration/context_service/reconnection.yml b/tests/ansible/integration/context_service/reconnection.yml
index f56719d8..eed1dfdb 100644
--- a/tests/ansible/integration/context_service/reconnection.yml
+++ b/tests/ansible/integration/context_service/reconnection.yml
@@ -5,15 +5,18 @@
hosts: test-targets
any_errors_fatal: true
tasks:
+ - mitogen_shutdown_all:
+
+ - custom_python_detect_environment:
+ register: ssh_account_env
- become: true
custom_python_detect_environment:
register: old_become_env
- become: true
- # This must be >1 for vanilla Ansible.
shell: |
- bash -c "( sleep 3; pkill -f sshd:; ) & disown"
+ bash -c "( sleep 3; kill -9 {{ssh_account_env.pid}}; ) & disown"
- connection: local
shell: sleep 3
diff --git a/tests/ansible/integration/delegation/all.yml b/tests/ansible/integration/delegation/all.yml
deleted file mode 100644
index 743ce157..00000000
--- a/tests/ansible/integration/delegation/all.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-- import_playbook: delegate_to_template.yml
-- import_playbook: osa_container_standalone.yml
-- import_playbook: osa_delegate_to_self.yml
-- import_playbook: stack_construction.yml
diff --git a/tests/ansible/integration/delegation/delegate_to_template.yml b/tests/ansible/integration/delegation/delegate_to_template.yml
deleted file mode 100644
index 2f0830c4..00000000
--- a/tests/ansible/integration/delegation/delegate_to_template.yml
+++ /dev/null
@@ -1,69 +0,0 @@
-# Ensure templated delegate_to field works.
-
-- name: integration/delegation/delegate_to_template.yml
- vars:
- physical_host: "cd-normal-alias"
- physical_hosts: ["cd-normal-alias", "cd-normal-normal"]
- hosts: test-targets
- gather_facts: no
- any_errors_fatal: true
- tasks:
- - meta: end_play
- when: not is_mitogen
-
- - mitogen_get_stack:
- delegate_to: "{{ physical_host }}"
- register: out
-
- - assert:
- that: |
- out.result == [
- {
- 'kwargs': {
- 'check_host_keys': 'ignore',
- 'connect_timeout': 10,
- 'hostname': 'alias-host',
- 'identities_only': False,
- 'identity_file': None,
- 'password': None,
- 'port': None,
- 'python_path': None,
- 'ssh_args': [
- '-o',
- 'ForwardAgent=yes',
- '-o',
- 'ControlMaster=auto',
- '-o',
- 'ControlPersist=60s',
- ],
- 'ssh_debug_level': None,
- 'ssh_path': 'ssh',
- 'username': 'alias-user',
- },
- 'method': 'ssh',
- },
- {
- 'kwargs': {
- 'check_host_keys': 'ignore',
- 'connect_timeout': 10,
- 'hostname': 'cd-normal-alias',
- 'identities_only': False,
- 'identity_file': None,
- 'password': None,
- 'port': None,
- 'python_path': None,
- 'ssh_args': [
- '-o',
- 'ForwardAgent=yes',
- '-o',
- 'ControlMaster=auto',
- '-o',
- 'ControlPersist=60s',
- ],
- 'ssh_debug_level': None,
- 'ssh_path': 'ssh',
- 'username': None,
- },
- 'method': 'ssh',
- }
- ]
diff --git a/tests/ansible/integration/delegation/osa_container_standalone.yml b/tests/ansible/integration/delegation/osa_container_standalone.yml
deleted file mode 100644
index b942ef63..00000000
--- a/tests/ansible/integration/delegation/osa_container_standalone.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Verify one OSA-style container has the correct config.
-
-- name: integration/delegation/container_standalone.yml
- hosts: dtc-container-1
- gather_facts: false
- tasks:
- - meta: end_play
- when: not is_mitogen
-
- - mitogen_get_stack:
- register: out
-
- - assert:
- that: |
- out.result == [
- {
- 'kwargs': {
- 'container': 'dtc-container-1',
- 'docker_path': None,
- 'kind': 'lxc',
- 'lxc_info_path': None,
- 'machinectl_path': None,
- 'python_path': ['/usr/bin/python'],
- 'username': None,
- },
- 'method': 'setns',
- },
- ]
diff --git a/tests/ansible/integration/delegation/osa_delegate_to_self.yml b/tests/ansible/integration/delegation/osa_delegate_to_self.yml
deleted file mode 100644
index 0915bbb8..00000000
--- a/tests/ansible/integration/delegation/osa_delegate_to_self.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-# OSA: Verify delegating the connection back to the container succeeds.
-
-- name: integration/delegation/osa_delegate_to_self.yml
- hosts: osa-container-1
- vars:
- target: osa-container-1
- gather_facts: false
- tasks:
- - meta: end_play
- when: not is_mitogen
-
- - mitogen_get_stack:
- delegate_to: "{{target}}"
- register: out
-
- - assert:
- that: |
- out.result == [
- {
- 'kwargs': {
- 'container': 'osa-container-1',
- 'docker_path': None,
- 'kind': 'lxc',
- 'lxc_info_path': None,
- 'machinectl_path': None,
- 'python_path': None,
- 'username': None,
- },
- 'method': 'setns',
- },
- ]
diff --git a/tests/ansible/integration/glibc_caches/all.yml b/tests/ansible/integration/glibc_caches/all.yml
index 7d524540..8cff4ea8 100644
--- a/tests/ansible/integration/glibc_caches/all.yml
+++ b/tests/ansible/integration/glibc_caches/all.yml
@@ -1,2 +1,2 @@
-- import_playbook: resolv_conf.yml
+- include: resolv_conf.yml
diff --git a/tests/ansible/integration/glibc_caches/resolv_conf.yml b/tests/ansible/integration/glibc_caches/resolv_conf.yml
index 643b83ec..da78c308 100644
--- a/tests/ansible/integration/glibc_caches/resolv_conf.yml
+++ b/tests/ansible/integration/glibc_caches/resolv_conf.yml
@@ -12,26 +12,38 @@
- mitogen_test_gethostbyname:
name: www.google.com
register: out
- when: ansible_virtualization_type == "docker"
+ when: |
+ ansible_virtualization_type == "docker" and
+ ansible_python_version > "2.5"
- shell: cp /etc/resolv.conf /tmp/resolv.conf
- when: ansible_virtualization_type == "docker"
+ when: |
+ ansible_virtualization_type == "docker" and
+ ansible_python_version > "2.5"
- shell: echo > /etc/resolv.conf
- when: ansible_virtualization_type == "docker"
+ when: |
+ ansible_virtualization_type == "docker" and
+ ansible_python_version > "2.5"
- mitogen_test_gethostbyname:
name: www.google.com
register: out
ignore_errors: true
- when: ansible_virtualization_type == "docker"
+ when: |
+ ansible_virtualization_type == "docker" and
+ ansible_python_version > "2.5"
- shell: cat /tmp/resolv.conf > /etc/resolv.conf
- when: ansible_virtualization_type == "docker"
+ when: |
+ ansible_virtualization_type == "docker" and
+ ansible_python_version > "2.5"
- assert:
that:
- out.failed
- '"Name or service not known" in out.msg or
"Temporary failure in name resolution" in out.msg'
- when: ansible_virtualization_type == "docker"
+ when: |
+ ansible_virtualization_type == "docker" and
+ ansible_python_version > "2.5"
diff --git a/tests/ansible/integration/local/all.yml b/tests/ansible/integration/local/all.yml
index 383a9108..5f8b4dd4 100644
--- a/tests/ansible/integration/local/all.yml
+++ b/tests/ansible/integration/local/all.yml
@@ -1,4 +1,4 @@
-- import_playbook: cwd_preserved.yml
-- import_playbook: env_preserved.yml
+- include: cwd_preserved.yml
+- include: env_preserved.yml
diff --git a/tests/ansible/integration/module_utils/all.yml b/tests/ansible/integration/module_utils/all.yml
index c8b8f2fb..b68e2ee3 100644
--- a/tests/ansible/integration/module_utils/all.yml
+++ b/tests/ansible/integration/module_utils/all.yml
@@ -1,6 +1,6 @@
-#- import_playbook: from_config_path.yml
-#- import_playbook: from_config_path_pkg.yml
-#- import_playbook: adjacent_to_playbook.yml
-- import_playbook: adjacent_to_role.yml
-#- import_playbook: overrides_builtin.yml
+#- include: from_config_path.yml
+#- include: from_config_path_pkg.yml
+#- include: adjacent_to_playbook.yml
+- include: adjacent_to_role.yml
+#- include: overrides_builtin.yml
diff --git a/tests/ansible/integration/playbook_semantics/all.yml b/tests/ansible/integration/playbook_semantics/all.yml
index 6c8dd065..ec7a9a07 100644
--- a/tests/ansible/integration/playbook_semantics/all.yml
+++ b/tests/ansible/integration/playbook_semantics/all.yml
@@ -1,4 +1,4 @@
-- import_playbook: become_flags.yml
-- import_playbook: delegate_to.yml
-- import_playbook: environment.yml
-- import_playbook: with_items.yml
+- include: become_flags.yml
+- include: delegate_to.yml
+- include: environment.yml
+- include: with_items.yml
diff --git a/tests/ansible/integration/process/all.yml b/tests/ansible/integration/process/all.yml
new file mode 100644
index 00000000..a309113a
--- /dev/null
+++ b/tests/ansible/integration/process/all.yml
@@ -0,0 +1 @@
+- include: "unix_socket_cleanup.yml"
diff --git a/tests/ansible/integration/process/unix_socket_cleanup.yml b/tests/ansible/integration/process/unix_socket_cleanup.yml
new file mode 100644
index 00000000..11a0efe1
--- /dev/null
+++ b/tests/ansible/integration/process/unix_socket_cleanup.yml
@@ -0,0 +1,28 @@
+
+- hosts: test-targets[0]
+ tasks:
+ - mitogen_action_script:
+ script: |
+ import glob
+ result['sockets'] = glob.glob('/tmp/mitogen_unix*.sock')
+ register: socks
+
+ - shell: >
+ ANSIBLE_STRATEGY=mitogen_linear
+ ANSIBLE_SSH_ARGS=""
+ ansible -m shell -c local -a whoami -i "{{MITOGEN_INVENTORY_FILE}}" test-targets
+ args:
+ chdir: ../..
+ register: out
+ connection: local
+ when: is_mitogen
+
+ - mitogen_action_script:
+ script: |
+ import glob
+ result['sockets'] = glob.glob('/tmp/mitogen_unix*.sock')
+ register: socks2
+
+ - assert_equal:
+ left: socks
+ right: socks2
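+
+# The two glob snapshots must match: any /tmp/mitogen_unix*.sock created by
+# the nested ansible run should be gone again by the time it exits.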
diff --git a/tests/ansible/integration/runner/_etc_environment_global.yml b/tests/ansible/integration/runner/_etc_environment_global.yml
new file mode 100644
index 00000000..2d22b952
--- /dev/null
+++ b/tests/ansible/integration/runner/_etc_environment_global.yml
@@ -0,0 +1,45 @@
+# /etc/environment
+
+- file:
+ path: /etc/environment
+ state: absent
+ become: true
+
+- shell: echo $MAGIC_ETC_ENV
+ register: echo
+
+- assert:
+ that: echo.stdout == ""
+
+- copy:
+ dest: /etc/environment
+ content: |
+ MAGIC_ETC_ENV=555
+ become: true
+
+- meta: reset_connection
+
+#- mitogen_shutdown_all:
+ #when: not is_mitogen
+
+- shell: echo $MAGIC_ETC_ENV
+ register: echo
+
+- assert:
+ that: echo.stdout == "555"
+
+- file:
+ path: /etc/environment
+ state: absent
+ become: true
+
+- meta: reset_connection
+
+- mitogen_shutdown_all:
+ when: not is_mitogen
+
+- shell: echo $MAGIC_ETC_ENV
+ register: echo
+
+- assert:
+ that: echo.stdout == ""
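+
+# The reset_connection steps matter: /etc/environment is applied by PAM at
+# session setup, so a change only becomes visible once a fresh login session
+# (i.e. a new connection) is established.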
diff --git a/tests/ansible/integration/runner/_etc_environment_user.yml b/tests/ansible/integration/runner/_etc_environment_user.yml
new file mode 100644
index 00000000..ca1dc5cc
--- /dev/null
+++ b/tests/ansible/integration/runner/_etc_environment_user.yml
@@ -0,0 +1,32 @@
+# ~/.pam_environment
+
+- file:
+ path: ~/.pam_environment
+ state: absent
+
+- shell: echo $MAGIC_PAM_ENV
+ register: echo
+
+- assert:
+ that: echo.stdout == ""
+
+- copy:
+ dest: ~/.pam_environment
+ content: |
+ MAGIC_PAM_ENV=321
+
+- shell: echo $MAGIC_PAM_ENV
+ register: echo
+
+- assert:
+ that: echo.stdout == "321"
+
+- file:
+ path: ~/.pam_environment
+ state: absent
+
+- shell: echo $MAGIC_PAM_ENV
+ register: echo
+
+- assert:
+ that: echo.stdout == ""
diff --git a/tests/ansible/integration/runner/_reset_conn.yml b/tests/ansible/integration/runner/_reset_conn.yml
new file mode 100644
index 00000000..30f1b0c0
--- /dev/null
+++ b/tests/ansible/integration/runner/_reset_conn.yml
@@ -0,0 +1,2 @@
+
+- meta: reset_connection
diff --git a/tests/ansible/integration/runner/all.yml b/tests/ansible/integration/runner/all.yml
index 9dd209d7..dc23901f 100644
--- a/tests/ansible/integration/runner/all.yml
+++ b/tests/ansible/integration/runner/all.yml
@@ -1,21 +1,23 @@
-- import_playbook: atexit.yml
-- import_playbook: builtin_command_module.yml
-- import_playbook: custom_bash_hashbang_argument.yml
-- import_playbook: custom_bash_old_style_module.yml
-- import_playbook: custom_bash_want_json_module.yml
-- import_playbook: custom_binary_producing_json.yml
-- import_playbook: custom_binary_producing_junk.yml
-- import_playbook: custom_binary_single_null.yml
-- import_playbook: custom_perl_json_args_module.yml
-- import_playbook: custom_perl_want_json_module.yml
-- import_playbook: custom_python_json_args_module.yml
-- import_playbook: custom_python_new_style_missing_interpreter.yml
-- import_playbook: custom_python_new_style_module.yml
-- import_playbook: custom_python_want_json_module.yml
-- import_playbook: custom_script_interpreter.yml
-- import_playbook: environment_isolation.yml
-- import_playbook: etc_environment.yml
-- import_playbook: forking_active.yml
-- import_playbook: forking_correct_parent.yml
-- import_playbook: forking_inactive.yml
-- import_playbook: missing_module.yml
+- include: atexit.yml
+- include: builtin_command_module.yml
+- include: crashy_new_style_module.yml
+- include: custom_bash_hashbang_argument.yml
+- include: custom_bash_old_style_module.yml
+- include: custom_bash_want_json_module.yml
+- include: custom_binary_producing_json.yml
+- include: custom_binary_producing_junk.yml
+- include: custom_binary_single_null.yml
+- include: custom_perl_json_args_module.yml
+- include: custom_perl_want_json_module.yml
+- include: custom_python_json_args_module.yml
+- include: custom_python_new_style_missing_interpreter.yml
+- include: custom_python_new_style_module.yml
+- include: custom_python_want_json_module.yml
+- include: custom_script_interpreter.yml
+- include: environment_isolation.yml
+# I hate this test. I hope it dies; it has caused nothing but misery and suffering.
+#- include: etc_environment.yml
+- include: forking_active.yml
+- include: forking_correct_parent.yml
+- include: forking_inactive.yml
+- include: missing_module.yml
diff --git a/tests/ansible/integration/runner/atexit.yml b/tests/ansible/integration/runner/atexit.yml
index 872cdd57..65d27d59 100644
--- a/tests/ansible/integration/runner/atexit.yml
+++ b/tests/ansible/integration/runner/atexit.yml
@@ -1,10 +1,13 @@
-# issue #397: newer Ansibles rely on atexit to cleanup their temporary
-# directories. Ensure atexit handlers run during runner completion.
+# issue #397, #454: newer Ansibles rely on atexit to cleanup their temporary
+# directories. Ensure atexit handlers calling shutil.rmtree() run during runner
+# completion.
- name: integration/runner/atexit.yml
hosts: test-targets
gather_facts: false
any_errors_fatal: false
+ vars:
+ path: /tmp/atexit-should-delete-this
tasks:
#
@@ -14,18 +17,15 @@
- custom_python_run_script:
script: |
- import atexit
- atexit.register(lambda:
- open('/tmp/atexit-was-triggered', 'w').write('yep'))
+ import atexit, shutil
+ path = '{{path}}'
+ os.mkdir(path, int('777', 8))
+ atexit.register(shutil.rmtree, path)
- - slurp:
- path: /tmp/atexit-was-triggered
+ - stat:
+ path: "{{path}}"
register: out
- assert:
that:
- - out.content|b64decode == "yep"
-
- - file:
- path: /tmp/atexit-was-triggered
- state: absent
+ - not out.stat.exists
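+
+# If the registered rmtree handler had not run at interpreter exit, the
+# directory created by the module would survive and the stat above would
+# report it as existing.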
diff --git a/tests/ansible/integration/runner/crashy_new_style_module.yml b/tests/ansible/integration/runner/crashy_new_style_module.yml
new file mode 100644
index 00000000..40ee7f88
--- /dev/null
+++ b/tests/ansible/integration/runner/crashy_new_style_module.yml
@@ -0,0 +1,18 @@
+# issue #527: catch exceptions from crashy modules.
+
+- name: integration/runner/crashy_new_style_module.yml
+ hosts: test-targets
+ tasks:
+ - custom_python_run_script:
+ script: kaboom
+ register: out
+ ignore_errors: true
+
+ - assert:
+ that:
+ - not out.changed
+ - out.rc == 1
+ - out.msg == "MODULE FAILURE"
+ - out.module_stdout == ""
+ - "'Traceback (most recent call last)' in out.module_stderr"
+ - "\"NameError: name 'kaboom' is not defined\" in out.module_stderr"
diff --git a/tests/ansible/integration/runner/custom_binary_producing_junk.yml b/tests/ansible/integration/runner/custom_binary_producing_junk.yml
index 41572aad..b9cfb6b4 100644
--- a/tests/ansible/integration/runner/custom_binary_producing_junk.yml
+++ b/tests/ansible/integration/runner/custom_binary_producing_junk.yml
@@ -25,8 +25,8 @@
any_errors_fatal: true
tasks:
- assert:
- that: |
- out.failed and
- out.results[0].failed and
- out.results[0].msg == 'MODULE FAILURE' and
- out.results[0].rc == 0
+ that:
+ - out.failed
+ - out.results[0].failed
+ - out.results[0].msg.startswith('MODULE FAILURE')
+ - out.results[0].rc == 0
diff --git a/tests/ansible/integration/runner/custom_binary_single_null.yml b/tests/ansible/integration/runner/custom_binary_single_null.yml
index bab84381..d8a1af0c 100644
--- a/tests/ansible/integration/runner/custom_binary_single_null.yml
+++ b/tests/ansible/integration/runner/custom_binary_single_null.yml
@@ -10,15 +10,15 @@
- hosts: test-targets
any_errors_fatal: true
tasks:
- - assert:
- that:
- - "out.failed"
- - "out.results[0].failed"
- - "out.results[0].msg == 'MODULE FAILURE'"
- - "out.results[0].module_stdout.startswith('/bin/sh: ')"
- - |
- out.results[0].module_stdout.endswith('/custom_binary_single_null: cannot execute binary file\r\n') or
- out.results[0].module_stdout.endswith('/custom_binary_single_null: Exec format error\r\n')
+ - assert:
+ that:
+ - "out.failed"
+ - "out.results[0].failed"
+ - "out.results[0].msg.startswith('MODULE FAILURE')"
+ - "out.results[0].module_stdout.startswith('/bin/sh: ')"
+ - |
+ out.results[0].module_stdout.endswith('/custom_binary_single_null: cannot execute binary file\r\n') or
+ out.results[0].module_stdout.endswith('/custom_binary_single_null: Exec format error\r\n')
# Can't test this: Mitogen returns 126, 2.5.x returns 126, 2.4.x discarded the
diff --git a/tests/ansible/integration/runner/custom_perl_json_args_module.yml b/tests/ansible/integration/runner/custom_perl_json_args_module.yml
index 3485463d..f705cfe4 100644
--- a/tests/ansible/integration/runner/custom_perl_json_args_module.yml
+++ b/tests/ansible/integration/runner/custom_perl_json_args_module.yml
@@ -8,8 +8,12 @@
register: out
- assert:
- that: |
- (not out.changed) and
- (not out.results[0].changed) and
- out.results[0].input[0].foo and
- out.results[0].message == 'I am a perl script! Here is my input.'
+ that:
+ - out.results[0].input.foo
+ - out.results[0].message == 'I am a perl script! Here is my input.'
+
+ - when: ansible_version.full > '2.4'
+ assert:
+ that:
+ - (not out.changed)
+ - (not out.results[0].changed)
diff --git a/tests/ansible/integration/runner/custom_perl_want_json_module.yml b/tests/ansible/integration/runner/custom_perl_want_json_module.yml
index 69a1b57b..24527164 100644
--- a/tests/ansible/integration/runner/custom_perl_want_json_module.yml
+++ b/tests/ansible/integration/runner/custom_perl_want_json_module.yml
@@ -8,8 +8,12 @@
register: out
- assert:
- that: |
- (not out.changed) and
- (not out.results[0].changed) and
- out.results[0].input[0].foo and
- out.results[0].message == 'I am a want JSON perl script! Here is my input.'
+ that:
+ - out.results[0].input.foo
+ - out.results[0].message == 'I am a want JSON perl script! Here is my input.'
+
+ - when: ansible_version.full > '2.4'
+ assert:
+ that:
+ - (not out.changed)
+ - (not out.results[0].changed)
diff --git a/tests/ansible/integration/runner/etc_environment.yml b/tests/ansible/integration/runner/etc_environment.yml
index 0037698a..df15bbdb 100644
--- a/tests/ansible/integration/runner/etc_environment.yml
+++ b/tests/ansible/integration/runner/etc_environment.yml
@@ -3,78 +3,13 @@
# but less likely to brick a development workstation
- name: integration/runner/etc_environment.yml
- hosts: test-targets
+ hosts: test-targets[0]
any_errors_fatal: true
gather_facts: true
tasks:
- # ~/.pam_environment
+ - include: _etc_environment_user.yml
+ when: ansible_system == "Linux" and is_mitogen
- - file:
- path: ~/.pam_environment
- state: absent
-
- - shell: echo $MAGIC_PAM_ENV
- register: echo
-
- - assert:
- that: echo.stdout == ""
-
- - copy:
- dest: ~/.pam_environment
- content: |
- MAGIC_PAM_ENV=321
-
- - shell: echo $MAGIC_PAM_ENV
- register: echo
-
- - assert:
- that: echo.stdout == "321"
-
- - file:
- path: ~/.pam_environment
- state: absent
-
- - shell: echo $MAGIC_PAM_ENV
- register: echo
-
- - assert:
- that: echo.stdout == ""
-
-
- # /etc/environment
- - meta: end_play
- when: ansible_virtualization_type != "docker"
-
- - file:
- path: /etc/environment
- state: absent
- become: true
-
- - shell: echo $MAGIC_ETC_ENV
- register: echo
-
- - assert:
- that: echo.stdout == ""
-
- - copy:
- dest: /etc/environment
- content: |
- MAGIC_ETC_ENV=555
- become: true
-
- - shell: echo $MAGIC_ETC_ENV
- register: echo
-
- - assert:
- that: echo.stdout == "555"
-
- - file:
- path: /etc/environment
- state: absent
- become: true
-
- - shell: echo $MAGIC_ETC_ENV
- register: echo
-
- - assert:
- that: echo.stdout == ""
+ - include_tasks: _etc_environment_global.yml
+ # Don't destroy laptops.
+ when: ansible_virtualization_type == "docker"
diff --git a/tests/ansible/integration/runner/forking_correct_parent.yml b/tests/ansible/integration/runner/forking_correct_parent.yml
index e8207676..c70db4e3 100644
--- a/tests/ansible/integration/runner/forking_correct_parent.yml
+++ b/tests/ansible/integration/runner/forking_correct_parent.yml
@@ -5,7 +5,17 @@
tasks:
# Verify mitogen_task_isolation=fork forks from "virginal fork parent", not
- # shared interpreter.
+  # shared interpreter, but only if forking is enabled (it never is on
+  # Python 2.4).
+
+ - mitogen_action_script:
+ script: |
+ self._connection._connect()
+ result['uses_fork'] = (
+ self._connection.init_child_result['fork_context'] is not None
+ )
+ register: forkmode
+ when: is_mitogen
- name: get regular process ID.
custom_python_detect_environment:
@@ -22,5 +32,12 @@
- assert:
that:
- fork_proc.pid != regular_proc.pid
- - fork_proc.ppid != regular_proc.pid
when: is_mitogen
+
+ - assert:
+ that: fork_proc.ppid != regular_proc.pid
+ when: is_mitogen and forkmode.uses_fork
+
+ - assert:
+ that: fork_proc.ppid == regular_proc.pid
+ when: is_mitogen and not forkmode.uses_fork
diff --git a/tests/ansible/integration/runner/missing_module.yml b/tests/ansible/integration/runner/missing_module.yml
index 064a9bf8..205c8632 100644
--- a/tests/ansible/integration/runner/missing_module.yml
+++ b/tests/ansible/integration/runner/missing_module.yml
@@ -6,7 +6,7 @@
- connection: local
command: |
ansible -vvv
- -i "{{inventory_file}}"
+ -i "{{MITOGEN_INVENTORY_FILE}}"
test-targets
-m missing_module
args:
diff --git a/tests/ansible/integration/ssh/all.yml b/tests/ansible/integration/ssh/all.yml
index 2425943a..28495f49 100644
--- a/tests/ansible/integration/ssh/all.yml
+++ b/tests/ansible/integration/ssh/all.yml
@@ -1,2 +1,3 @@
-- import_playbook: timeouts.yml
-- import_playbook: variables.yml
+- include: config.yml
+- include: timeouts.yml
+- include: variables.yml
diff --git a/tests/ansible/integration/ssh/config.yml b/tests/ansible/integration/ssh/config.yml
new file mode 100644
index 00000000..07ad1c21
--- /dev/null
+++ b/tests/ansible/integration/ssh/config.yml
@@ -0,0 +1,19 @@
+# issue #334: test expanduser() on key file during config generation.
+
+- name: integration/ssh/config.yml
+ hosts: test-targets
+ connection: ssh
+ vars:
+ ansible_private_key_file: ~/fakekey
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - mitogen_get_stack:
+ register: out
+
+ - assert:
+ that: |
+ out.result[0].kwargs.identity_file == (
+ lookup('env', 'HOME') + '/fakekey'
+ )
diff --git a/tests/ansible/integration/ssh/timeouts.yml b/tests/ansible/integration/ssh/timeouts.yml
index 0fd416f5..92fd9307 100644
--- a/tests/ansible/integration/ssh/timeouts.yml
+++ b/tests/ansible/integration/ssh/timeouts.yml
@@ -6,7 +6,7 @@
- connection: local
command: |
ansible -vvv
- -i "{{inventory_file}}"
+ -i "{{MITOGEN_INVENTORY_FILE}}"
test-targets
-m custom_python_detect_environment
-e ansible_user=mitogen__slow_user -e ansible_password=slow_user_password
diff --git a/tests/ansible/integration/ssh/variables.yml b/tests/ansible/integration/ssh/variables.yml
index dc4fe434..71536391 100644
--- a/tests/ansible/integration/ssh/variables.yml
+++ b/tests/ansible/integration/ssh/variables.yml
@@ -3,7 +3,7 @@
# whatever reason.
- name: integration/ssh/variables.yml
- hosts: test-targets
+ hosts: test-targets[0]
connection: local
vars:
# ControlMaster has the effect of caching the previous auth to the same
@@ -18,7 +18,7 @@
shell: >
ANSIBLE_STRATEGY=mitogen_linear
ANSIBLE_SSH_ARGS=""
- ansible -m shell -a whoami -i "{{inventory_file}}" test-targets
+ ansible -m shell -a whoami -i "{{MITOGEN_INVENTORY_FILE}}" test-targets
-e ansible_ssh_user=mitogen__has_sudo
-e ansible_ssh_pass=has_sudo_password
args:
@@ -29,7 +29,7 @@
- shell: >
ANSIBLE_STRATEGY=mitogen_linear
ANSIBLE_SSH_ARGS=""
- ansible -m shell -a whoami -i "{{inventory_file}}" test-targets
+ ansible -m shell -a whoami -i "{{MITOGEN_INVENTORY_FILE}}" test-targets
-e ansible_ssh_user=mitogen__has_sudo
-e ansible_ssh_pass=wrong_password
args:
@@ -47,7 +47,7 @@
shell: >
ANSIBLE_STRATEGY=mitogen_linear
ANSIBLE_SSH_ARGS=""
- ansible -m shell -a whoami -i "{{inventory_file}}" test-targets
+ ansible -m shell -a whoami -i "{{MITOGEN_INVENTORY_FILE}}" test-targets
-e ansible_user=mitogen__has_sudo
-e ansible_ssh_pass=has_sudo_password
args:
@@ -58,7 +58,7 @@
- shell: >
ANSIBLE_STRATEGY=mitogen_linear
ANSIBLE_SSH_ARGS=""
- ansible -m shell -a whoami -i "{{inventory_file}}" test-targets
+ ansible -m shell -a whoami -i "{{MITOGEN_INVENTORY_FILE}}" test-targets
-e ansible_user=mitogen__has_sudo
-e ansible_ssh_pass=wrong_password
args:
@@ -76,7 +76,7 @@
shell: >
ANSIBLE_STRATEGY=mitogen_linear
ANSIBLE_SSH_ARGS=""
- ansible -m shell -a whoami -i "{{inventory_file}}" test-targets
+ ansible -m shell -a whoami -i "{{MITOGEN_INVENTORY_FILE}}" test-targets
-e ansible_user=mitogen__has_sudo
-e ansible_password=has_sudo_password
args:
@@ -87,7 +87,7 @@
- shell: >
ANSIBLE_STRATEGY=mitogen_linear
ANSIBLE_SSH_ARGS=""
- ansible -m shell -a whoami -i "{{inventory_file}}" test-targets
+ ansible -m shell -a whoami -i "{{MITOGEN_INVENTORY_FILE}}" test-targets
-e ansible_user=mitogen__has_sudo
-e ansible_password=wrong_password
args:
@@ -110,7 +110,7 @@
shell: >
ANSIBLE_STRATEGY=mitogen_linear
ANSIBLE_SSH_ARGS=""
- ansible -m shell -a whoami -i "{{inventory_file}}" test-targets
+ ansible -m shell -a whoami -i "{{MITOGEN_INVENTORY_FILE}}" test-targets
-e ansible_user=mitogen__has_sudo_pubkey
-e ansible_ssh_private_key_file=../data/docker/mitogen__has_sudo_pubkey.key
args:
@@ -121,7 +121,7 @@
- shell: >
ANSIBLE_STRATEGY=mitogen_linear
ANSIBLE_SSH_ARGS=""
- ansible -m shell -a whoami -i "{{inventory_file}}" test-targets
+ ansible -m shell -a whoami -i "{{MITOGEN_INVENTORY_FILE}}" test-targets
-e ansible_user=mitogen__has_sudo
-e ansible_ssh_private_key_file=/dev/null
args:
diff --git a/tests/ansible/integration/strategy/_mixed_mitogen_vanilla.yml b/tests/ansible/integration/strategy/_mixed_mitogen_vanilla.yml
index 7ac39e8e..1ec76fd1 100644
--- a/tests/ansible/integration/strategy/_mixed_mitogen_vanilla.yml
+++ b/tests/ansible/integration/strategy/_mixed_mitogen_vanilla.yml
@@ -2,9 +2,10 @@
# issue #294: ensure running mixed vanilla/Mitogen succeeds.
- name: integration/strategy/_mixed_mitogen_vanilla.yml (mitogen_linear)
- hosts: test-targets
+ hosts: test-targets[0]
any_errors_fatal: true
strategy: mitogen_linear
+ run_once: true
tasks:
- custom_python_detect_environment:
register: out
@@ -15,8 +16,9 @@
- assert:
that: strategy == 'ansible.plugins.strategy.mitogen_linear.StrategyModule'
+
- name: integration/strategy/_mixed_mitogen_vanilla.yml (linear)
- hosts: test-targets
+ hosts: test-targets[0]
any_errors_fatal: true
strategy: linear
tasks:
@@ -31,7 +33,7 @@
- name: integration/strategy/_mixed_mitogen_vanilla.yml (mitogen_linear)
- hosts: test-targets
+ hosts: test-targets[0]
any_errors_fatal: true
strategy: mitogen_linear
tasks:
diff --git a/tests/ansible/integration/strategy/_mixed_vanilla_mitogen.yml b/tests/ansible/integration/strategy/_mixed_vanilla_mitogen.yml
index 891787af..babcab3f 100644
--- a/tests/ansible/integration/strategy/_mixed_vanilla_mitogen.yml
+++ b/tests/ansible/integration/strategy/_mixed_vanilla_mitogen.yml
@@ -2,7 +2,7 @@
# issue #294: ensure running mixed vanilla/Mitogen succeeds.
- name: integration/strategy/_mixed_vanilla_mitogen.yml (linear)
- hosts: test-targets
+ hosts: test-targets[0]
any_errors_fatal: true
strategy: linear
tasks:
@@ -16,7 +16,7 @@
that: strategy == 'ansible.plugins.strategy.linear.StrategyModule'
- name: integration/strategy/_mixed_vanilla_mitogen.yml (mitogen_linear)
- hosts: test-targets
+ hosts: test-targets[0]
any_errors_fatal: true
strategy: mitogen_linear
tasks:
@@ -31,7 +31,7 @@
- name: integration/strategy/_mixed_vanilla_mitogen.yml (linear)
- hosts: test-targets
+ hosts: test-targets[0]
any_errors_fatal: true
strategy: linear
tasks:
diff --git a/tests/ansible/integration/strategy/all.yml b/tests/ansible/integration/strategy/all.yml
index 3304817c..b519e1bc 100644
--- a/tests/ansible/integration/strategy/all.yml
+++ b/tests/ansible/integration/strategy/all.yml
@@ -1 +1 @@
-- import_playbook: mixed_vanilla_mitogen.yml
+- include: mixed_vanilla_mitogen.yml
diff --git a/tests/ansible/integration/strategy/mixed_vanilla_mitogen.yml b/tests/ansible/integration/strategy/mixed_vanilla_mitogen.yml
index 61a55825..206f80bd 100644
--- a/tests/ansible/integration/strategy/mixed_vanilla_mitogen.yml
+++ b/tests/ansible/integration/strategy/mixed_vanilla_mitogen.yml
@@ -1,12 +1,13 @@
- name: integration/strategy/mixed_vanilla_mitogen.yml (linear->mitogen->linear)
- hosts: test-targets
+ hosts: test-targets[0]
any_errors_fatal: true
tasks:
- connection: local
command: |
ansible-playbook
- -i "{{inventory_file}}"
+ -i "{{MITOGEN_INVENTORY_FILE}}"
+ -vvv
integration/strategy/_mixed_mitogen_vanilla.yml
args:
chdir: ../..
@@ -15,7 +16,8 @@
- connection: local
command: |
ansible-playbook
- -i "{{inventory_file}}"
+ -i "{{MITOGEN_INVENTORY_FILE}}"
+ -vvv
integration/strategy/_mixed_vanilla_mitogen.yml
args:
chdir: ../..
diff --git a/tests/ansible/integration/stub_connections/README.md b/tests/ansible/integration/stub_connections/README.md
new file mode 100644
index 00000000..e12d5557
--- /dev/null
+++ b/tests/ansible/integration/stub_connections/README.md
@@ -0,0 +1,9 @@
+
+# `stub_connections/`
+
+The playbooks in this directory use stub implementations of various third-party
+tools (kubectl etc.) to verify that the arguments Ansible passes to Mitogen,
+and that Mitogen subsequently passes onward to the tool, result in something
+that looks sane.
+
+These are bare-minimum tests, just enough to ensure sporadically tested
+connection methods haven't broken in embarrassingly obvious ways.
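+
+For example, a stub only needs to leave markers in the environment and then
+run the command it was handed. A hypothetical sketch (the real stubs live in
+`tests/data/stubs/`; names and argv handling here are illustrative only):
+
+```python
+#!/usr/bin/env python
+# stub-kubectl.py (sketch): record argv so playbooks can assert on it, leave a
+# marker variable, then run the wrapped command so the connection still boots.
+import json
+import os
+import subprocess
+import sys
+
+os.environ['ORIGINAL_ARGV'] = json.dumps(sys.argv)
+os.environ['THIS_IS_STUB_KUBECTL'] = '1'
+# The real stubs know where the wrapped interpreter sits in argv; this sketch
+# naively runs everything after the stub's own name.
+subprocess.check_call(sys.argv[1:])
+```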
diff --git a/tests/ansible/integration/stub_connections/_end_play_if_not_sudo_linux.yml b/tests/ansible/integration/stub_connections/_end_play_if_not_sudo_linux.yml
new file mode 100644
index 00000000..55997a72
--- /dev/null
+++ b/tests/ansible/integration/stub_connections/_end_play_if_not_sudo_linux.yml
@@ -0,0 +1,17 @@
+# End the play if we're not on Linux or a raw 'sudo' command isn't available.
+# Expects connection: local.
+
+- shell: uname -s
+ register: out
+
+- meta: end_play
+ when: out.stdout != 'Linux'
+
+- command: sudo -n whoami
+ args:
+ warn: false
+ ignore_errors: true
+ register: sudo_available
+
+- meta: end_play
+ when: sudo_available.rc != 0
diff --git a/tests/ansible/integration/stub_connections/all.yml b/tests/ansible/integration/stub_connections/all.yml
new file mode 100644
index 00000000..e1810138
--- /dev/null
+++ b/tests/ansible/integration/stub_connections/all.yml
@@ -0,0 +1,7 @@
+- include: kubectl.yml
+- include: lxc.yml
+- include: lxd.yml
+- include: mitogen_doas.yml
+- include: mitogen_sudo.yml
+- include: setns_lxc.yml
+- include: setns_lxd.yml
diff --git a/tests/ansible/integration/stub_connections/kubectl.yml b/tests/ansible/integration/stub_connections/kubectl.yml
new file mode 100644
index 00000000..ba53d1e0
--- /dev/null
+++ b/tests/ansible/integration/stub_connections/kubectl.yml
@@ -0,0 +1,21 @@
+
+- name: integration/stub_connections/kubectl.yml
+ hosts: test-targets
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - meta: end_play
+ when: ansible_version.full < '2.5'
+
+ - custom_python_detect_environment:
+ vars:
+ ansible_connection: kubectl
+ mitogen_kubectl_path: stub-kubectl.py
+ register: out
+
+ - assert:
+ that:
+ - out.env.THIS_IS_STUB_KUBECTL == '1'
diff --git a/tests/ansible/integration/stub_connections/lxc.yml b/tests/ansible/integration/stub_connections/lxc.yml
new file mode 100644
index 00000000..7a2cd81c
--- /dev/null
+++ b/tests/ansible/integration/stub_connections/lxc.yml
@@ -0,0 +1,18 @@
+
+- name: integration/stub_connections/lxc.yml
+ hosts: test-targets
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - custom_python_detect_environment:
+ vars:
+ ansible_connection: lxc
+ mitogen_lxc_attach_path: stub-lxc-attach.py
+ register: out
+
+ - assert:
+ that:
+ - out.env.THIS_IS_STUB_LXC_ATTACH == '1'
diff --git a/tests/ansible/integration/stub_connections/lxd.yml b/tests/ansible/integration/stub_connections/lxd.yml
new file mode 100644
index 00000000..86f4b185
--- /dev/null
+++ b/tests/ansible/integration/stub_connections/lxd.yml
@@ -0,0 +1,18 @@
+
+- name: integration/stub_connections/lxd.yml
+ hosts: test-targets
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - custom_python_detect_environment:
+ vars:
+ ansible_connection: lxd
+ mitogen_lxc_path: stub-lxc.py
+ register: out
+
+ - assert:
+ that:
+ - out.env.THIS_IS_STUB_LXC == '1'
diff --git a/tests/ansible/integration/stub_connections/mitogen_doas.yml b/tests/ansible/integration/stub_connections/mitogen_doas.yml
new file mode 100644
index 00000000..40d4f4b0
--- /dev/null
+++ b/tests/ansible/integration/stub_connections/mitogen_doas.yml
@@ -0,0 +1,21 @@
+
+- name: integration/stub_connections/mitogen_doas.yml
+ hosts: test-targets
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - custom_python_detect_environment:
+ vars:
+ ansible_connection: mitogen_doas
+ ansible_become_exe: stub-doas.py
+ ansible_user: someuser
+ register: out
+
+ - debug: var=out.env.ORIGINAL_ARGV
+ - assert:
+ that:
+ - out.env.THIS_IS_STUB_DOAS == '1'
+ - (out.env.ORIGINAL_ARGV|from_json)[1:3] == ['-u', 'someuser']
diff --git a/tests/ansible/integration/stub_connections/mitogen_sudo.yml b/tests/ansible/integration/stub_connections/mitogen_sudo.yml
new file mode 100644
index 00000000..b7ca3d26
--- /dev/null
+++ b/tests/ansible/integration/stub_connections/mitogen_sudo.yml
@@ -0,0 +1,22 @@
+
+- name: integration/stub_connections/mitogen_sudo.yml
+ hosts: test-targets
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - custom_python_detect_environment:
+ vars:
+ ansible_connection: mitogen_sudo
+ ansible_user: root
+ ansible_become_exe: stub-sudo.py
+ ansible_become_flags: -H --type=sometype --role=somerole
+ register: out
+
+ - assert:
+ that: out.env.THIS_IS_STUB_SUDO == '1'
+ - assert_equal:
+ left: (out.env.ORIGINAL_ARGV|from_json)[1:9]
+ right: ['-u', 'root', '-H', '-r', 'somerole', '-t', 'sometype', '--']
diff --git a/tests/ansible/integration/stub_connections/setns_lxc.yml b/tests/ansible/integration/stub_connections/setns_lxc.yml
new file mode 100644
index 00000000..c57a8c5c
--- /dev/null
+++ b/tests/ansible/integration/stub_connections/setns_lxc.yml
@@ -0,0 +1,32 @@
+# issue #409.
+# setns is hard -- it wants to do superuser syscalls, so we must run it in a
+# child Ansible via sudo. But that only works if sudo works.
+
+- name: integration/stub_connections/setns_lxc.yml
+ hosts: test-targets
+ gather_facts: false
+ any_errors_fatal: false
+ connection: local
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - include: _end_play_if_not_sudo_linux.yml
+
+ - command: |
+ sudo -nE "{{lookup('env', 'VIRTUAL_ENV')}}/bin/ansible"
+ -i localhost,
+ -c setns
+ -e mitogen_kind=lxc
+ -e mitogen_lxc_info_path={{git_basedir}}/tests/data/stubs/stub-lxc-info.py
+ -m shell
+ -a "echo hi"
+ -u root
+ localhost
+ args:
+ chdir: ../..
+ warn: false
+ register: result
+
+ - assert:
+ that: result.rc == 0
diff --git a/tests/ansible/integration/stub_connections/setns_lxd.yml b/tests/ansible/integration/stub_connections/setns_lxd.yml
new file mode 100644
index 00000000..7db47661
--- /dev/null
+++ b/tests/ansible/integration/stub_connections/setns_lxd.yml
@@ -0,0 +1,32 @@
+# issue #409.
+# setns is hard -- it wants to do superuser syscalls, so we must run it in a
+# child Ansible via sudo. But that only works if sudo works.
+
+- name: integration/stub_connections/setns_lxd.yml
+ hosts: test-targets
+ gather_facts: false
+ any_errors_fatal: false
+ connection: local
+ tasks:
+ - meta: end_play
+ when: not is_mitogen
+
+ - include: _end_play_if_not_sudo_linux.yml
+
+ - command: |
+ sudo -nE "{{lookup('env', 'VIRTUAL_ENV')}}/bin/ansible"
+ -i localhost,
+ -c setns
+ -e mitogen_kind=lxd
+ -e mitogen_lxc_path={{git_basedir}}/tests/data/stubs/stub-lxc.py
+ -m shell
+ -a "echo hi"
+ -u root
+ localhost
+ args:
+ chdir: ../..
+ warn: false
+ register: result
+
+ - assert:
+ that: result.rc == 0
diff --git a/tests/ansible/integration/transport/all.yml b/tests/ansible/integration/transport/all.yml
index 89949b58..534534db 100644
--- a/tests/ansible/integration/transport/all.yml
+++ b/tests/ansible/integration/transport/all.yml
@@ -1,2 +1,2 @@
-- import_playbook: kubectl.yml
+- include: kubectl.yml
diff --git a/tests/ansible/lib/action/assert_equal.py b/tests/ansible/lib/action/assert_equal.py
new file mode 100644
index 00000000..84ec7606
--- /dev/null
+++ b/tests/ansible/lib/action/assert_equal.py
@@ -0,0 +1,70 @@
+#
+# Print data structure diff on assertion failure.
+#
+# assert_equal: left=some.result right={1:2}
+#
+
+__metaclass__ = type
+
+import inspect
+import unittest2
+
+import ansible.template
+
+from ansible.errors import AnsibleError
+from ansible.plugins.action import ActionBase
+from ansible.module_utils.six import string_types
+
+
+TEMPLATE_KWARGS = {}
+
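+# Templar.template() accepts a bare_deprecated= argument only on some Ansible
+# releases; detect support from the signature rather than hard-coding versions.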
+_argspec = inspect.getargspec(ansible.template.Templar.template)
+if 'bare_deprecated' in _argspec.args:
+ TEMPLATE_KWARGS['bare_deprecated'] = False
+
+
+class TestCase(unittest2.TestCase):
+ def runTest(self):
+ pass
+
+
+def text_diff(a, b):
+ tc = TestCase()
+ tc.maxDiff = None
+ try:
+ tc.assertEqual(a, b)
+ return None
+ except AssertionError as e:
+ return str(e)
+
+
+class ActionModule(ActionBase):
+ ''' Fail with custom message '''
+
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('left', 'right'))
+
+ def template(self, obj):
+ return self._templar.template(
+ obj,
+ convert_bare=True,
+ **TEMPLATE_KWARGS
+ )
+
+ def run(self, tmp=None, task_vars=None):
+ result = super(ActionModule, self).run(tmp, task_vars or {})
+ left = self.template(self._task.args['left'])
+ right = self.template(self._task.args['right'])
+
+ diff = text_diff(left, right)
+ if diff is None:
+ return {
+ 'changed': False
+ }
+
+ return {
+ 'changed': False,
+ 'failed': True,
+ 'msg': diff,
+ '_ansible_verbose_always': True,
+ }
diff --git a/tests/ansible/lib/action/mitogen_action_script.py b/tests/ansible/lib/action/mitogen_action_script.py
new file mode 100644
index 00000000..e034345c
--- /dev/null
+++ b/tests/ansible/lib/action/mitogen_action_script.py
@@ -0,0 +1,28 @@
+# I am an Ansible action plug-in. I run the script provided in the 'script'
+# parameter in the context of the action.
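+#
+# Example playbook usage (a sketch only; forking_correct_parent.yml contains a
+# real one):
+#
+#   - mitogen_action_script:
+#       script: |
+#         result['task_args'] = dict(self._task.args)
+#     register: out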
+
+import sys
+
+from ansible.plugins.action import ActionBase
+
+
+def execute(s, gbls, lcls):
+ if sys.version_info > (3,):
+ exec(s, gbls, lcls)
+ else:
+ exec('exec s in gbls, lcls')
+
+
+class ActionModule(ActionBase):
+ def run(self, tmp=None, task_vars=None):
+ super(ActionModule, self).run(tmp=tmp, task_vars=task_vars)
+ lcls = {
+ 'self': self,
+ 'result': {}
+ }
+ execute(self._task.args['script'], globals(), lcls)
+ return lcls['result']
diff --git a/tests/ansible/lib/action/mitogen_get_stack.py b/tests/ansible/lib/action/mitogen_get_stack.py
deleted file mode 100644
index f1b87f35..00000000
--- a/tests/ansible/lib/action/mitogen_get_stack.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""
-Fetch the connection configuration stack that would be used to connect to a
-target, without actually connecting to it.
-"""
-
-import ansible_mitogen.connection
-
-from ansible.plugins.action import ActionBase
-
-
-class ActionModule(ActionBase):
- def run(self, tmp=None, task_vars=None):
- if not isinstance(self._connection,
- ansible_mitogen.connection.Connection):
- return {
- 'skipped': True,
- }
-
- return {
- 'changed': True,
- 'result': self._connection._build_stack(),
- }
diff --git a/tests/ansible/lib/action/mitogen_get_stack.py b/tests/ansible/lib/action/mitogen_get_stack.py
new file mode 120000
index 00000000..f055f341
--- /dev/null
+++ b/tests/ansible/lib/action/mitogen_get_stack.py
@@ -0,0 +1 @@
+../../../../ansible_mitogen/plugins/action/mitogen_get_stack.py
\ No newline at end of file
diff --git a/tests/ansible/lib/action/mitogen_shutdown_all.py b/tests/ansible/lib/action/mitogen_shutdown_all.py
index 4909dfe9..c28d9d4b 100644
--- a/tests/ansible/lib/action/mitogen_shutdown_all.py
+++ b/tests/ansible/lib/action/mitogen_shutdown_all.py
@@ -12,6 +12,9 @@ from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
+ # Running this for every host is pointless.
+ BYPASS_HOST_LOOP = True
+
def run(self, tmp=None, task_vars=None):
if not isinstance(self._connection,
ansible_mitogen.connection.Connection):
diff --git a/tests/ansible/lib/callback/fork_histogram.py b/tests/ansible/lib/callback/fork_histogram.py
new file mode 100644
index 00000000..9ce50e13
--- /dev/null
+++ b/tests/ansible/lib/callback/fork_histogram.py
@@ -0,0 +1,96 @@
+
+# Monkey-patch os.fork() to produce a latency histogram on run completion.
+# Requires the 'hdrhistogram' PyPI module.
+
+from __future__ import print_function
+
+import os
+import resource
+import sys
+import time
+
+import ansible.plugins.callback
+import hdrh.histogram
+
+
+def get_fault_count(who=resource.RUSAGE_CHILDREN):
+ ru = resource.getrusage(who)
+ return ru.ru_minflt + ru.ru_majflt
+
+
+class CallbackModule(ansible.plugins.callback.CallbackBase):
+ hist = None
+
+ def v2_playbook_on_start(self, playbook):
+ if self.hist is not None:
+ return
+
+ self.hist = hdrh.histogram.HdrHistogram(1, int(1e6*60), 3)
+ self.fork_latency_sum_usec = 0.0
+ if 'FORK_HISTOGRAM' in os.environ:
+ self.install()
+
+ def install(self):
+ self.faults_at_start = get_fault_count(resource.RUSAGE_SELF)
+ self.run_start_time = time.time()
+ self.real_fork = os.fork
+ os.fork = self.my_fork
+
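+    # Approximate cost of a single page fault, in microseconds; used by
+    # playbook_on_stats() below to estimate time wasted servicing faults.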
+ self_fault_usec = 1.113664156753052
+ child_fault_usec = 4.734975610975617
+
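+    # Optionally inflate the parent's heap so fork() copy-on-write fault costs
+    # become visible in the measurements.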
+ dummy_heap_size = int(os.environ.get('FORK_STATS_FAKE_HEAP_MB', '0'))
+ dummy_heap = 'x' * (dummy_heap_size * 1048576)
+
+ def my_fork(self):
+        # Doesn't count the last child, oh well.
+ now_faults = get_fault_count()
+ t0 = time.time()
+ try:
+ return self.real_fork()
+ finally:
+ latency_usec = (1e6 * (time.time() - t0))
+ self.fork_latency_sum_usec += latency_usec
+ self.hist.record_value(latency_usec)
+
+ def playbook_on_stats(self, stats):
+ if 'FORK_HISTOGRAM' not in os.environ:
+ return
+
+ self_faults = get_fault_count(resource.RUSAGE_SELF) - self.faults_at_start
+ child_faults = get_fault_count()
+ run_duration_sec = time.time() - self.run_start_time
+ fault_wastage_usec = (
+ ((self.self_fault_usec * self_faults) +
+ (self.child_fault_usec * child_faults))
+ )
+ fork_wastage = self.hist.get_total_count()
+ all_wastage_usec = ((2*self.fork_latency_sum_usec) + fault_wastage_usec)
+
+ print('--- Fork statistics ---')
+ print('Post-boot run duration: %.02f ms, %d total forks' % (
+ 1000 * run_duration_sec,
+ self.hist.get_total_count(),
+ ))
+ print('Self faults during boot: %d, post-boot: %d, avg %d/child' % (
+ self.faults_at_start,
+ self_faults,
+ self_faults / self.hist.get_total_count(),
+ ))
+ print('Total child faults: %d, avg %d/child' % (
+ child_faults,
+ child_faults / self.hist.get_total_count(),
+ ))
+ print('Est. wastage on faults: %d ms, forks+faults+waits: %d ms (%.2f%%)' % (
+ fault_wastage_usec / 1000,
+ all_wastage_usec / 1000,
+ 100 * (all_wastage_usec / (run_duration_sec * 1e6)),
+ ))
+ print('99th%% fork latency: %.03f msec, max %d new tasks/sec' % (
+ self.hist.get_value_at_percentile(99) / 1000.0,
+ 1e6 / self.hist.get_value_at_percentile(99),
+ ))
+
+ self.hist.output_percentile_distribution(sys.stdout, 1000)
+ print('--- End fork statistics ---')
+ print()
diff --git a/tests/ansible/lib/filters/mitogen_tests.py b/tests/ansible/lib/filters/mitogen_tests.py
new file mode 100644
index 00000000..e7614658
--- /dev/null
+++ b/tests/ansible/lib/filters/mitogen_tests.py
@@ -0,0 +1,27 @@
+
+from ansible.module_utils._text import to_text as ansible_to_text
+
+
+try:
+    Unicode = unicode
+except NameError:
+    Unicode = str
+
+
+def to_text(s):
+    """
+    Ensure the str or unicode `s` is unicode, and strip away any subclass. Also
+    works on lists.
+    """
+    if isinstance(s, list):
+        return [to_text(ss) for ss in s]
+    if not isinstance(s, Unicode):
+        # Use Ansible's converter here; this filter shadows the imported name,
+        # so calling plain to_text() would recurse forever.
+        s = ansible_to_text(s)
+    return Unicode(s)
+
+
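+# Usage from a play (sketch): that: "out.msg|to_text == 'expected'|to_text"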
+class FilterModule(object):
+ def filters(self):
+ return {
+ 'to_text': to_text,
+ }
diff --git a/tests/ansible/lib/modules/action_passthrough.py b/tests/ansible/lib/modules/action_passthrough.py
new file mode 100644
index 00000000..bf14820f
--- /dev/null
+++ b/tests/ansible/lib/modules/action_passthrough.py
@@ -0,0 +1,2 @@
+# This is a placeholder so Ansible 2.3 can detect the corresponding action
+# plug-in.
diff --git a/tests/ansible/lib/modules/assert_equal.py b/tests/ansible/lib/modules/assert_equal.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/ansible/lib/modules/connection_passthrough.py b/tests/ansible/lib/modules/connection_passthrough.py
new file mode 100644
index 00000000..bf14820f
--- /dev/null
+++ b/tests/ansible/lib/modules/connection_passthrough.py
@@ -0,0 +1,2 @@
+# This is a placeholder so Ansible 2.3 can detect the corresponding action
+# plug-in.
diff --git a/tests/ansible/lib/modules/custom_perl_json_args_module.pl b/tests/ansible/lib/modules/custom_perl_json_args_module.pl
index c999ca6c..4c8cbd75 100644
--- a/tests/ansible/lib/modules/custom_perl_json_args_module.pl
+++ b/tests/ansible/lib/modules/custom_perl_json_args_module.pl
@@ -1,15 +1,10 @@
#!/usr/bin/perl
-binmode STDOUT, ":utf8";
-use utf8;
-
-use JSON;
-
my $json_args = <<'END_MESSAGE';
 <<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>
END_MESSAGE
-print encode_json({
- message => "I am a perl script! Here is my input.",
- input => [decode_json($json_args)]
-});
+print '{';
+print ' "message": "I am a perl script! Here is my input.",' . "\n";
+print ' "input": ' . $json_args;
+print '}' . "\n";
diff --git a/tests/ansible/lib/modules/custom_perl_want_json_module.pl b/tests/ansible/lib/modules/custom_perl_want_json_module.pl
index 8b45e5b4..83388ac0 100644
--- a/tests/ansible/lib/modules/custom_perl_want_json_module.pl
+++ b/tests/ansible/lib/modules/custom_perl_want_json_module.pl
@@ -1,12 +1,7 @@
#!/usr/bin/perl
-binmode STDOUT, ":utf8";
-use utf8;
-
my $WANT_JSON = 1;
-use JSON;
-
my $json;
{
local $/; #Enable 'slurp' mode
@@ -15,7 +10,7 @@ my $json;
close $fh;
}
-print encode_json({
- message => "I am a want JSON perl script! Here is my input.",
- input => [decode_json($json_args)]
-});
+print "{\n";
+print ' "message": "I am a want JSON perl script! Here is my input.",' . "\n";
+print ' "input": ' . $json_args . "\n";
+print "}\n";
diff --git a/tests/ansible/lib/modules/custom_python_detect_environment.py b/tests/ansible/lib/modules/custom_python_detect_environment.py
index 2da9cddf..9f628a03 100644
--- a/tests/ansible/lib/modules/custom_python_detect_environment.py
+++ b/tests/ansible/lib/modules/custom_python_detect_environment.py
@@ -12,6 +12,17 @@ import socket
import sys
+try:
+ all
+except NameError:
+ # Python 2.4
+ def all(it):
+ for elem in it:
+ if not elem:
+ return False
+ return True
+
+
def main():
module = AnsibleModule(argument_spec={})
module.exit_json(
diff --git a/tests/ansible/lib/modules/custom_python_json_args_module.py b/tests/ansible/lib/modules/custom_python_json_args_module.py
index 008829f6..a63ce8e6 100755
--- a/tests/ansible/lib/modules/custom_python_json_args_module.py
+++ b/tests/ansible/lib/modules/custom_python_json_args_module.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
# I am an Ansible Python JSONARGS module. I should receive an encoding string.
-import json
import sys
 json_arguments = """<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"""
diff --git a/tests/ansible/lib/modules/custom_python_modify_environ.py b/tests/ansible/lib/modules/custom_python_modify_environ.py
index 8cdd3bde..347bedf2 100644
--- a/tests/ansible/lib/modules/custom_python_modify_environ.py
+++ b/tests/ansible/lib/modules/custom_python_modify_environ.py
@@ -12,8 +12,8 @@ import sys
def main():
module = AnsibleModule(argument_spec={
- 'key': {'type': str},
- 'val': {'type': str}
+ 'key': {'type': 'str'},
+ 'val': {'type': 'str'}
})
os.environ[module.params['key']] = module.params['val']
module.exit_json(msg='Muahahaha!')
diff --git a/tests/ansible/lib/modules/custom_python_new_style_missing_interpreter.py b/tests/ansible/lib/modules/custom_python_new_style_missing_interpreter.py
index 6b3f9ef7..66264010 100644
--- a/tests/ansible/lib/modules/custom_python_new_style_missing_interpreter.py
+++ b/tests/ansible/lib/modules/custom_python_new_style_missing_interpreter.py
@@ -1,6 +1,5 @@
# I am an Ansible new-style Python module, but I lack an interpreter.
-import json
import sys
# This is the magic marker Ansible looks for:
diff --git a/tests/ansible/lib/modules/custom_python_new_style_module.py b/tests/ansible/lib/modules/custom_python_new_style_module.py
index 9641e88d..70ee062d 100755
--- a/tests/ansible/lib/modules/custom_python_new_style_module.py
+++ b/tests/ansible/lib/modules/custom_python_new_style_module.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
# I am an Ansible new-style Python module. I should receive an encoding string.
-import json
import sys
# This is the magic marker Ansible looks for:
diff --git a/tests/ansible/lib/modules/custom_python_run_script.py b/tests/ansible/lib/modules/custom_python_run_script.py
index 2313291b..31e0609f 100644
--- a/tests/ansible/lib/modules/custom_python_run_script.py
+++ b/tests/ansible/lib/modules/custom_python_run_script.py
@@ -22,7 +22,7 @@ def execute(s, gbls, lcls):
def main():
module = AnsibleModule(argument_spec={
'script': {
- 'type': str
+ 'type': 'str'
}
})
diff --git a/tests/ansible/lib/modules/custom_python_want_json_module.py b/tests/ansible/lib/modules/custom_python_want_json_module.py
index 439aa522..d9ea7113 100755
--- a/tests/ansible/lib/modules/custom_python_want_json_module.py
+++ b/tests/ansible/lib/modules/custom_python_want_json_module.py
@@ -1,9 +1,14 @@
#!/usr/bin/python
-# I am an Ansible Python WANT_JSON module. I should receive an encoding string.
+# I am an Ansible Python WANT_JSON module. I should receive a JSON-encoded file.
-import json
import sys
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+
WANT_JSON = 1
@@ -16,12 +21,18 @@ if len(sys.argv) < 2:
# Also must slurp in our own source code, to verify the encoding string was
# added.
-with open(sys.argv[0]) as fp:
+fp = open(sys.argv[0])
+try:
me = fp.read()
+finally:
+ fp.close()
try:
- with open(sys.argv[1]) as fp:
+ fp = open(sys.argv[1])
+ try:
input_json = fp.read()
+ finally:
+ fp.close()
except IOError:
usage()
diff --git a/tests/ansible/lib/modules/determine_strategy.py b/tests/ansible/lib/modules/determine_strategy.py
new file mode 100644
index 00000000..bf14820f
--- /dev/null
+++ b/tests/ansible/lib/modules/determine_strategy.py
@@ -0,0 +1,2 @@
+# This is a placeholder so Ansible 2.3 can detect the corresponding action
+# plug-in.
diff --git a/tests/ansible/lib/modules/mitogen_action_script.py b/tests/ansible/lib/modules/mitogen_action_script.py
new file mode 100644
index 00000000..bf14820f
--- /dev/null
+++ b/tests/ansible/lib/modules/mitogen_action_script.py
@@ -0,0 +1,2 @@
+# This is a placeholder so Ansible 2.3 can detect the corresponding action
+# plug-in.
diff --git a/tests/ansible/lib/modules/mitogen_get_stack.py b/tests/ansible/lib/modules/mitogen_get_stack.py
new file mode 100644
index 00000000..bf14820f
--- /dev/null
+++ b/tests/ansible/lib/modules/mitogen_get_stack.py
@@ -0,0 +1,2 @@
+# This is a placeholder so Ansible 2.3 can detect the corresponding action
+# plug-in.
diff --git a/tests/ansible/lib/modules/mitogen_shutdown_all.py b/tests/ansible/lib/modules/mitogen_shutdown_all.py
new file mode 100644
index 00000000..bf14820f
--- /dev/null
+++ b/tests/ansible/lib/modules/mitogen_shutdown_all.py
@@ -0,0 +1,2 @@
+# This is a placeholder so Ansible 2.3 can detect the corresponding action
+# plug-in.
diff --git a/tests/ansible/lib/modules/mitogen_test_gethostbyname.py b/tests/ansible/lib/modules/mitogen_test_gethostbyname.py
index f6070b03..289e9662 100644
--- a/tests/ansible/lib/modules/mitogen_test_gethostbyname.py
+++ b/tests/ansible/lib/modules/mitogen_test_gethostbyname.py
@@ -3,13 +3,16 @@
# I am a module that indirectly depends on glibc cached /etc/resolv.conf state.
import socket
+import sys
+
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(argument_spec={'name': {'type': 'str'}})
try:
module.exit_json(addr=socket.gethostbyname(module.params['name']))
- except socket.error as e:
+ except socket.error:
+ e = sys.exc_info()[1]
module.fail_json(msg=str(e))
if __name__ == '__main__':
diff --git a/tests/ansible/lib/vars/custom_modifies_os_environ.py b/tests/ansible/lib/vars/custom_modifies_os_environ.py
index 4039dfa7..9d43573f 100644
--- a/tests/ansible/lib/vars/custom_modifies_os_environ.py
+++ b/tests/ansible/lib/vars/custom_modifies_os_environ.py
@@ -1,16 +1,12 @@
# https://github.com/dw/mitogen/issues/297
from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-from ansible.plugins.vars import BaseVarsPlugin
import os
-class VarsModule(BaseVarsPlugin):
+class VarsModule(object):
def __init__(self, *args):
- super(VarsModule, self).__init__(*args)
os.environ['EVIL_VARS_PLUGIN'] = 'YIPEEE'
def get_vars(self, loader, path, entities, cache=True):
- super(VarsModule, self).get_vars(loader, path, entities)
return {}
diff --git a/tests/ansible/mitogen_ansible_playbook.py b/tests/ansible/mitogen_ansible_playbook.py
new file mode 100755
index 00000000..3af1791c
--- /dev/null
+++ b/tests/ansible/mitogen_ansible_playbook.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+import os
+import subprocess
+import sys
+os.environ['ANSIBLE_STRATEGY'] = 'mitogen_linear'
+subprocess.check_call(['./run_ansible_playbook.py'] + sys.argv[1:])
diff --git a/tests/ansible/mitogen_ansible_playbook.sh b/tests/ansible/mitogen_ansible_playbook.sh
deleted file mode 100755
index cd5c1e53..00000000
--- a/tests/ansible/mitogen_ansible_playbook.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-export ANSIBLE_STRATEGY=mitogen_linear
-exec ./run_ansible_playbook.sh "$@"
diff --git a/tests/ansible/regression/all.yml b/tests/ansible/regression/all.yml
index 46798b3e..123d87d9 100644
--- a/tests/ansible/regression/all.yml
+++ b/tests/ansible/regression/all.yml
@@ -1,10 +1,10 @@
-- import_playbook: issue_109__target_has_old_ansible_installed.yml
-- import_playbook: issue_113__duplicate_module_imports.yml
-- import_playbook: issue_118__script_not_marked_exec.yml
-- import_playbook: issue_122__environment_difference.yml
-- import_playbook: issue_140__thread_pileup.yml
-- import_playbook: issue_152__local_action_wrong_interpreter.yml
-- import_playbook: issue_152__virtualenv_python_fails.yml
-- import_playbook: issue_154__module_state_leaks.yml
-- import_playbook: issue_177__copy_module_failing.yml
-- import_playbook: issue_332_ansiblemoduleerror_first_occurrence.yml
+- include: issue_109__target_has_old_ansible_installed.yml
+- include: issue_113__duplicate_module_imports.yml
+- include: issue_118__script_not_marked_exec.yml
+- include: issue_122__environment_difference.yml
+- include: issue_140__thread_pileup.yml
+- include: issue_152__local_action_wrong_interpreter.yml
+- include: issue_152__virtualenv_python_fails.yml
+- include: issue_154__module_state_leaks.yml
+- include: issue_177__copy_module_failing.yml
+- include: issue_332_ansiblemoduleerror_first_occurrence.yml
diff --git a/tests/ansible/regression/issue_122__environment_difference.yml b/tests/ansible/regression/issue_122__environment_difference.yml
index bf9df861..b020cc5d 100644
--- a/tests/ansible/regression/issue_122__environment_difference.yml
+++ b/tests/ansible/regression/issue_122__environment_difference.yml
@@ -9,6 +9,6 @@
hosts: test-targets
tasks:
- - script: scripts/print_env.sh
+ - script: scripts/print_env.py
register: env
- debug: msg={{env}}
diff --git a/tests/ansible/regression/issue_152__virtualenv_python_fails.yml b/tests/ansible/regression/issue_152__virtualenv_python_fails.yml
index 0234a0ef..85109309 100644
--- a/tests/ansible/regression/issue_152__virtualenv_python_fails.yml
+++ b/tests/ansible/regression/issue_152__virtualenv_python_fails.yml
@@ -8,20 +8,20 @@
# Can't use pip module because it can't create virtualenvs, must call it
# directly.
- shell: virtualenv /tmp/issue_152_virtualenv
- when: lout.python_version != '2.6'
+ when: lout.python_version > '2.6'
- custom_python_detect_environment:
vars:
ansible_python_interpreter: /tmp/issue_152_virtualenv/bin/python
register: out
- when: lout.python_version != '2.6'
+ when: lout.python_version > '2.6'
- assert:
that:
- out.sys_executable == "/tmp/issue_152_virtualenv/bin/python"
- when: lout.python_version != '2.6'
+ when: lout.python_version > '2.6'
- file:
path: /tmp/issue_152_virtualenv
state: absent
- when: lout.python_version != '2.6'
+ when: lout.python_version > '2.6'
diff --git a/tests/ansible/regression/scripts/print_env.py b/tests/ansible/regression/scripts/print_env.py
new file mode 100644
index 00000000..50a2504e
--- /dev/null
+++ b/tests/ansible/regression/scripts/print_env.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+
+import os
+import pprint
+
+pprint.pprint(dict(os.environ))
diff --git a/tests/ansible/regression/scripts/print_env.sh b/tests/ansible/regression/scripts/print_env.sh
deleted file mode 100644
index c03c9936..00000000
--- a/tests/ansible/regression/scripts/print_env.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-set
diff --git a/tests/ansible/requirements.txt b/tests/ansible/requirements.txt
new file mode 100644
index 00000000..47ed9abb
--- /dev/null
+++ b/tests/ansible/requirements.txt
@@ -0,0 +1,6 @@
+ansible; python_version >= '2.7'
+ansible<2.7; python_version < '2.7'
+paramiko==2.3.2 # Last 2.6-compat version.
+hdrhistogram==0.6.1
+PyYAML==3.11; python_version < '2.7'
+PyYAML==3.13; python_version >= '2.7'
diff --git a/tests/ansible/run_ansible_playbook.py b/tests/ansible/run_ansible_playbook.py
new file mode 100755
index 00000000..51f864f4
--- /dev/null
+++ b/tests/ansible/run_ansible_playbook.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+# Wrap ansible-playbook, setting up some state of the test environment.
+
+import json
+import os
+import sys
+
+
+GIT_BASEDIR = os.path.dirname(
+ os.path.abspath(
+ os.path.join(__file__, '..', '..')
+ )
+)
+
+# Ensure VIRTUAL_ENV is exported.
+os.environ.setdefault(
+ 'VIRTUAL_ENV',
+ os.path.dirname(os.path.dirname(sys.executable))
+)
+
+# Set LANG and LC_ALL to C in order to avoid locale errors spammed by vanilla
+# during exec_command().
+os.environ.pop('LANG', None)
+os.environ.pop('LC_ALL', None)
+
+
+# Used by delegate_to.yml to ensure "sudo -E" preserves environment.
+os.environ['I_WAS_PRESERVED'] = '1'
+
+# Used by LRU tests.
+os.environ['MITOGEN_MAX_INTERPRETERS'] = '3'
+
+# Add test stubs to path.
+os.environ['PATH'] = '%s%s%s' % (
+ os.path.join(GIT_BASEDIR, 'tests', 'data', 'stubs'),
+ os.pathsep,
+ os.environ['PATH'],
+)
+
+extra = {
+ 'is_mitogen': os.environ.get('ANSIBLE_STRATEGY', '').startswith('mitogen'),
+ 'git_basedir': GIT_BASEDIR,
+}
+
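+# Exported so playbooks can reference the inventory path without depending on
+# Ansible's built-in {{inventory_file}} variable.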
+if '-i' in sys.argv:
+ extra['MITOGEN_INVENTORY_FILE'] = (
+ os.path.abspath(sys.argv[1 + sys.argv.index('-i')])
+ )
+
+args = ['ansible-playbook']
+args += ['-e', json.dumps(extra)]
+args += sys.argv[1:]
+os.execvp(args[0], args)
diff --git a/tests/ansible/run_ansible_playbook.sh b/tests/ansible/run_ansible_playbook.sh
deleted file mode 100755
index 39580e37..00000000
--- a/tests/ansible/run_ansible_playbook.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-# Wrap ansible-playbook, setting up some test of the test environment.
-
-# Used by delegate_to.yml to ensure "sudo -E" preserves environment.
-export I_WAS_PRESERVED=1
-export MITOGEN_MAX_INTERPRETERS=3
-
-if [ "${ANSIBLE_STRATEGY:0:7}" = "mitogen" ]
-then
- EXTRA='{"is_mitogen": true}'
-else
- EXTRA='{"is_mitogen": false}'
-fi
-
-exec ansible-playbook -e "$EXTRA" "$@"
diff --git a/tests/ansible/soak/file_service.yml b/tests/ansible/soak/file_service.yml
index 3b338b3c..0640233a 100644
--- a/tests/ansible/soak/file_service.yml
+++ b/tests/ansible/soak/file_service.yml
@@ -2,5 +2,5 @@
tasks:
- set_fact:
content: "{% for x in range(126977) %}x{% endfor %}"
- - include_tasks: _file_service_loop.yml
+ - include: _file_service_loop.yml
with_sequence: start=1 end=100
diff --git a/tests/ansible/tests/__init__.py b/tests/ansible/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/ansible/tests/affinity_test.py b/tests/ansible/tests/affinity_test.py
new file mode 100644
index 00000000..d898c782
--- /dev/null
+++ b/tests/ansible/tests/affinity_test.py
@@ -0,0 +1,63 @@
+
+import multiprocessing
+import os
+import tempfile
+
+import mock
+import unittest2
+import testlib
+
+import mitogen.parent
+import ansible_mitogen.affinity
+
+
+@unittest2.skipIf(
+ reason='Linux/SMP only',
+ condition=(not (
+ os.uname()[0] == 'Linux' and
+ multiprocessing.cpu_count() >= 4
+ ))
+)
+class LinuxPolicyTest(testlib.TestCase):
+ klass = ansible_mitogen.affinity.LinuxPolicy
+
+ def setUp(self):
+ self.policy = self.klass()
+
+ def _get_cpus(self, path='/proc/self/status'):
+ fp = open(path)
+ try:
+ for line in fp:
+ if line.startswith('Cpus_allowed'):
+ return int(line.split()[1], 16)
+ finally:
+ fp.close()
+
+ def test_set_clear(self):
+ before = self._get_cpus()
+ self.policy._set_cpu(3)
+ self.assertEquals(self._get_cpus(), 1 << 3)
+ self.policy._clear()
+ self.assertEquals(self._get_cpus(), before)
+
+ def test_clear_on_popen(self):
+ tf = tempfile.NamedTemporaryFile()
+ try:
+ before = self._get_cpus()
+ self.policy._set_cpu(3)
+ my_cpu = self._get_cpus()
+
+ pid = mitogen.parent.detach_popen(
+ args=['cp', '/proc/self/status', tf.name]
+ )
+ os.waitpid(pid, 0)
+
+ his_cpu = self._get_cpus(tf.name)
+ self.assertNotEquals(my_cpu, his_cpu)
+ self.policy._clear()
+ finally:
+ tf.close()
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/ansible/tests/connection_test.py b/tests/ansible/tests/connection_test.py
new file mode 100644
index 00000000..401cbe9e
--- /dev/null
+++ b/tests/ansible/tests/connection_test.py
@@ -0,0 +1,135 @@
+
+from __future__ import absolute_import
+import os
+import os.path
+import subprocess
+import tempfile
+import time
+
+import unittest2
+
+import mock
+import ansible.errors
+import ansible.playbook.play_context
+
+import mitogen.core
+import ansible_mitogen.connection
+import ansible_mitogen.plugins.connection.mitogen_local
+import ansible_mitogen.process
+import ansible_mitogen.target
+import testlib
+
+
+LOGGER_NAME = ansible_mitogen.target.LOG.name
+
+
+# TODO: fixtureize
+import mitogen.utils
+mitogen.utils.log_to_file()
+ansible_mitogen.process.MuxProcess.start(_init_logging=False)
+
+
+class OptionalIntTest(unittest2.TestCase):
+ func = staticmethod(ansible_mitogen.connection.optional_int)
+
+ def test_already_int(self):
+ self.assertEquals(0, self.func(0))
+ self.assertEquals(1, self.func(1))
+ self.assertEquals(-1, self.func(-1))
+
+ def test_is_string(self):
+ self.assertEquals(0, self.func("0"))
+ self.assertEquals(1, self.func("1"))
+ self.assertEquals(-1, self.func("-1"))
+
+ def test_is_none(self):
+ self.assertEquals(None, self.func(None))
+
+ def test_is_junk(self):
+ self.assertEquals(None, self.func({1:2}))
+
+
+class ConnectionMixin(object):
+ klass = ansible_mitogen.plugins.connection.mitogen_local.Connection
+
+ def make_connection(self):
+ play_context = ansible.playbook.play_context.PlayContext()
+ return self.klass(play_context, new_stdin=False)
+
+ def wait_for_completion(self):
+ # put_data() is asynchronous, must wait for operation to happen. Do
+ # that by making RPC for some junk that must run on the thread once op
+ # completes.
+ self.conn.get_chain().call(os.getpid)
+
+ def setUp(self):
+ super(ConnectionMixin, self).setUp()
+ self.conn = self.make_connection()
+
+ def tearDown(self):
+ self.conn.close()
+ super(ConnectionMixin, self).tearDown()
+
+
+class PutDataTest(ConnectionMixin, unittest2.TestCase):
+ def test_out_path(self):
+ path = tempfile.mktemp(prefix='mitotest')
+ contents = mitogen.core.b('contents')
+
+ self.conn.put_data(path, contents)
+ self.wait_for_completion()
+ self.assertEquals(contents, open(path, 'rb').read())
+ os.unlink(path)
+
+ def test_mode(self):
+ path = tempfile.mktemp(prefix='mitotest')
+ contents = mitogen.core.b('contents')
+
+ self.conn.put_data(path, contents, mode=int('0123', 8))
+ self.wait_for_completion()
+ st = os.stat(path)
+ self.assertEquals(int('0123', 8), st.st_mode & int('0777', 8))
+ os.unlink(path)
+
+
+class PutFileTest(ConnectionMixin, unittest2.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ super(PutFileTest, cls).setUpClass()
+ cls.big_path = tempfile.mktemp(prefix='mitotestbig')
+ fp = open(cls.big_path, 'w')
+ try:
+ fp.write('x'*1048576)
+ finally:
+ fp.close()
+
+ @classmethod
+ def tearDownClass(cls):
+ os.unlink(cls.big_path)
+ super(PutFileTest, cls).tearDownClass()
+
+ def test_out_path_tiny(self):
+ path = tempfile.mktemp(prefix='mitotest')
+ self.conn.put_file(in_path=__file__, out_path=path)
+ self.wait_for_completion()
+ self.assertEquals(open(path, 'rb').read(),
+ open(__file__, 'rb').read())
+
+ os.unlink(path)
+
+ def test_out_path_big(self):
+ path = tempfile.mktemp(prefix='mitotest')
+ self.conn.put_file(in_path=self.big_path, out_path=path)
+ self.wait_for_completion()
+ self.assertEquals(open(path, 'rb').read(),
+ open(self.big_path, 'rb').read())
+ #self._compare_times_modes(path, __file__)
+ os.unlink(path)
+
+ def test_big_in_path_not_found(self):
+ path = tempfile.mktemp(prefix='mitotest')
+ self.assertRaises(ansible.errors.AnsibleFileNotFound,
+ lambda: self.conn.put_file(in_path='/nonexistent', out_path=path))
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/ansible/tests/target_test.py b/tests/ansible/tests/target_test.py
index e3d59433..7d6c0b46 100644
--- a/tests/ansible/tests/target_test.py
+++ b/tests/ansible/tests/target_test.py
@@ -28,10 +28,10 @@ class ApplyModeSpecTest(unittest2.TestCase):
def test_simple(self):
spec = 'u+rwx,go=x'
- self.assertEquals(0711, self.func(spec, 0))
+ self.assertEquals(int('0711', 8), self.func(spec, 0))
spec = 'g-rw'
- self.assertEquals(0717, self.func(spec, 0777))
+ self.assertEquals(int('0717', 8), self.func(spec, int('0777', 8)))
class IsGoodTempDirTest(unittest2.TestCase):
diff --git a/tests/bench/README.md b/tests/bench/README.md
new file mode 100644
index 00000000..0ef27df3
--- /dev/null
+++ b/tests/bench/README.md
@@ -0,0 +1,5 @@
+
+# tests/bench/
+
+Various manually executed scripts to aid benchmarking, or to reproduce old
+performance problems.
diff --git a/tests/bench/large_messages.py b/tests/bench/large_messages.py
new file mode 100644
index 00000000..24220023
--- /dev/null
+++ b/tests/bench/large_messages.py
@@ -0,0 +1,28 @@
+
+# Verify _receive_one() quadratic behaviour fixed.
+
+import subprocess
+import time
+import socket
+import mitogen
+
+
+@mitogen.main()
+def main(router):
+ c = router.fork()
+
+ n = 1048576 * 127
+ s = ' ' * n
+ print('bytes in %.2fMiB string...' % (n/1048576.0),)
+
+ t0 = time.time()
+ for x in range(10):
+ tt0 = time.time()
+ assert n == c.call(len, s)
+ print('took %dms' % (1000 * (time.time() - tt0),))
+ t1 = time.time()
+ print('total %dms / %dms avg / %.2fMiB/sec' % (
+ 1000 * (t1 - t0),
+ (1000 * (t1 - t0)) / (x + 1),
+ ((n * (x + 1)) / (t1 - t0)) / 1048576.0,
+ ))
diff --git a/tests/bench/latch_roundtrip.py b/tests/bench/latch_roundtrip.py
new file mode 100644
index 00000000..49314fb9
--- /dev/null
+++ b/tests/bench/latch_roundtrip.py
@@ -0,0 +1,40 @@
+"""
+Measure latency of IPC between two local threads.
+"""
+
+import threading
+import time
+
+import mitogen.core
+import mitogen.utils
+import ansible_mitogen.affinity
+
+mitogen.utils.setup_gil()
+ansible_mitogen.affinity.policy.assign_worker()
+
+X = 20000
+
+try:
+    # Python 3 lacks xrange; fall back the same way roundtrip.py does.
+    xrange
+except NameError:
+    xrange = range
+
+def flip_flop(ready, inp, out):
+ ready.put(None)
+ for x in xrange(X):
+ inp.get()
+ out.put(None)
+
+
+ready = mitogen.core.Latch()
+l1 = mitogen.core.Latch()
+l2 = mitogen.core.Latch()
+
+t1 = threading.Thread(target=flip_flop, args=(ready, l1, l2))
+t2 = threading.Thread(target=flip_flop, args=(ready, l2, l1))
+t1.start()
+t2.start()
+
+ready.get()
+ready.get()
+
+t0 = time.time()
+l1.put(None)
+t1.join()
+t2.join()
+print('++', int(1e6 * ((time.time() - t0) / (1.0+X))), 'usec')
diff --git a/tests/bench/linux_record_cpu_net.sh b/tests/bench/linux_record_cpu_net.sh
index bc5c44ee..d125e467 100755
--- a/tests/bench/linux_record_cpu_net.sh
+++ b/tests/bench/linux_record_cpu_net.sh
@@ -6,7 +6,19 @@
#
[ ! "$1" ] && exit 1
-sudo tcpdump -w $1-out.cap -s 0 host k1.botanicus.net &
-date +%s.%N > $1-task-clock.csv
-perf stat -x, -I 25 -e task-clock --append -o $1-task-clock.csv ansible-playbook run_hostname_100_times.yml
+name="$1"; shift
+
+
+sudo tcpdump -i any -w $name-net.pcap -s 66 port 22 or port 9122 &
+sleep 0.5
+
+perf stat -x, -I 100 \
+ -e branches \
+ -e instructions \
+ -e task-clock \
+ -e context-switches \
+ -e page-faults \
+ -e cpu-migrations \
+ -o $name-perf.csv "$@"
+pkill -f ssh:; sleep 0.1
sudo pkill -f tcpdump
diff --git a/tests/bench/local.py b/tests/bench/local.py
index a4ec2428..2808d803 100644
--- a/tests/bench/local.py
+++ b/tests/bench/local.py
@@ -2,19 +2,23 @@
Measure latency of .local() setup.
"""
-import mitogen
import time
+import mitogen
+import mitogen.utils
+import ansible_mitogen.affinity
+
+
+mitogen.utils.setup_gil()
+#ansible_mitogen.affinity.policy.assign_worker()
+
@mitogen.main()
def main(router):
- for x in range(1000):
+    t0 = time.time()
+ for x in range(100):
t = time.time()
f = router.local()# debug=True)
tt = time.time()
print(x, 1000 * (tt - t))
-
- print(f)
- print('EEK', f.call(socket.gethostname))
- print('MY PID', os.getpid())
- print('EEKERY', f.call(os.getpid))
+ print('%.03f ms' % (1000 * (time.time() - t0) / (1.0 + x)))
diff --git a/tests/bench/roundtrip.py b/tests/bench/roundtrip.py
index 13b9413d..8d86d75b 100644
--- a/tests/bench/roundtrip.py
+++ b/tests/bench/roundtrip.py
@@ -2,16 +2,28 @@
Measure latency of local RPC.
"""
-import mitogen
import time
+import mitogen
+import mitogen.utils
+import ansible_mitogen.affinity
+
+mitogen.utils.setup_gil()
+ansible_mitogen.affinity.policy.assign_worker()
+
+try:
+ xrange
+except NameError:
+ xrange = range
+
def do_nothing():
pass
@mitogen.main()
def main(router):
f = router.fork()
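+    # Warm the child up first so the loop measures steady-state round trips.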
+ f.call(do_nothing)
t0 = time.time()
- for x in xrange(10000):
+ for x in xrange(20000):
f.call(do_nothing)
- print '++', int(1e6 * ((time.time() - t0) / (1.0+x))), 'usec'
+ print('++', int(1e6 * ((time.time() - t0) / (1.0+x))), 'usec')
diff --git a/tests/bench/service.py b/tests/bench/service.py
new file mode 100644
index 00000000..6d866b5c
--- /dev/null
+++ b/tests/bench/service.py
@@ -0,0 +1,23 @@
+"""
+Measure latency of local service RPC.
+"""
+
+import time
+
+import mitogen.service
+import mitogen
+
+
+class MyService(mitogen.service.Service):
+ @mitogen.service.expose(policy=mitogen.service.AllowParents())
+ def ping(self):
+ return 'pong'
+
+
+@mitogen.main()
+def main(router):
+ f = router.fork()
+ t0 = time.time()
+ for x in range(1000):
+ f.call_service(service_name=MyService, method_name='ping')
+ print('++', int(1e6 * ((time.time() - t0) / (1.0+x))), 'usec')
diff --git a/tests/bench/throughput.py b/tests/bench/throughput.py
new file mode 100644
index 00000000..896ee9ac
--- /dev/null
+++ b/tests/bench/throughput.py
@@ -0,0 +1,74 @@
+# Verify throughput over sudo and SSH at various compression levels.
+
+import os
+import random
+import socket
+import subprocess
+import tempfile
+import time
+
+import mitogen
+import mitogen.service
+
+
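+# Running this no-op in the child first forces connection setup and module
+# imports, so the timed transfer below measures only file throughput.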
+def prepare():
+ pass
+
+
+def transfer(context, path):
+ fp = open('/dev/null', 'wb')
+ mitogen.service.FileService.get(context, path, fp)
+ fp.close()
+
+
+def fill_with_random(fp, size):
+ n = 0
+ s = os.urandom(1048576*16)
+ while n < size:
+ fp.write(s)
+ n += len(s)
+
+
+def run_test(router, fp, s, context):
+ fp.seek(0, 2)
+ size = fp.tell()
+ print('Testing %s...' % (s,))
+ context.call(prepare)
+ t0 = time.time()
+ context.call(transfer, router.myself(), fp.name)
+ t1 = time.time()
+ print('%s took %.2f ms to transfer %.2f MiB, %.2f MiB/s' % (
+ s, 1000 * (t1 - t0), size / 1048576.0,
+ (size / (t1 - t0) / 1048576.0),
+ ))
+
+
+@mitogen.main()
+def main(router):
+ bigfile = tempfile.NamedTemporaryFile()
+ fill_with_random(bigfile, 1048576*512)
+
+ file_service = mitogen.service.FileService(router)
+ pool = mitogen.service.Pool(router, ())
+ file_service.register(bigfile.name)
+ pool.add(file_service)
+ try:
+ context = router.local()
+ run_test(router, bigfile, 'local()', context)
+ context.shutdown(wait=True)
+
+ context = router.sudo()
+ run_test(router, bigfile, 'sudo()', context)
+ context.shutdown(wait=True)
+
+ context = router.ssh(hostname='localhost', compression=False)
+ run_test(router, bigfile, 'ssh(compression=False)', context)
+ context.shutdown(wait=True)
+
+ context = router.ssh(hostname='localhost', compression=True)
+ run_test(router, bigfile, 'ssh(compression=True)', context)
+ context.shutdown(wait=True)
+ finally:
+ pool.stop()
+ bigfile.close()
+
diff --git a/tests/broker_test.py b/tests/broker_test.py
new file mode 100644
index 00000000..23839a54
--- /dev/null
+++ b/tests/broker_test.py
@@ -0,0 +1,72 @@
+
+import threading
+
+import mock
+import unittest2
+
+import testlib
+
+import mitogen.core
+
+
+class ShutdownTest(testlib.TestCase):
+ klass = mitogen.core.Broker
+
+ def test_poller_closed(self):
+ broker = self.klass()
+ actual_close = broker.poller.close
+ broker.poller.close = mock.Mock()
+ broker.shutdown()
+ broker.join()
+ self.assertEquals(1, len(broker.poller.close.mock_calls))
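+ # Invoke the real close() afterwards so the poller's descriptors are
+ # actually released.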
+ actual_close()
+
+
+class DeferTest(testlib.TestCase):
+ klass = mitogen.core.Broker
+
+ def test_defer(self):
+ latch = mitogen.core.Latch()
+ broker = self.klass()
+ try:
+ broker.defer(lambda: latch.put(123))
+ self.assertEquals(123, latch.get())
+ finally:
+ broker.shutdown()
+ broker.join()
+
+ def test_defer_after_shutdown(self):
+ latch = mitogen.core.Latch()
+ broker = self.klass()
+ broker.shutdown()
+ broker.join()
+
+ e = self.assertRaises(mitogen.core.Error,
+ lambda: broker.defer(lambda: latch.put(123)))
+ self.assertEquals(e.args[0], mitogen.core.Waker.broker_shutdown_msg)
+
+
+class DeferSyncTest(testlib.TestCase):
+ klass = mitogen.core.Broker
+
+ def test_okay(self):
+ broker = self.klass()
+ try:
+ th = broker.defer_sync(lambda: threading.currentThread())
+ self.assertEquals(th, broker._thread)
+ finally:
+ broker.shutdown()
+ broker.join()
+
+ def test_exception(self):
+ broker = self.klass()
+ try:
+ self.assertRaises(ValueError,
+ broker.defer_sync, lambda: int('dave'))
+ finally:
+ broker.shutdown()
+ broker.join()
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/call_error_test.py b/tests/call_error_test.py
index 447a80a9..00ff0ed9 100644
--- a/tests/call_error_test.py
+++ b/tests/call_error_test.py
@@ -1,4 +1,3 @@
-import os
import pickle
import sys
@@ -10,28 +9,33 @@ import testlib
import plain_old_module
-class ConstructorTest(unittest2.TestCase):
+class ConstructorTest(testlib.TestCase):
klass = mitogen.core.CallError
def test_string_noargs(self):
e = self.klass('%s%s')
self.assertEquals(e.args[0], '%s%s')
+ self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
def test_string_args(self):
e = self.klass('%s%s', 1, 1)
self.assertEquals(e.args[0], '11')
+ self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
def test_from_exc(self):
ve = plain_old_module.MyError('eek')
e = self.klass(ve)
self.assertEquals(e.args[0], 'plain_old_module.MyError: eek')
+ self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
def test_form_base_exc(self):
ve = SystemExit('eek')
e = self.klass(ve)
+ cls = ve.__class__
self.assertEquals(e.args[0],
# varies across 2/3.
- '%s.%s: eek' % (type(ve).__module__, type(ve).__name__))
+ '%s.%s: eek' % (cls.__module__, cls.__name__))
+ self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
def test_from_exc_tb(self):
try:
@@ -41,10 +45,41 @@ class ConstructorTest(unittest2.TestCase):
e = self.klass(ve)
self.assertTrue(e.args[0].startswith('plain_old_module.MyError: eek'))
+ self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
self.assertTrue('test_from_exc_tb' in e.args[0])
+ def test_bytestring_conversion(self):
+ e = self.klass(mitogen.core.b('bytes'))
+ self.assertEquals(u'bytes', e.args[0])
+ self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
-class PickleTest(unittest2.TestCase):
+ def test_reduce(self):
+ e = self.klass('eek')
+ func, (arg,) = e.__reduce__()
+ self.assertTrue(func is mitogen.core._unpickle_call_error)
+ self.assertEquals(arg, e.args[0])
+
+
+class UnpickleCallErrorTest(testlib.TestCase):
+ func = staticmethod(mitogen.core._unpickle_call_error)
+
+ def test_not_unicode(self):
+ self.assertRaises(TypeError,
+ lambda: self.func(mitogen.core.b('bad')))
+
+ def test_oversized(self):
+ self.assertRaises(TypeError,
+ lambda: self.func(mitogen.core.b('b'*10001)))
+
+ def test_reify(self):
+ e = self.func(u'some error')
+ self.assertEquals(mitogen.core.CallError, e.__class__)
+ self.assertEquals(1, len(e.args))
+ self.assertEquals(mitogen.core.UnicodeType, type(e.args[0]))
+ self.assertEquals(u'some error', e.args[0])
+
+
+class PickleTest(testlib.TestCase):
klass = mitogen.core.CallError
def test_string_noargs(self):
diff --git a/tests/call_function_test.py b/tests/call_function_test.py
index dc9a2298..9e821b27 100644
--- a/tests/call_function_test.py
+++ b/tests/call_function_test.py
@@ -6,6 +6,7 @@ import unittest2
import mitogen.core
import mitogen.parent
import mitogen.master
+from mitogen.core import str_partition
import testlib
import plain_old_module
@@ -50,7 +51,7 @@ class CallFunctionTest(testlib.RouterMixin, testlib.TestCase):
def setUp(self):
super(CallFunctionTest, self).setUp()
- self.local = self.router.fork()
+ self.local = self.router.local()
def test_succeeds(self):
self.assertEqual(3, self.local.call(function_that_adds_numbers, 1, 2))
@@ -65,11 +66,11 @@ class CallFunctionTest(testlib.RouterMixin, testlib.TestCase):
exc = self.assertRaises(mitogen.core.CallError,
lambda: self.local.call(function_that_fails))
- s = str(exc)
- etype, _, s = s.partition(': ')
- self.assertEqual(etype, 'plain_old_module.MyError')
+ s = mitogen.core.to_text(exc)
+ etype, _, s = str_partition(s, u': ')
+ self.assertEqual(etype, u'plain_old_module.MyError')
- msg, _, s = s.partition('\n')
+ msg, _, s = str_partition(s, u'\n')
self.assertEqual(msg, 'exception text')
# Traceback
@@ -90,20 +91,22 @@ class CallFunctionTest(testlib.RouterMixin, testlib.TestCase):
self.broker.defer(stream.on_disconnect, self.broker)
exc = self.assertRaises(mitogen.core.ChannelError,
lambda: recv.get())
- self.assertEquals(exc.args[0], mitogen.core.ChannelError.local_msg)
+ self.assertEquals(exc.args[0], self.router.respondent_disconnect_msg)
def test_aborted_on_local_broker_shutdown(self):
stream = self.router._stream_by_id[self.local.context_id]
recv = self.local.call_async(time.sleep, 120)
time.sleep(0.05) # Ensure GIL is released
self.broker.shutdown()
+ self.broker_shutdown = True
exc = self.assertRaises(mitogen.core.ChannelError,
lambda: recv.get())
- self.assertEquals(exc.args[0], mitogen.core.ChannelError.local_msg)
+ self.assertEquals(exc.args[0], self.router.respondent_disconnect_msg)
def test_accepts_returns_context(self):
context = self.local.call(func_returns_arg, self.local)
- self.assertIsNot(context, self.local)
+ # Unpickling now deduplicates Context instances.
+ self.assertIs(context, self.local)
self.assertEqual(context.context_id, self.local.context_id)
self.assertEqual(context.name, self.local.name)
@@ -119,13 +122,13 @@ class CallFunctionTest(testlib.RouterMixin, testlib.TestCase):
lambda: recv.get().unpickle())
-class ChainTest(testlib.RouterMixin, testlib.TestCase):
+class CallChainTest(testlib.RouterMixin, testlib.TestCase):
# Verify mitogen_chain functionality.
klass = mitogen.parent.CallChain
def setUp(self):
- super(ChainTest, self).setUp()
- self.local = self.router.fork()
+ super(CallChainTest, self).setUp()
+ self.local = self.router.local()
def test_subsequent_calls_produce_same_error(self):
chain = self.klass(self.local, pipelined=True)
@@ -154,5 +157,34 @@ class ChainTest(testlib.RouterMixin, testlib.TestCase):
self.assertEquals('x3', c1.call(func_returns_arg, 'x3'))
+class UnsupportedCallablesTest(testlib.RouterMixin, testlib.TestCase):
+ # Verify CallChain's rejection of unsupported callable types.
+ klass = mitogen.parent.CallChain
+
+ def setUp(self):
+ super(UnsupportedCallablesTest, self).setUp()
+ self.local = self.router.local()
+
+ def test_closures_unsupported(self):
+ a = 1
+ closure = lambda: a
+ e = self.assertRaises(TypeError,
+ lambda: self.local.call(closure))
+ self.assertEquals(e.args[0], self.klass.closures_msg)
+
+ def test_lambda_unsupported(self):
+ lam = lambda: None
+ e = self.assertRaises(TypeError,
+ lambda: self.local.call(lam))
+ self.assertEquals(e.args[0], self.klass.lambda_msg)
+
+ def test_instance_method_unsupported(self):
+ class X:
+ def x(): pass
+ e = self.assertRaises(TypeError,
+ lambda: self.local.call(X().x))
+ self.assertEquals(e.args[0], self.klass.method_msg)
+
+
if __name__ == '__main__':
unittest2.main()
diff --git a/tests/data/.gitattributes b/tests/data/.gitattributes
new file mode 100644
index 00000000..5eb6edd9
--- /dev/null
+++ b/tests/data/.gitattributes
@@ -0,0 +1 @@
+*.tar.bz2 filter=lfs diff=lfs merge=lfs -text
diff --git a/tests/data/docker/001-mitogen.sudo b/tests/data/docker/001-mitogen.sudo
deleted file mode 100644
index 71e20e6a..00000000
--- a/tests/data/docker/001-mitogen.sudo
+++ /dev/null
@@ -1,9 +0,0 @@
-
-# https://www.toofishes.net/blog/trouble-sudoers-or-last-entry-wins/
-%mitogen__sudo_nopw ALL=(ALL:ALL) NOPASSWD:ALL
-mitogen__has_sudo_nopw ALL = (mitogen__pw_required) ALL
-mitogen__has_sudo_nopw ALL = (mitogen__require_tty_pw_required) ALL
-
-Defaults>mitogen__pw_required targetpw
-Defaults>mitogen__require_tty requiretty
-Defaults>mitogen__require_tty_pw_required requiretty,targetpw
diff --git a/tests/data/docker/mitogen__has_sudo_pubkey.key.pub b/tests/data/docker/mitogen__has_sudo_pubkey.key.pub
index 245ce379..b132d993 100644
--- a/tests/data/docker/mitogen__has_sudo_pubkey.key.pub
+++ b/tests/data/docker/mitogen__has_sudo_pubkey.key.pub
@@ -1 +1 @@
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkMz7vE4piReKXBNarhGhzfMr6g7capaUHllxThmtm4ndlM3kbiEFvxI9P7s17T50CycfesJf5/1bmLxACROtdMGrgBrCAAGwEy2qnCNPhqrLpd2amoLUkBcthmiaTVmU+eMMHm8ubxh0qEauXOaaVqXTGcK1bGMsufLYGr0lv5RE2AErg9jPYkh6qT0CpxGtRmfbYubFAIunP5gxHgiOQrD7Yzs2NFDqPq9rRuvRMGX/XLpDurFm9x16LTx1fDSU1aqmu88QMJtXoMyPlHCqd5x/FdZ1KorR79LB+H/cptB1/ND1geZv5OAD8ydCc3nNGi8hiyPobb6jOX68agXyX dmw@Eldil.local
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkMz7vE4piReKXBNarhGhzfMr6g7capaUHllxThmtm4ndlM3kbiEFvxI9P7s17T50CycfesJf5/1bmLxACROtdMGrgBrCAAGwEy2qnCNPhqrLpd2amoLUkBcthmiaTVmU+eMMHm8ubxh0qEauXOaaVqXTGcK1bGMsufLYGr0lv5RE2AErg9jPYkh6qT0CpxGtRmfbYubFAIunP5gxHgiOQrD7Yzs2NFDqPq9rRuvRMGX/XLpDurFm9x16LTx1fDSU1aqmu88QMJtXoMyPlHCqd5x/FdZ1KorR79LB+H/cptB1/ND1geZv5OAD8ydCc3nNGi8hiyPobb6jOX68agXyX mitogen__has_sudo_pubkey@testdata
diff --git a/tests/data/env_wrapper.sh b/tests/data/env_wrapper.sh
deleted file mode 100755
index afb523f0..00000000
--- a/tests/data/env_wrapper.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-# This script exists to test the behavior of Stream.python_path being set to a
-# list. It sets an environmnt variable that we can detect, then executes any
-# arguments passed to it.
-export EXECUTED_VIA_ENV_WRAPPER=1
-if [ "${1:0:1}" == "-" ]; then
- exec "$PYTHON" "$@"
-else
- export ENV_WRAPPER_FIRST_ARG="$1"
- shift
- exec "$@"
-fi
diff --git a/tests/data/iter_read_generator.py b/tests/data/iter_read_generator.py
new file mode 100755
index 00000000..3fd3c08c
--- /dev/null
+++ b/tests/data/iter_read_generator.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+# I produce text every 100ms, for testing mitogen.core.iter_read()
+
+import sys
+import time
+
+
+i = 0
+while True:
+ i += 1
+ sys.stdout.write(str(i))
+ sys.stdout.flush()
+ time.sleep(0.1)
diff --git a/tests/data/iter_read_generator.sh b/tests/data/iter_read_generator.sh
deleted file mode 100755
index 3aa6d6ac..00000000
--- a/tests/data/iter_read_generator.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-# I produce text every 100ms, for testing mitogen.core.iter_read()
-
-i=0
-
-while :; do
- i=$(($i + 1))
- echo "$i"
- sleep 0.1
-done
diff --git a/tests/data/main_with_no_exec_guard.py b/tests/data/main_with_no_exec_guard.py
new file mode 100644
index 00000000..153e4743
--- /dev/null
+++ b/tests/data/main_with_no_exec_guard.py
@@ -0,0 +1,12 @@
+
+import logging
+import mitogen.master
+
+def foo():
+ pass
+
+logging.basicConfig(level=logging.INFO)
+router = mitogen.master.Router()
+
+l = router.local()
+l.call(foo)
diff --git a/tests/data/pkg_like_plumbum/__init__.py b/tests/data/pkg_like_plumbum/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/data/pkg_like_plumbum/colors.py b/tests/data/pkg_like_plumbum/colors.py
new file mode 100644
index 00000000..bff19555
--- /dev/null
+++ b/tests/data/pkg_like_plumbum/colors.py
@@ -0,0 +1,15 @@
+
+# coding=utf-8
+
+import sys
+
+
+# £
+
+class EvilObject(object):
+ """
+ Wild cackles! I have come to confuse and perplex your importer with rainbows!
+ """
+
+sys.modules[__name__] = EvilObject()
+
diff --git a/tests/data/plain_old_module.py b/tests/data/plain_old_module.py
index 49294464..608f27a5 100755
--- a/tests/data/plain_old_module.py
+++ b/tests/data/plain_old_module.py
@@ -12,7 +12,7 @@ class MyError(Exception):
def get_sentinel_value():
# Some proof we're even talking to the mitogen-test Docker image
- return open('/etc/sentinel').read().decode()
+ return open('/etc/sentinel', 'rb').read().decode()
def add(x, y):
diff --git a/tests/data/python_never_responds.sh b/tests/data/python_never_responds.py
similarity index 51%
rename from tests/data/python_never_responds.sh
rename to tests/data/python_never_responds.py
index f1ad5787..449d8565 100755
--- a/tests/data/python_never_responds.sh
+++ b/tests/data/python_never_responds.py
@@ -1,3 +1,7 @@
-#!/bin/bash
+#!/usr/bin/python
# I am a Python interpreter that sits idle until the connection times out.
-exec -a mitogen-tests-python-never-responds.sh sleep 86400
+
+import time
+
+while True:
+ time.sleep(86400)
diff --git a/tests/data/simple_pkg/ping.py b/tests/data/simple_pkg/ping.py
new file mode 100644
index 00000000..722f7b87
--- /dev/null
+++ b/tests/data/simple_pkg/ping.py
@@ -0,0 +1,6 @@
+
+
+def ping(*args):
+ return args
+
+
diff --git a/tests/data/six_brokenpkg/__init__.py b/tests/data/six_brokenpkg/__init__.py
index 42e633c7..e5944b83 100644
--- a/tests/data/six_brokenpkg/__init__.py
+++ b/tests/data/six_brokenpkg/__init__.py
@@ -29,9 +29,9 @@ import os.path
try:
import six as _system_six
- print('six_brokenpkg: using system six:', _system_six)
+ #print('six_brokenpkg: using system six:', _system_six)
except ImportError:
- print('six_brokenpkg: no system six available')
+ #print('six_brokenpkg: no system six available')
_system_six = None
if _system_six:
diff --git a/tests/data/stubs/README.md b/tests/data/stubs/README.md
new file mode 100644
index 00000000..02de6456
--- /dev/null
+++ b/tests/data/stubs/README.md
@@ -0,0 +1,5 @@
+
+# tests/data/stubs/
+
+Dummy implementations of various third-party tools that just spawn local Python
+interpreters. Used to roughly test each tool's associated Mitogen class.
diff --git a/tests/data/stubs/stub-doas.py b/tests/data/stubs/stub-doas.py
new file mode 100755
index 00000000..ca929bc0
--- /dev/null
+++ b/tests/data/stubs/stub-doas.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+import json
+import os
+import subprocess
+import sys
+
+os.environ['ORIGINAL_ARGV'] = json.dumps(sys.argv)
+os.environ['THIS_IS_STUB_DOAS'] = '1'
+
+# This must be a child process and not exec() since Mitogen replaces its stderr
+# descriptor, causing the last user of the slave PTY to close it, resulting in
+# the master side indicating EIO.
+subprocess.check_call(sys.argv[sys.argv.index('--') + 1:])
diff --git a/tests/data/stubs/stub-docker.py b/tests/data/stubs/stub-docker.py
new file mode 100755
index 00000000..341cc818
--- /dev/null
+++ b/tests/data/stubs/stub-docker.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+
+import sys
+import os
+
+os.environ['ORIGINAL_ARGV'] = repr(sys.argv)
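+# Re-exec everything from the interpreter path onward (the element
+# immediately preceding '-c').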
+os.execv(sys.executable, sys.argv[sys.argv.index('-c') - 1:])
diff --git a/tests/data/fake_lxc_attach.py b/tests/data/stubs/stub-kubectl.py
similarity index 78%
rename from tests/data/fake_lxc_attach.py
rename to tests/data/stubs/stub-kubectl.py
index 2fedb961..16f7e460 100755
--- a/tests/data/fake_lxc_attach.py
+++ b/tests/data/stubs/stub-kubectl.py
@@ -4,4 +4,5 @@ import sys
import os
os.environ['ORIGINAL_ARGV'] = repr(sys.argv)
+os.environ['THIS_IS_STUB_KUBECTL'] = '1'
os.execv(sys.executable, sys.argv[sys.argv.index('--') + 1:])
diff --git a/tests/data/fake_lxc.py b/tests/data/stubs/stub-lxc-attach.py
similarity index 77%
rename from tests/data/fake_lxc.py
rename to tests/data/stubs/stub-lxc-attach.py
index 2fedb961..5263d362 100755
--- a/tests/data/fake_lxc.py
+++ b/tests/data/stubs/stub-lxc-attach.py
@@ -4,4 +4,5 @@ import sys
import os
os.environ['ORIGINAL_ARGV'] = repr(sys.argv)
+os.environ['THIS_IS_STUB_LXC_ATTACH'] = '1'
os.execv(sys.executable, sys.argv[sys.argv.index('--') + 1:])
diff --git a/tests/data/stubs/stub-lxc-info.py b/tests/data/stubs/stub-lxc-info.py
new file mode 100755
index 00000000..480bf266
--- /dev/null
+++ b/tests/data/stubs/stub-lxc-info.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+# Mainly for use in stubconnections/kubectl.yml
+
+print('PID: 1')
diff --git a/tests/data/stubs/stub-lxc.py b/tests/data/stubs/stub-lxc.py
new file mode 100755
index 00000000..9d14090a
--- /dev/null
+++ b/tests/data/stubs/stub-lxc.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+
+import sys
+import os
+
+# setns.py fetching leader PID.
+if sys.argv[1] == 'info':
+ print('Pid: 1')
+ sys.exit(0)
+
+os.environ['ORIGINAL_ARGV'] = repr(sys.argv)
+os.environ['THIS_IS_STUB_LXC'] = '1'
+os.execv(sys.executable, sys.argv[sys.argv.index('--') + 1:])
diff --git a/tests/data/stubs/stub-python.py b/tests/data/stubs/stub-python.py
new file mode 100755
index 00000000..d9239c2b
--- /dev/null
+++ b/tests/data/stubs/stub-python.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+
+import json
+import os
+import subprocess
+import sys
+
+os.environ['ORIGINAL_ARGV'] = json.dumps(sys.argv)
+os.environ['THIS_IS_STUB_PYTHON'] = '1'
+
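+# Mimic python(1): a leading option is passed straight through; otherwise
+# record and strip the first argument so tests can assert on it.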
+if sys.argv[1].startswith('-'):
+ os.execvp(sys.executable, [sys.executable] + sys.argv[1:])
+else:
+ os.environ['STUB_PYTHON_FIRST_ARG'] = sys.argv.pop(1)
+ os.execvp(sys.executable, sys.argv[1:])
diff --git a/tests/data/fakessh.py b/tests/data/stubs/stub-ssh.py
similarity index 78%
rename from tests/data/fakessh.py
rename to tests/data/stubs/stub-ssh.py
index 8df5aa39..80c02835 100755
--- a/tests/data/fakessh.py
+++ b/tests/data/stubs/stub-ssh.py
@@ -15,6 +15,9 @@ Are you sure you want to continue connecting (yes/no)?
HOST_KEY_STRICT_MSG = """Host key verification failed.\n"""
+PERMDENIED_CLASSIC_MSG = 'Permission denied (publickey,password)\n'
+PERMDENIED_75_MSG = 'chicken@nandos.com: permission denied (publickey,password)\n'
+
def tty(msg):
fp = open('/dev/tty', 'wb', 0)
@@ -37,13 +40,23 @@ def confirm(msg):
fp.close()
-if os.getenv('FAKESSH_MODE') == 'ask':
- assert 'y\n' == confirm(HOST_KEY_ASK_MSG)
+mode = os.getenv('STUBSSH_MODE')
+
+if mode == 'ask':
+ assert 'yes\n' == confirm(HOST_KEY_ASK_MSG)
-if os.getenv('FAKESSH_MODE') == 'strict':
+elif mode == 'strict':
stderr(HOST_KEY_STRICT_MSG)
sys.exit(255)
+elif mode == 'permdenied_classic':
+ stderr(PERMDENIED_CLASSIC_MSG)
+ sys.exit(255)
+
+elif mode == 'permdenied_75':
+ stderr(PERMDENIED_75_MSG)
+ sys.exit(255)
+
#
# Set an env var if stderr was a TTY to make ssh_test tests easier to write.
diff --git a/tests/data/stubs/stub-su.py b/tests/data/stubs/stub-su.py
new file mode 100755
index 00000000..c32c91de
--- /dev/null
+++ b/tests/data/stubs/stub-su.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+import json
+import os
+import subprocess
+import sys
+
+os.environ['ORIGINAL_ARGV'] = json.dumps(sys.argv)
+os.environ['THIS_IS_STUB_SU'] = '1'
+
+# This must be a child process and not exec() since Mitogen replaces its stderr
+# descriptor, causing the last user of the slave PTY to close it, resulting in
+# the master side indicating EIO.
+subprocess.check_call(sys.argv[sys.argv.index('-c') + 1], shell=True)
diff --git a/tests/data/stubs/stub-sudo.py b/tests/data/stubs/stub-sudo.py
new file mode 100755
index 00000000..a7f2704f
--- /dev/null
+++ b/tests/data/stubs/stub-sudo.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+import json
+import os
+import subprocess
+import sys
+
+os.environ['ORIGINAL_ARGV'] = json.dumps(sys.argv)
+os.environ['THIS_IS_STUB_SUDO'] = '1'
+
+# This must be a child process and not exec() since Mitogen replaces its stderr
+# descriptor, causing the last user of the slave PTY to close it, resulting in
+# the master side indicating EIO.
+subprocess.check_call(sys.argv[sys.argv.index('--') + 1:])
diff --git a/tests/data/ubuntu-python-2.4.6.tar.bz2 b/tests/data/ubuntu-python-2.4.6.tar.bz2
new file mode 100644
index 00000000..8677e26a
--- /dev/null
+++ b/tests/data/ubuntu-python-2.4.6.tar.bz2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:123ddbd9055745d37e8f14bf1c8352541ff4d500e6daa4aa3165e604fb7e8b6a
+size 6176131
diff --git a/tests/data/write_all_consumer.py b/tests/data/write_all_consumer.py
new file mode 100755
index 00000000..4013ccdd
--- /dev/null
+++ b/tests/data/write_all_consumer.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+# I consume 65535 bytes every 10ms, for testing mitogen.core.write_all()
+
+import os
+import time
+
+while True:
+ os.read(0, 65535)
+ time.sleep(0.01)
diff --git a/tests/data/write_all_consumer.sh b/tests/data/write_all_consumer.sh
deleted file mode 100755
index e6aaaf72..00000000
--- a/tests/data/write_all_consumer.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-# I consume 65535 bytes every 10ms, for testing mitogen.core.write_all()
-
-while :; do
- read -n 65535
- sleep 0.01
-done
diff --git a/tests/doas_test.py b/tests/doas_test.py
new file mode 100644
index 00000000..0e27c2ab
--- /dev/null
+++ b/tests/doas_test.py
@@ -0,0 +1,31 @@
+
+import os
+
+import mitogen
+import mitogen.parent
+
+import unittest2
+
+import testlib
+
+
+class ConstructorTest(testlib.RouterMixin, testlib.TestCase):
+ doas_path = testlib.data_path('stubs/stub-doas.py')
+
+ def test_okay(self):
+ context = self.router.doas(
+ doas_path=self.doas_path,
+ username='someuser',
+ )
+ argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV'))
+ self.assertEquals(argv[:4], [
+ self.doas_path,
+ '-u',
+ 'someuser',
+ '--',
+ ])
+ self.assertEquals('1', context.call(os.getenv, 'THIS_IS_STUB_DOAS'))
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/docker_test.py b/tests/docker_test.py
new file mode 100644
index 00000000..49c742ee
--- /dev/null
+++ b/tests/docker_test.py
@@ -0,0 +1,28 @@
+import os
+
+import mitogen
+
+import unittest2
+
+import testlib
+
+
+class ConstructorTest(testlib.RouterMixin, testlib.TestCase):
+ def test_okay(self):
+ docker_path = testlib.data_path('stubs/stub-docker.py')
+ context = self.router.docker(
+ container='container_name',
+ docker_path=docker_path,
+ )
+ stream = self.router.stream_by_id(context.context_id)
+
+ argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV'))
+ self.assertEquals(argv[0], docker_path)
+ self.assertEquals(argv[1], 'exec')
+ self.assertEquals(argv[2], '--interactive')
+ self.assertEquals(argv[3], 'container_name')
+ self.assertEquals(argv[4], stream.python_path)
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/error_test.py b/tests/error_test.py
new file mode 100644
index 00000000..2eefd567
--- /dev/null
+++ b/tests/error_test.py
@@ -0,0 +1,33 @@
+
+import unittest2
+
+import testlib
+import mitogen.core
+
+
+class ConstructorTest(testlib.TestCase):
+ klass = mitogen.core.Error
+
+ def test_literal_no_format(self):
+ e = self.klass('error')
+ self.assertEquals(e.args[0], 'error')
+ self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
+
+ def test_literal_format_chars_present(self):
+ e = self.klass('error%s')
+ self.assertEquals(e.args[0], 'error%s')
+ self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
+
+ def test_format(self):
+ e = self.klass('error%s', 123)
+ self.assertEquals(e.args[0], 'error123')
+ self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
+
+ def test_bytes_to_unicode(self):
+ e = self.klass(mitogen.core.b('error'))
+ self.assertEquals(e.args[0], 'error')
+ self.assertTrue(isinstance(e.args[0], mitogen.core.UnicodeType))
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/fakessh_test.py b/tests/fakessh_test.py
index c584acfe..e7dde711 100644
--- a/tests/fakessh_test.py
+++ b/tests/fakessh_test.py
@@ -2,7 +2,6 @@
import os
import shutil
-import timeoutcontext
import unittest2
import mitogen.fakessh
@@ -10,8 +9,7 @@ import mitogen.fakessh
import testlib
-class RsyncTest(testlib.DockerMixin, unittest2.TestCase):
- @timeoutcontext.timeout(5)
+class RsyncTest(testlib.DockerMixin, testlib.TestCase):
@unittest2.skip('broken')
def test_rsync_from_master(self):
context = self.docker_ssh_any()
@@ -28,7 +26,6 @@ class RsyncTest(testlib.DockerMixin, unittest2.TestCase):
self.assertTrue(context.call(os.path.exists, '/tmp/data'))
self.assertTrue(context.call(os.path.exists, '/tmp/data/simple_pkg/a.py'))
- @timeoutcontext.timeout(5)
@unittest2.skip('broken')
def test_rsync_between_direct_children(self):
# master -> SSH -> mitogen__has_sudo_pubkey -> rsync(.ssh) -> master ->
diff --git a/tests/file_service_test.py b/tests/file_service_test.py
new file mode 100644
index 00000000..135d8e14
--- /dev/null
+++ b/tests/file_service_test.py
@@ -0,0 +1,109 @@
+
+import unittest2
+
+import mitogen.core
+import mitogen.service
+
+import testlib
+
+
+class FetchTest(testlib.RouterMixin, testlib.TestCase):
+ klass = mitogen.service.FileService
+
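+ # Build a Message whose reply_to routes back to a fresh Receiver,
+ # standing in for a real service request.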
+ def replyable_msg(self, **kwargs):
+ recv = mitogen.core.Receiver(self.router, persist=False)
+ msg = mitogen.core.Message(
+ src_id=mitogen.context_id,
+ reply_to=recv.handle,
+ **kwargs
+ )
+ msg.router = self.router
+ return recv, msg
+
+ def test_unauthorized(self):
+ service = self.klass(self.router)
+ recv, msg = self.replyable_msg()
+ service.fetch(
+ path='/etc/shadow',
+ sender=None,
+ msg=msg,
+ )
+ e = self.assertRaises(mitogen.core.CallError,
+ lambda: recv.get().unpickle())
+ expect = service.unregistered_msg % ('/etc/shadow',)
+ self.assertTrue(expect in e.args[0])
+
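+ # fetch() first replies with a metadata dict describing the file;
+ # check its shape.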
+ def _validate_response(self, resp):
+ self.assertTrue(isinstance(resp, dict))
+ self.assertEquals('root', resp['owner'])
+ self.assertEquals('root', resp['group'])
+ self.assertTrue(isinstance(resp['mode'], int))
+ self.assertTrue(isinstance(resp['mtime'], float))
+ self.assertTrue(isinstance(resp['atime'], float))
+ self.assertTrue(isinstance(resp['size'], int))
+
+ def test_path_authorized(self):
+ recv = mitogen.core.Receiver(self.router)
+ service = self.klass(self.router)
+ service.register('/etc/passwd')
+ recv, msg = self.replyable_msg()
+ service.fetch(
+ path='/etc/passwd',
+ sender=recv.to_sender(),
+ msg=msg,
+ )
+ self._validate_response(recv.get().unpickle())
+
+ def test_root_authorized(self):
+ recv = mitogen.core.Receiver(self.router)
+ service = self.klass(self.router)
+ service.register_prefix('/')
+ recv, msg = self.replyable_msg()
+ service.fetch(
+ path='/etc/passwd',
+ sender=recv.to_sender(),
+ msg=msg,
+ )
+ self._validate_response(recv.get().unpickle())
+
+ def test_prefix_authorized(self):
+ recv = mitogen.core.Receiver(self.router)
+ service = self.klass(self.router)
+ service.register_prefix('/etc')
+ recv, msg = self.replyable_msg()
+ service.fetch(
+ path='/etc/passwd',
+ sender=recv.to_sender(),
+ msg=msg,
+ )
+ self._validate_response(recv.get().unpickle())
+
+ def test_prefix_authorized_abspath_bad(self):
+ recv = mitogen.core.Receiver(self.router)
+ service = self.klass(self.router)
+ service.register_prefix('/etc')
+ recv, msg = self.replyable_msg()
+ service.fetch(
+ path='/etc/foo/bar/../../../passwd',
+ sender=recv.to_sender(),
+ msg=msg,
+ )
+ self.assertEquals(None, recv.get().unpickle())
+
+ def test_prefix_authorized_escape_bad(self):
+ recv = mitogen.core.Receiver(self.router)
+ service = self.klass(self.router)
+ service.register_prefix('/etc')
+ recv, msg = self.replyable_msg()
+ service.fetch(
+ path='/etc/../shadow',
+ sender=recv.to_sender(),
+ msg=msg,
+ )
+ e = self.assertRaises(mitogen.core.CallError,
+ lambda: recv.get().unpickle())
+ expect = service.unregistered_msg % ('/etc/../shadow',)
+ self.assertTrue(expect in e.args[0])
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/first_stage_test.py b/tests/first_stage_test.py
index feaa34ce..470afc7a 100644
--- a/tests/first_stage_test.py
+++ b/tests/first_stage_test.py
@@ -30,15 +30,18 @@ class CommandLineTest(testlib.RouterMixin, testlib.TestCase):
# success.
fp = open("/dev/null", "r")
- proc = subprocess.Popen(args,
- stdin=fp,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- )
- stdout, stderr = proc.communicate()
- self.assertEquals(0, proc.returncode)
- self.assertEquals(mitogen.parent.Stream.EC0_MARKER, stdout)
- self.assertIn(b("Error -5 while decompressing data: incomplete or truncated stream"), stderr)
+ try:
+ proc = subprocess.Popen(args,
+ stdin=fp,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ stdout, stderr = proc.communicate()
+ self.assertEquals(0, proc.returncode)
+ self.assertEquals(mitogen.parent.Stream.EC0_MARKER, stdout)
+ self.assertIn(b("Error -5 while decompressing data"), stderr)
+ finally:
+ fp.close()
if __name__ == '__main__':
diff --git a/tests/fork_test.py b/tests/fork_test.py
index 8b396bbf..7ca41194 100644
--- a/tests/fork_test.py
+++ b/tests/fork_test.py
@@ -1,11 +1,25 @@
-import ctypes
import os
import random
-import ssl
import struct
import sys
+try:
+ import _ssl
+except ImportError:
+ _ssl = None
+
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+try:
+ import ctypes
+except ImportError:
+ # Python 2.4
+ ctypes = None
+
import mitogen
+import mitogen.fork
import unittest2
@@ -13,23 +27,32 @@ import testlib
import plain_old_module
-IS_64BIT = struct.calcsize('P') == 8
-PLATFORM_TO_PATH = {
- ('darwin', False): '/usr/lib/libssl.dylib',
- ('darwin', True): '/usr/lib/libssl.dylib',
- ('linux2', False): '/usr/lib/libssl.so',
- ('linux2', True): '/usr/lib/x86_64-linux-gnu/libssl.so',
- # Python 2.6
- ('linux3', False): '/usr/lib/libssl.so',
- ('linux3', True): '/usr/lib/x86_64-linux-gnu/libssl.so',
- # Python 3
- ('linux', False): '/usr/lib/libssl.so',
- ('linux', True): '/usr/lib/x86_64-linux-gnu/libssl.so',
-}
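+# Find libssl by inspecting the _ssl extension module's dynamic dependencies,
+# replacing the old hard-coded per-platform path table.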
+def _find_ssl_linux():
+ s = testlib.subprocess__check_output(['ldd', _ssl.__file__])
+ for line in s.decode().splitlines():
+ bits = line.split()
+ if bits[0].startswith('libssl'):
+ return bits[2]
+
+def _find_ssl_darwin():
+ s = testlib.subprocess__check_output(['otool', '-l', _ssl.__file__])
+ for line in s.decode().splitlines():
+ bits = line.split()
+ if bits[0] == 'name' and 'libssl' in bits[1]:
+ return bits[1]
+
+
+if ctypes and sys.platform.startswith('linux'):
+ LIBSSL_PATH = _find_ssl_linux()
+elif ctypes and sys.platform == 'darwin':
+ LIBSSL_PATH = _find_ssl_darwin()
+else:
+ LIBSSL_PATH = None
-c_ssl = ctypes.CDLL(PLATFORM_TO_PATH[sys.platform, IS_64BIT])
-c_ssl.RAND_pseudo_bytes.argtypes = [ctypes.c_char_p, ctypes.c_int]
-c_ssl.RAND_pseudo_bytes.restype = ctypes.c_int
+if ctypes and LIBSSL_PATH:
+ c_ssl = ctypes.CDLL(LIBSSL_PATH)
+ c_ssl.RAND_pseudo_bytes.argtypes = [ctypes.c_char_p, ctypes.c_int]
+ c_ssl.RAND_pseudo_bytes.restype = ctypes.c_int
def ping():
@@ -55,7 +78,13 @@ def exercise_importer(n):
return simple_pkg.a.subtract_one_add_two(n)
-class ForkTest(testlib.RouterMixin, unittest2.TestCase):
+skipIfUnsupported = unittest2.skipIf(
+ condition=(not mitogen.fork.FORK_SUPPORTED),
+ reason="mitogen.fork unsupported on this platform"
+)
+
+
+class ForkTest(testlib.RouterMixin, testlib.TestCase):
def test_okay(self):
context = self.router.fork()
self.assertNotEqual(context.call(os.getpid), os.getpid())
@@ -65,6 +94,10 @@ class ForkTest(testlib.RouterMixin, unittest2.TestCase):
context = self.router.fork()
self.assertNotEqual(context.call(random_random), random_random())
+ @unittest2.skipIf(
+ condition=LIBSSL_PATH is None or ctypes is None,
+ reason='cannot test libssl on this platform',
+ )
def test_ssl_module_diverges(self):
# Ensure generator state is initialized.
RAND_pseudo_bytes()
@@ -84,7 +117,10 @@ class ForkTest(testlib.RouterMixin, unittest2.TestCase):
context = self.router.fork(on_start=on_start)
self.assertEquals(123, recv.get().unpickle())
-class DoubleChildTest(testlib.RouterMixin, unittest2.TestCase):
+ForkTest = skipIfUnsupported(ForkTest)
+
+
+class DoubleChildTest(testlib.RouterMixin, testlib.TestCase):
def test_okay(self):
# When forking from the master process, Mitogen had nothing to do with
# setting up stdio -- that was inherited wherever the Master is running
@@ -105,6 +141,8 @@ class DoubleChildTest(testlib.RouterMixin, unittest2.TestCase):
c2 = self.router.fork(name='c2', via=c1)
self.assertEqual(2, c2.call(exercise_importer, 1))
+DoubleChildTest = skipIfUnsupported(DoubleChildTest)
+
if __name__ == '__main__':
unittest2.main()
diff --git a/tests/image_prep/_container_setup.yml b/tests/image_prep/_container_setup.yml
index db0d3789..dc0bbf53 100644
--- a/tests/image_prep/_container_setup.yml
+++ b/tests/image_prep/_container_setup.yml
@@ -22,7 +22,6 @@
packages:
common:
- - git
- openssh-server
- rsync
- strace
@@ -31,7 +30,12 @@
"9":
- libjson-perl
- python-virtualenv
+ - locales
CentOS:
+ "5":
+ - perl
+ - sudo
+ #- perl-JSON -- skipped on CentOS 5, packages are a pain.
"6":
- perl-JSON
"7":
@@ -64,8 +68,26 @@
with_items:
- /var/cache/apt
- /var/lib/apt/lists
+
+ - copy:
+ dest: /etc/locale.gen
+ content: |
+ en_US.UTF-8 UTF-8
+ fr_FR.UTF-8 UTF-8
when: distro == "Debian"
+ - shell: locale-gen
+ when: distro == "Debian"
+
+ # Vanilla Ansible needs simplejson on CentOS 5.
+ - shell: mkdir -p /usr/lib/python2.4/site-packages/simplejson/
+ when: distro == "CentOS" and ver == "5"
+
+ - synchronize:
+ dest: /usr/lib/python2.4/site-packages/simplejson/
+ src: ../../ansible_mitogen/compat/simplejson/
+ when: distro == "CentOS" and ver == "5"
+
- user:
name: root
password: "{{ 'rootpassword' | password_hash('sha256') }}"
@@ -91,9 +113,33 @@
dest: /etc/ssh/banner.txt
src: ../data/docker/ssh_login_banner.txt
- - copy:
- dest: /etc/sudoers.d/001-mitogen
- src: ../data/docker/001-mitogen.sudo
+ - name: Ensure /etc/sudoers.d exists
+ file:
+ state: directory
+ path: /etc/sudoers.d
+ mode: 'u=rwx,go='
+
+ - blockinfile:
+ path: /etc/sudoers
+ block: |
+ # https://www.toofishes.net/blog/trouble-sudoers-or-last-entry-wins/
+ %mitogen__sudo_nopw ALL=(ALL:ALL) NOPASSWD:ALL
+ mitogen__has_sudo_nopw ALL = (mitogen__pw_required) ALL
+ mitogen__has_sudo_nopw ALL = (mitogen__require_tty_pw_required) ALL
+
+ Defaults>mitogen__pw_required targetpw
+ Defaults>mitogen__require_tty requiretty
+ Defaults>mitogen__require_tty_pw_required requiretty,targetpw
+
+ # Prevent permission denied errors.
+ - file:
+ path: /etc/sudoers.d/README
+ state: absent
+
+ - lineinfile:
+ path: /etc/sudoers
+ line: "%wheel ALL=(ALL) ALL"
+ when: distro == "CentOS"
- lineinfile:
path: /etc/ssh/sshd_config
diff --git a/tests/image_prep/_user_accounts.yml b/tests/image_prep/_user_accounts.yml
index f9cac85c..a5b63c13 100644
--- a/tests/image_prep/_user_accounts.yml
+++ b/tests/image_prep/_user_accounts.yml
@@ -67,18 +67,18 @@
shell: /bin/bash
groups: "{{user_groups[item]|default(['mitogen__group'])}}"
password: "{{ (item + '_password') | password_hash('sha256') }}"
- loop: "{{all_users}}"
+ with_items: "{{all_users}}"
when: ansible_system != 'Darwin'
- user:
name: "mitogen__{{item}}"
shell: /bin/bash
groups: "{{user_groups[item]|default(['mitogen__group'])}}"
password: "{{item}}_password"
- loop: "{{all_users}}"
+ with_items: "{{all_users}}"
when: ansible_system == 'Darwin'
- name: Hide users from login window.
- loop: "{{all_users}}"
+ with_items: "{{all_users}}"
when: ansible_system == 'Darwin'
osx_defaults:
array_add: true
@@ -149,4 +149,4 @@
lineinfile:
path: /etc/sudoers
line: "{{lookup('pipe', 'whoami')}} ALL = (mitogen__{{item}}) NOPASSWD:ALL"
- loop: "{{normal_users}}"
+ with_items: "{{normal_users}}"
diff --git a/tests/image_prep/ansible.cfg b/tests/image_prep/ansible.cfg
index a3937825..8a8c47fa 100644
--- a/tests/image_prep/ansible.cfg
+++ b/tests/image_prep/ansible.cfg
@@ -2,3 +2,5 @@
[defaults]
strategy_plugins = ../../ansible_mitogen/plugins/strategy
retry_files_enabled = false
+display_args_to_stdout = True
+no_target_syslog = True
diff --git a/tests/image_prep/build_docker_images.py b/tests/image_prep/build_docker_images.py
index 94a17104..9fc89c05 100755
--- a/tests/image_prep/build_docker_images.py
+++ b/tests/image_prep/build_docker_images.py
@@ -6,9 +6,10 @@ Build the Docker images used for testing.
import commands
import os
-import tempfile
import shlex
import subprocess
+import sys
+import tempfile
BASEDIR = os.path.dirname(os.path.abspath(__file__))
@@ -24,9 +25,10 @@ def sh(s, *args):
label_by_id = {}
for base_image, label in [
- ('debian:stretch', 'debian'), # Python 2.7.13, 3.5.3
- ('centos:6', 'centos6'), # Python 2.6.6
- ('centos:7', 'centos7') # Python 2.7.5
+ ('astj/centos5-vault', 'centos5'), # Python 2.4.3
+ ('debian:stretch', 'debian'), # Python 2.7.13, 3.5.3
+ ('centos:6', 'centos6'), # Python 2.6.6
+ ('centos:7', 'centos7') # Python 2.7.5
]:
args = sh('docker run --rm -it -d -h mitogen-%s %s /bin/bash',
label, base_image)
@@ -42,7 +44,7 @@ with tempfile.NamedTemporaryFile() as fp:
try:
subprocess.check_call(
cwd=BASEDIR,
- args=sh('ansible-playbook -i %s -c docker setup.yml', fp.name),
+ args=sh('ansible-playbook -i %s -c docker setup.yml', fp.name) + sys.argv[1:],
)
for container_id, label in label_by_id.items():
diff --git a/tests/image_prep/py24-build.sh b/tests/image_prep/py24-build.sh
new file mode 100755
index 00000000..b30cc24b
--- /dev/null
+++ b/tests/image_prep/py24-build.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Build the tests/data/ubuntu-python-2.4.6.tar.bz2 tarball.
+
+set -ex
+
+wget -cO setuptools-1.4.2.tar.gz https://files.pythonhosted.org/packages/source/s/setuptools/setuptools-1.4.2.tar.gz
+wget -cO ez_setup.py https://raw.githubusercontent.com/pypa/setuptools/bootstrap-py24/ez_setup.py
+wget -cO simplejson-2.0.9.tar.gz https://github.com/simplejson/simplejson/archive/v2.0.9.tar.gz
+wget -cO psutil-2.1.3.tar.gz https://github.com/giampaolo/psutil/archive/release-2.1.3.tar.gz
+wget -cO unittest2-0.5.1.zip http://voidspace.org.uk/downloads/unittest2-0.5.1-python2.3.zip
+wget -cO cpython-2.4.6.tar.gz https://github.com/python/cpython/archive/v2.4.6.tar.gz
+wget -cO mock-0.8.0.tar.gz https://github.com/testing-cabal/mock/archive/0.8.0.tar.gz
+
+tar xzvf cpython-2.4.6.tar.gz
+
+(
+ cd cpython-2.4.6
+ ./configure --prefix=/usr/local/python2.4.6 --with-pydebug --enable-debug CFLAGS="-g -O0" # --enable-debug
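+ # Build the zlib module in: Mitogen's first stage needs it to decompress
+ # the bootstrap.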
+ echo 'zlib zlibmodule.c -I$(prefix)/include -L$(exec_prefix)/lib -lz' >> Modules/Setup.config
+ make -j 8
+ sudo make install
+)
+
+sudo /usr/local/python2.4.6/bin/python2.4 ez_setup.py
+sudo /usr/local/python2.4.6/bin/easy_install -Z psutil-2.1.3.tar.gz
+sudo /usr/local/python2.4.6/bin/easy_install -Z simplejson-2.0.9.tar.gz
+sudo /usr/local/python2.4.6/bin/easy_install -Z unittest2-0.5.1.zip
+sudo /usr/local/python2.4.6/bin/easy_install -Z mock-0.8.0.tar.gz
+sudo find /usr/local/python2.4.6 -name '*.py[co]' -delete
+tar jcvf ubuntu-python-2.4.6.tar.bz2 /usr/local/python2.4.6
diff --git a/tests/image_prep/py24.sh b/tests/image_prep/py24.sh
new file mode 100755
index 00000000..3db1f6ab
--- /dev/null
+++ b/tests/image_prep/py24.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+docker run \
+ -it --rm \
+ -v `pwd`:`pwd` \
+ ubuntu:trusty \
+ bash -c "set -ex; sudo apt-get update; sudo apt-get -y install zlib1g-dev build-essential wget; cd `pwd`; bash py24-build.sh"
diff --git a/tests/image_prep/setup.yml b/tests/image_prep/setup.yml
index 77a80e3b..760da0f6 100644
--- a/tests/image_prep/setup.yml
+++ b/tests/image_prep/setup.yml
@@ -10,5 +10,5 @@
Ubuntu: sudo
CentOS: wheel
-- import_playbook: _container_setup.yml
-- import_playbook: _user_accounts.yml
+- include: _container_setup.yml
+- include: _user_accounts.yml
diff --git a/tests/importer_test.py b/tests/importer_test.py
index 5c970438..fc6f4bd6 100644
--- a/tests/importer_test.py
+++ b/tests/importer_test.py
@@ -1,12 +1,10 @@
-import email.utils
import sys
import threading
import types
import zlib
import mock
-import pytest
import unittest2
import mitogen.core
@@ -144,7 +142,6 @@ class LoadModulePackageTest(ImporterMixin, testlib.TestCase):
class EmailParseAddrSysTest(testlib.RouterMixin, testlib.TestCase):
- @pytest.fixture(autouse=True)
def initdir(self, caplog):
self.caplog = caplog
@@ -212,5 +209,10 @@ class ImporterBlacklistTest(testlib.TestCase):
self.assertTrue(mitogen.core.is_blacklisted_import(importer, 'builtins'))
+class Python24LineCacheTest(testlib.TestCase):
+ # TODO: mitogen.core.Importer._update_linecache()
+ pass
+
+
if __name__ == '__main__':
unittest2.main()
diff --git a/tests/io_op_test.py b/tests/io_op_test.py
index 8ec204b6..525a1b12 100644
--- a/tests/io_op_test.py
+++ b/tests/io_op_test.py
@@ -9,6 +9,15 @@ import testlib
import mitogen.core
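+# Wrap a mock so that exception instances returned from its side_effect list
+# are raised, as required by the old mock release used on Python 2.4.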
+def py24_mock_fix(m):
+ def wrapper(*args, **kwargs):
+ ret = m(*args, **kwargs)
+ if isinstance(ret, Exception):
+ raise ret
+ return ret
+ return wrapper
+
+
class RestartTest(object):
func = staticmethod(mitogen.core.io_op)
exception_class = None
@@ -21,7 +30,7 @@ class RestartTest(object):
self.exception_class(errno.EINTR),
'yay',
]
- rc, disconnected = self.func(m, 'input')
+ rc, disconnected = self.func(py24_mock_fix(m), 'input')
self.assertEquals(rc, 'yay')
self.assertFalse(disconnected)
self.assertEquals(4, m.call_count)
diff --git a/tests/kubectl_test.py b/tests/kubectl_test.py
new file mode 100644
index 00000000..0bac3048
--- /dev/null
+++ b/tests/kubectl_test.py
@@ -0,0 +1,29 @@
+
+import os
+
+import mitogen
+import mitogen.parent
+
+import unittest2
+
+import testlib
+
+
+class ConstructorTest(testlib.RouterMixin, testlib.TestCase):
+ kubectl_path = testlib.data_path('stubs/stub-kubectl.py')
+
+ def test_okay(self):
+ context = self.router.kubectl(
+ pod='pod_name',
+ kubectl_path=self.kubectl_path
+ )
+
+ argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV'))
+ self.assertEquals(argv[0], self.kubectl_path)
+ self.assertEquals(argv[1], 'exec')
+ self.assertEquals(argv[2], '-it')
+ self.assertEquals(argv[3], 'pod_name')
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/latch_test.py b/tests/latch_test.py
index 5be12d73..6ae43221 100644
--- a/tests/latch_test.py
+++ b/tests/latch_test.py
@@ -21,6 +21,13 @@ class EmptyTest(testlib.TestCase):
latch.put(None)
self.assertTrue(not latch.empty())
+ def test_closed_is_empty(self):
+ latch = self.klass()
+ latch.put(None)
+ latch.close()
+ self.assertRaises(mitogen.core.LatchError,
+ lambda: latch.empty())
+
class GetTest(testlib.TestCase):
klass = mitogen.core.Latch
diff --git a/tests/local_test.py b/tests/local_test.py
index fbf5c1c8..fe2bd149 100644
--- a/tests/local_test.py
+++ b/tests/local_test.py
@@ -5,11 +5,8 @@ import sys
import unittest2
import mitogen
-import mitogen.ssh
-import mitogen.utils
import testlib
-import plain_old_module
def get_sys_executable():
@@ -20,43 +17,37 @@ def get_os_environ():
return dict(os.environ)
-class LocalTest(testlib.RouterMixin, unittest2.TestCase):
- stream_class = mitogen.ssh.Stream
+class ConstructionTest(testlib.RouterMixin, testlib.TestCase):
+ stub_python_path = testlib.data_path('stubs/stub-python.py')
def test_stream_name(self):
context = self.router.local()
pid = context.call(os.getpid)
self.assertEquals('local.%d' % (pid,), context.name)
-
-class PythonPathTest(testlib.RouterMixin, unittest2.TestCase):
- stream_class = mitogen.ssh.Stream
-
- def test_inherited(self):
+ def test_python_path_inherited(self):
context = self.router.local()
self.assertEquals(sys.executable, context.call(get_sys_executable))
- def test_string(self):
- os.environ['PYTHON'] = sys.executable
+ def test_python_path_string(self):
context = self.router.local(
- python_path=testlib.data_path('env_wrapper.sh'),
+ python_path=self.stub_python_path,
)
- self.assertEquals(sys.executable, context.call(get_sys_executable))
env = context.call(get_os_environ)
- self.assertEquals('1', env['EXECUTED_VIA_ENV_WRAPPER'])
+ self.assertEquals('1', env['THIS_IS_STUB_PYTHON'])
- def test_list(self):
+ def test_python_path_list(self):
context = self.router.local(
python_path=[
- testlib.data_path('env_wrapper.sh'),
+ self.stub_python_path,
"magic_first_arg",
sys.executable
]
)
self.assertEquals(sys.executable, context.call(get_sys_executable))
env = context.call(get_os_environ)
- self.assertEquals('magic_first_arg', env['ENV_WRAPPER_FIRST_ARG'])
- self.assertEquals('1', env['EXECUTED_VIA_ENV_WRAPPER'])
+ self.assertEquals('magic_first_arg', env['STUB_PYTHON_FIRST_ARG'])
+ self.assertEquals('1', env['THIS_IS_STUB_PYTHON'])
if __name__ == '__main__':
diff --git a/tests/lxc_test.py b/tests/lxc_test.py
index a30cd186..ae5990f6 100644
--- a/tests/lxc_test.py
+++ b/tests/lxc_test.py
@@ -1,6 +1,12 @@
import os
import mitogen
+import mitogen.lxc
+import mitogen.parent
+
+try:
+ any
+except NameError:
+ from mitogen.core import any
import unittest2
@@ -11,19 +17,29 @@ def has_subseq(seq, subseq):
return any(seq[x:x+len(subseq)] == subseq for x in range(0, len(seq)))
-class FakeLxcAttachTest(testlib.RouterMixin, unittest2.TestCase):
+class ConstructorTest(testlib.RouterMixin, testlib.TestCase):
+ lxc_attach_path = testlib.data_path('stubs/stub-lxc-attach.py')
+
def test_okay(self):
- lxc_attach_path = testlib.data_path('fake_lxc_attach.py')
context = self.router.lxc(
container='container_name',
- lxc_attach_path=lxc_attach_path,
+ lxc_attach_path=self.lxc_attach_path,
)
argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV'))
- self.assertEquals(argv[0], lxc_attach_path)
+ self.assertEquals(argv[0], self.lxc_attach_path)
self.assertTrue('--clear-env' in argv)
self.assertTrue(has_subseq(argv, ['--name', 'container_name']))
+ def test_eof(self):
+ e = self.assertRaises(mitogen.parent.EofError,
+ lambda: self.router.lxc(
+ container='container_name',
+ lxc_attach_path='true',
+ )
+ )
+ self.assertTrue(str(e).endswith(mitogen.lxc.Stream.eof_error_hint))
+
if __name__ == '__main__':
unittest2.main()
diff --git a/tests/lxd_test.py b/tests/lxd_test.py
index 9c2397a2..e59da43c 100644
--- a/tests/lxd_test.py
+++ b/tests/lxd_test.py
@@ -1,15 +1,17 @@
import os
import mitogen
+import mitogen.lxd
+import mitogen.parent
import unittest2
import testlib
-class FakeLxcTest(testlib.RouterMixin, unittest2.TestCase):
+class ConstructorTest(testlib.RouterMixin, testlib.TestCase):
def test_okay(self):
- lxc_path = testlib.data_path('fake_lxc.py')
+ lxc_path = testlib.data_path('stubs/stub-lxc.py')
context = self.router.lxd(
container='container_name',
lxc_path=lxc_path,
@@ -21,6 +23,15 @@ class FakeLxcTest(testlib.RouterMixin, unittest2.TestCase):
self.assertEquals(argv[2], '--mode=noninteractive')
self.assertEquals(argv[3], 'container_name')
+ def test_eof(self):
+ e = self.assertRaises(mitogen.parent.EofError,
+ lambda: self.router.lxd(
+ container='container_name',
+ lxc_path='true',
+ )
+ )
+ self.assertTrue(str(e).endswith(mitogen.lxd.Stream.eof_error_hint))
+
if __name__ == '__main__':
unittest2.main()
diff --git a/tests/master_test.py b/tests/master_test.py
index 19a9b414..31d11013 100644
--- a/tests/master_test.py
+++ b/tests/master_test.py
@@ -6,7 +6,7 @@ import testlib
import mitogen.master
-class ScanCodeImportsTest(unittest2.TestCase):
+class ScanCodeImportsTest(testlib.TestCase):
func = staticmethod(mitogen.master.scan_code_imports)
if mitogen.core.PY3:
diff --git a/tests/minify_test.py b/tests/minify_test.py
new file mode 100644
index 00000000..15609896
--- /dev/null
+++ b/tests/minify_test.py
@@ -0,0 +1,118 @@
+import codecs
+import glob
+import pprint
+import sys
+
+import unittest2
+
+import mitogen.minify
+import testlib
+
+
+def read_sample(fname):
+ sample_path = testlib.data_path('minimize_samples/' + fname)
+ sample_file = open(sample_path)
+ sample = sample_file.read()
+ sample_file.close()
+ return sample
+
+
+class MinimizeSourceTest(testlib.TestCase):
+ func = staticmethod(mitogen.minify.minimize_source)
+
+ def test_class(self):
+ original = read_sample('class.py')
+ expected = read_sample('class_min.py')
+ self.assertEqual(expected, self.func(original))
+
+ def test_comment(self):
+ original = read_sample('comment.py')
+ expected = read_sample('comment_min.py')
+ self.assertEqual(expected, self.func(original))
+
+ def test_def(self):
+ original = read_sample('def.py')
+ expected = read_sample('def_min.py')
+ self.assertEqual(expected, self.func(original))
+
+ def test_hashbang(self):
+ original = read_sample('hashbang.py')
+ expected = read_sample('hashbang_min.py')
+ self.assertEqual(expected, self.func(original))
+
+ def test_mod(self):
+ original = read_sample('mod.py')
+ expected = read_sample('mod_min.py')
+ self.assertEqual(expected, self.func(original))
+
+ def test_pass(self):
+ original = read_sample('pass.py')
+ expected = read_sample('pass_min.py')
+ self.assertEqual(expected, self.func(original))
+
+ def test_obstacle_course(self):
+ original = read_sample('obstacle_course.py')
+ expected = read_sample('obstacle_course_min.py')
+ self.assertEqual(expected, self.func(original))
+
+
+class MitogenCoreTest(testlib.TestCase):
+ # Verify minimize_source() succeeds for all built-in modules.
+ func = staticmethod(mitogen.minify.minimize_source)
+
+ def read_source(self, name):
+ fp = codecs.open(name, encoding='utf-8')
+ try:
+ return fp.read()
+ finally:
+ fp.close()
+
+ def _test_syntax_valid(self, minified, name):
+ compile(minified, name, 'exec')
+
+ def _test_line_counts_match(self, original, minified):
+ self.assertEquals(original.count('\n'),
+ minified.count('\n'))
+
+ def _test_non_blank_lines_match(self, name, original, minified):
+ # Verify first token matches. We just want to ensure line numbers make
+ # sense; this is good enough.
+ olines = original.splitlines()
+ mlines = minified.splitlines()
+ for i, (orig, mini) in enumerate(zip(olines, mlines)):
+ if i < 2:
+ assert orig == mini
+ continue
+
+ owords = orig.split()
+ mwords = mini.split()
+ assert len(mwords) == 0 or (mwords[0] == owords[0]), pprint.pformat({
+ 'line': i+1,
+ 'filename': name,
+ 'owords': owords,
+ 'mwords': mwords,
+ })
+
+ PY_24_25_SKIP = [
+ # cProfile unsupported on 2.4, 2.6+ syntax is fine here.
+ 'mitogen/profiler.py',
+ ]
+
+ def test_minify_all(self):
+ for name in glob.glob('mitogen/*.py'):
+ if name in self.PY_24_25_SKIP and sys.version_info < (2, 6):
+ continue
+ original = self.read_source(name)
+ try:
+ minified = self.func(original)
+ except Exception:
+ print('file was: ' + name)
+ raise
+
+ self._test_syntax_valid(minified, name)
+ self._test_line_counts_match(original, minified)
+ self._test_non_blank_lines_match(name, original, minified)
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/minimize_source_test.py b/tests/minimize_source_test.py
deleted file mode 100644
index b98cdebd..00000000
--- a/tests/minimize_source_test.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import unittest2
-
-import mitogen.minify
-import testlib
-
-
-def read_sample(fname):
- sample_path = testlib.data_path('minimize_samples/' + fname)
- sample_file = open(sample_path)
- sample = sample_file.read()
- sample_file.close()
- return sample
-
-
-class MinimizeSource(unittest2.TestCase):
- func = staticmethod(mitogen.minify.minimize_source)
-
- def test_class(self):
- original = read_sample('class.py')
- expected = read_sample('class_min.py')
- self.assertEqual(expected, self.func(original))
-
- def test_comment(self):
- original = read_sample('comment.py')
- expected = read_sample('comment_min.py')
- self.assertEqual(expected, self.func(original))
-
- def test_def(self):
- original = read_sample('def.py')
- expected = read_sample('def_min.py')
- self.assertEqual(expected, self.func(original))
-
- def test_hashbang(self):
- original = read_sample('hashbang.py')
- expected = read_sample('hashbang_min.py')
- self.assertEqual(expected, self.func(original))
-
- def test_mod(self):
- original = read_sample('mod.py')
- expected = read_sample('mod_min.py')
- self.assertEqual(expected, self.func(original))
-
- def test_pass(self):
- original = read_sample('pass.py')
- expected = read_sample('pass_min.py')
- self.assertEqual(expected, self.func(original))
-
- def test_obstacle_course(self):
- original = read_sample('obstacle_course.py')
- expected = read_sample('obstacle_course_min.py')
- self.assertEqual(expected, self.func(original))
-
-
-if __name__ == '__main__':
- unittest2.main()
diff --git a/tests/module_finder_test.py b/tests/module_finder_test.py
index 1d5a0796..409adc6d 100644
--- a/tests/module_finder_test.py
+++ b/tests/module_finder_test.py
@@ -50,6 +50,33 @@ class IsStdlibNameTest(testlib.TestCase):
self.assertFalse(self.func('mitogen.fakessh'))
+class GetMainModuleDefectivePython3x(testlib.TestCase):
+ klass = mitogen.master.ModuleFinder
+
+ def call(self, fullname):
+ return self.klass()._get_main_module_defective_python_3x(fullname)
+
+ def test_builtin(self):
+ self.assertEquals(None, self.call('sys'))
+
+ def test_not_main(self):
+ self.assertEquals(None, self.call('mitogen'))
+
+ def test_main(self):
+ import __main__
+
+ path, source, is_pkg = self.call('__main__')
+ self.assertTrue(path is not None)
+ self.assertTrue(os.path.exists(path))
+ self.assertEquals(path, __main__.__file__)
+ fp = open(path, 'rb')
+ try:
+ self.assertEquals(source, fp.read())
+ finally:
+ fp.close()
+ self.assertFalse(is_pkg)
+
+
class GetModuleViaPkgutilTest(testlib.TestCase):
klass = mitogen.master.ModuleFinder
@@ -105,6 +132,39 @@ class GetModuleViaSysModulesTest(testlib.TestCase):
self.assertIsNone(tup)
+class GetModuleViaParentEnumerationTest(testlib.TestCase):
+ klass = mitogen.master.ModuleFinder
+
+ def call(self, fullname):
+ return self.klass()._get_module_via_parent_enumeration(fullname)
+
+ def test_main_fails(self):
+ import __main__
+ self.assertIsNone(self.call('__main__'))
+
+ def test_dylib_fails(self):
+ # _socket comes from a .so
+ import _socket
+ tup = self.call('_socket')
+ self.assertIsNone(tup)
+
+ def test_builtin_fails(self):
+ # sys is built-in
+ tup = self.call('sys')
+ self.assertIsNone(tup)
+
+ def test_plumbum_colors_like_pkg_succeeds(self):
+ # plumbum has been eating too many rainbow-colored pills
+ import pkg_like_plumbum.colors
+ path, src, is_pkg = self.call('pkg_like_plumbum.colors')
+ self.assertEquals(path,
+ testlib.data_path('pkg_like_plumbum/colors.py'))
+
+ fp = open(testlib.data_path('pkg_like_plumbum/colors.py'), 'rb')
+ try:
+ s = fp.read()
+ finally:
+ fp.close()
+ self.assertEquals(src, s)
+ self.assertFalse(is_pkg)
+
+
class ResolveRelPathTest(testlib.TestCase):
klass = mitogen.master.ModuleFinder
@@ -129,35 +189,7 @@ class ResolveRelPathTest(testlib.TestCase):
self.assertEquals('', self.call('email.utils', 3))
-class DjangoMixin(object):
- WEBPROJECT_PATH = testlib.data_path('webproject')
-
- # TODO: rip out Django and replace with a static tree of weird imports that
- # don't depend on .. Django! The hack below is because the version of
- # Django we need to test against 2.6 doesn't actually run on 3.6. But we
- # don't care, we just need to be able to import it.
- #
- # File "django/utils/html_parser.py", line 12, in
- # AttributeError: module 'html.parser' has no attribute 'HTMLParseError'
- #
- import pkg_resources._vendor.six
- from django.utils.six.moves import html_parser as _html_parser
- _html_parser.HTMLParseError = Exception
-
- @classmethod
- def setUpClass(cls):
- super(DjangoMixin, cls).setUpClass()
- sys.path.append(cls.WEBPROJECT_PATH)
- os.environ['DJANGO_SETTINGS_MODULE'] = 'webproject.settings'
-
- @classmethod
- def tearDownClass(cls):
- sys.path.remove(cls.WEBPROJECT_PATH)
- del os.environ['DJANGO_SETTINGS_MODULE']
- super(DjangoMixin, cls).tearDownClass()
-
-
-class FindRelatedImportsTest(DjangoMixin, testlib.TestCase):
+class FakeSshTest(testlib.TestCase):
klass = mitogen.master.ModuleFinder
def call(self, fullname):
@@ -173,60 +205,27 @@ class FindRelatedImportsTest(DjangoMixin, testlib.TestCase):
'mitogen.parent',
])
- def test_django_db(self):
- import django.db
- related = self.call('django.db')
- self.assertEquals(related, [
- 'django',
- 'django.core',
- 'django.core.signals',
- 'django.db.utils',
- 'django.utils.functional',
- ])
-
- def test_django_db_models(self):
- import django.db.models
- related = self.call('django.db.models')
- self.maxDiff=None
- self.assertEquals(related, [
- 'django',
- 'django.core.exceptions',
- 'django.db',
- 'django.db.models',
- 'django.db.models.aggregates',
- 'django.db.models.base',
- 'django.db.models.deletion',
- 'django.db.models.expressions',
- 'django.db.models.fields',
- 'django.db.models.fields.files',
- 'django.db.models.fields.related',
- 'django.db.models.fields.subclassing',
- 'django.db.models.loading',
- 'django.db.models.manager',
- 'django.db.models.query',
- 'django.db.models.signals',
- ])
-
-class FindRelatedTest(DjangoMixin, testlib.TestCase):
+class FindRelatedTest(testlib.TestCase):
klass = mitogen.master.ModuleFinder
def call(self, fullname):
return self.klass().find_related(fullname)
SIMPLE_EXPECT = set([
- 'mitogen',
- 'mitogen.core',
- 'mitogen.master',
- 'mitogen.minify',
- 'mitogen.parent',
+ u'mitogen',
+ u'mitogen.core',
+ u'mitogen.master',
+ u'mitogen.minify',
+ u'mitogen.parent',
])
- if sys.version_info < (3, 2):
- SIMPLE_EXPECT.add('mitogen.compat')
- SIMPLE_EXPECT.add('mitogen.compat.functools')
if sys.version_info < (2, 7):
+ SIMPLE_EXPECT.add('mitogen.compat')
SIMPLE_EXPECT.add('mitogen.compat.tokenize')
+ if sys.version_info < (2, 6):
+ SIMPLE_EXPECT.add('mitogen.compat')
+ SIMPLE_EXPECT.add('mitogen.compat.pkgutil')
def test_simple(self):
import mitogen.fakessh
@@ -234,131 +233,203 @@ class FindRelatedTest(DjangoMixin, testlib.TestCase):
self.assertEquals(set(related), self.SIMPLE_EXPECT)
-class DjangoFindRelatedTest(DjangoMixin, testlib.TestCase):
- klass = mitogen.master.ModuleFinder
- maxDiff = None
-
- def call(self, fullname):
- return self.klass().find_related(fullname)
-
- def test_django_db(self):
- import django.db
- related = self.call('django.db')
- self.assertEquals(related, [
- 'django',
- 'django.conf',
- 'django.conf.global_settings',
- 'django.core',
- 'django.core.exceptions',
- 'django.core.signals',
- 'django.db.utils',
- 'django.dispatch',
- 'django.dispatch.dispatcher',
- 'django.dispatch.saferef',
- 'django.utils',
- 'django.utils._os',
- 'django.utils.encoding',
- 'django.utils.functional',
- 'django.utils.importlib',
- 'django.utils.module_loading',
- 'django.utils.six',
- ])
-
- def test_django_db_models(self):
- if sys.version_info >= (3, 0):
- raise unittest2.SkipTest('broken due to ancient vendored six.py')
-
- import django.db.models
- related = self.call('django.db.models')
- self.assertEquals(related, [
- 'django',
- 'django.conf',
- 'django.conf.global_settings',
- 'django.core',
- 'django.core.exceptions',
- 'django.core.files',
- 'django.core.files.base',
- 'django.core.files.images',
- 'django.core.files.locks',
- 'django.core.files.move',
- 'django.core.files.storage',
- 'django.core.files.utils',
- 'django.core.signals',
- 'django.core.validators',
- 'django.db',
- 'django.db.backends',
- 'django.db.backends.signals',
- 'django.db.backends.util',
- 'django.db.models.aggregates',
- 'django.db.models.base',
- 'django.db.models.constants',
- 'django.db.models.deletion',
- 'django.db.models.expressions',
- 'django.db.models.fields',
- 'django.db.models.fields.files',
- 'django.db.models.fields.proxy',
- 'django.db.models.fields.related',
- 'django.db.models.fields.subclassing',
- 'django.db.models.loading',
- 'django.db.models.manager',
- 'django.db.models.options',
- 'django.db.models.query',
- 'django.db.models.query_utils',
- 'django.db.models.related',
- 'django.db.models.signals',
- 'django.db.models.sql',
- 'django.db.models.sql.aggregates',
- 'django.db.models.sql.constants',
- 'django.db.models.sql.datastructures',
- 'django.db.models.sql.expressions',
- 'django.db.models.sql.query',
- 'django.db.models.sql.subqueries',
- 'django.db.models.sql.where',
- 'django.db.transaction',
- 'django.db.utils',
- 'django.dispatch',
- 'django.dispatch.dispatcher',
- 'django.dispatch.saferef',
- 'django.forms',
- 'django.forms.fields',
- 'django.forms.forms',
- 'django.forms.formsets',
- 'django.forms.models',
- 'django.forms.util',
- 'django.forms.widgets',
- 'django.utils',
- 'django.utils._os',
- 'django.utils.crypto',
- 'django.utils.datastructures',
- 'django.utils.dateformat',
- 'django.utils.dateparse',
- 'django.utils.dates',
- 'django.utils.datetime_safe',
- 'django.utils.decorators',
- 'django.utils.deprecation',
- 'django.utils.encoding',
- 'django.utils.formats',
- 'django.utils.functional',
- 'django.utils.html',
- 'django.utils.html_parser',
- 'django.utils.importlib',
- 'django.utils.ipv6',
- 'django.utils.itercompat',
- 'django.utils.module_loading',
- 'django.utils.numberformat',
- 'django.utils.safestring',
- 'django.utils.six',
- 'django.utils.text',
- 'django.utils.timezone',
- 'django.utils.translation',
- 'django.utils.tree',
- 'django.utils.tzinfo',
- 'pytz',
- 'pytz.exceptions',
- 'pytz.lazy',
- 'pytz.tzfile',
- 'pytz.tzinfo',
- ])
+if sys.version_info > (2, 6):
+ class DjangoMixin(object):
+ WEBPROJECT_PATH = testlib.data_path('webproject')
+
+ # TODO: rip out Django and replace with a static tree of weird imports
+ # that don't depend on .. Django! The hack below is because the version
+ # of Django we need to test against 2.6 doesn't actually run on 3.6.
+ # But we don't care, we just need to be able to import it.
+ #
+ # File "django/utils/html_parser.py", line 12, in
+ # AttributeError: module 'html.parser' has no attribute
+ # 'HTMLParseError'
+ #
+ import pkg_resources._vendor.six
+ from django.utils.six.moves import html_parser as _html_parser
+ _html_parser.HTMLParseError = Exception
+
+ @classmethod
+ def setUpClass(cls):
+ super(DjangoMixin, cls).setUpClass()
+ sys.path.append(cls.WEBPROJECT_PATH)
+ os.environ['DJANGO_SETTINGS_MODULE'] = 'webproject.settings'
+
+ @classmethod
+ def tearDownClass(cls):
+ sys.path.remove(cls.WEBPROJECT_PATH)
+ del os.environ['DJANGO_SETTINGS_MODULE']
+ super(DjangoMixin, cls).tearDownClass()
+
+
+ class FindRelatedImportsTest(DjangoMixin, testlib.TestCase):
+ klass = mitogen.master.ModuleFinder
+
+ def call(self, fullname):
+ return self.klass().find_related_imports(fullname)
+
+ def test_django_db(self):
+ import django.db
+ related = self.call('django.db')
+ self.assertEquals(related, [
+ 'django',
+ 'django.core',
+ 'django.core.signals',
+ 'django.db.utils',
+ 'django.utils.functional',
+ ])
+
+ def test_django_db_models(self):
+ import django.db.models
+ related = self.call('django.db.models')
+ self.maxDiff = None
+ self.assertEquals(related, [
+ u'django',
+ u'django.core.exceptions',
+ u'django.db',
+ u'django.db.models',
+ u'django.db.models.aggregates',
+ u'django.db.models.base',
+ u'django.db.models.deletion',
+ u'django.db.models.expressions',
+ u'django.db.models.fields',
+ u'django.db.models.fields.files',
+ u'django.db.models.fields.related',
+ u'django.db.models.fields.subclassing',
+ u'django.db.models.loading',
+ u'django.db.models.manager',
+ u'django.db.models.query',
+ u'django.db.models.signals',
+ ])
+
+
+ class DjangoFindRelatedTest(DjangoMixin, testlib.TestCase):
+ klass = mitogen.master.ModuleFinder
+ maxDiff = None
+
+ def call(self, fullname):
+ return self.klass().find_related(fullname)
+
+ def test_django_db(self):
+ import django.db
+ related = self.call('django.db')
+ self.assertEquals(related, [
+ u'django',
+ u'django.conf',
+ u'django.conf.global_settings',
+ u'django.core',
+ u'django.core.exceptions',
+ u'django.core.signals',
+ u'django.db.utils',
+ u'django.dispatch',
+ u'django.dispatch.dispatcher',
+ u'django.dispatch.saferef',
+ u'django.utils',
+ u'django.utils._os',
+ u'django.utils.encoding',
+ u'django.utils.functional',
+ u'django.utils.importlib',
+ u'django.utils.module_loading',
+ u'django.utils.six',
+ ])
+
+ @unittest2.skipIf(
+ condition=(sys.version_info >= (3, 0)),
+ reason='broken due to ancient vendored six.py'
+ )
+ def test_django_db_models(self):
+ import django.db.models
+ related = self.call('django.db.models')
+ self.assertEquals(related, [
+ u'django',
+ u'django.conf',
+ u'django.conf.global_settings',
+ u'django.core',
+ u'django.core.exceptions',
+ u'django.core.files',
+ u'django.core.files.base',
+ u'django.core.files.images',
+ u'django.core.files.locks',
+ u'django.core.files.move',
+ u'django.core.files.storage',
+ u'django.core.files.utils',
+ u'django.core.signals',
+ u'django.core.validators',
+ u'django.db',
+ u'django.db.backends',
+ u'django.db.backends.signals',
+ u'django.db.backends.util',
+ u'django.db.models.aggregates',
+ u'django.db.models.base',
+ u'django.db.models.constants',
+ u'django.db.models.deletion',
+ u'django.db.models.expressions',
+ u'django.db.models.fields',
+ u'django.db.models.fields.files',
+ u'django.db.models.fields.proxy',
+ u'django.db.models.fields.related',
+ u'django.db.models.fields.subclassing',
+ u'django.db.models.loading',
+ u'django.db.models.manager',
+ u'django.db.models.options',
+ u'django.db.models.query',
+ u'django.db.models.query_utils',
+ u'django.db.models.related',
+ u'django.db.models.signals',
+ u'django.db.models.sql',
+ u'django.db.models.sql.aggregates',
+ u'django.db.models.sql.constants',
+ u'django.db.models.sql.datastructures',
+ u'django.db.models.sql.expressions',
+ u'django.db.models.sql.query',
+ u'django.db.models.sql.subqueries',
+ u'django.db.models.sql.where',
+ u'django.db.transaction',
+ u'django.db.utils',
+ u'django.dispatch',
+ u'django.dispatch.dispatcher',
+ u'django.dispatch.saferef',
+ u'django.forms',
+ u'django.forms.fields',
+ u'django.forms.forms',
+ u'django.forms.formsets',
+ u'django.forms.models',
+ u'django.forms.util',
+ u'django.forms.widgets',
+ u'django.utils',
+ u'django.utils._os',
+ u'django.utils.crypto',
+ u'django.utils.datastructures',
+ u'django.utils.dateformat',
+ u'django.utils.dateparse',
+ u'django.utils.dates',
+ u'django.utils.datetime_safe',
+ u'django.utils.decorators',
+ u'django.utils.deprecation',
+ u'django.utils.encoding',
+ u'django.utils.formats',
+ u'django.utils.functional',
+ u'django.utils.html',
+ u'django.utils.html_parser',
+ u'django.utils.importlib',
+ u'django.utils.ipv6',
+ u'django.utils.itercompat',
+ u'django.utils.module_loading',
+ u'django.utils.numberformat',
+ u'django.utils.safestring',
+ u'django.utils.six',
+ u'django.utils.text',
+ u'django.utils.timezone',
+ u'django.utils.translation',
+ u'django.utils.tree',
+ u'django.utils.tzinfo',
+ u'pytz',
+ u'pytz.exceptions',
+ u'pytz.lazy',
+ u'pytz.tzfile',
+ u'pytz.tzinfo',
+ ])
if __name__ == '__main__':
unittest2.main()
diff --git a/tests/parent_test.py b/tests/parent_test.py
index c9ccaf3f..00bddb4d 100644
--- a/tests/parent_test.py
+++ b/tests/parent_test.py
@@ -1,4 +1,5 @@
import errno
+import fcntl
import os
import signal
import subprocess
@@ -9,6 +10,7 @@ import time
import mock
import unittest2
import testlib
+from testlib import Popen__terminate
import mitogen.parent
@@ -30,6 +32,28 @@ def wait_for_child(pid, timeout=1.0):
assert False, "wait_for_child() timed out"
+@mitogen.core.takes_econtext
+def call_func_in_sibling(ctx, econtext, sync_sender):
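+ # econtext is injected by the takes_econtext decorator. Kick off a call in
+ # the sibling that never returns, signal the test that it was submitted,
+ # then block on the reply until disconnection tears it down.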
+ recv = ctx.call_async(time.sleep, 99999)
+ sync_sender.send(None)
+ recv.get().unpickle()
+
+
+def wait_for_empty_output_queue(sync_recv, context):
+ # Wait for the sender to submit its RPC. Since the RPC is sent first, the
+ # message sent to this sender cannot arrive until we've routed that RPC.
+ sync_recv.get()
+
+ router = context.router
+ broker = router.broker
+ while True:
+ # Now wait for the RPC to exit the output queue.
+ stream = router.stream_by_id(context.context_id)
+ if broker.defer_sync(lambda: stream.pending_bytes()) == 0:
+ return
+ time.sleep(0.1)
+
+
class GetDefaultRemoteNameTest(testlib.TestCase):
func = staticmethod(mitogen.parent.get_default_remote_name)
@@ -74,7 +98,7 @@ class WstatusToStrTest(testlib.TestCase):
(pid, status), _ = mitogen.core.io_op(os.waitpid, pid, 0)
self.assertEquals(
self.func(status),
- 'exited due to signal %s (SIGKILL)' % (signal.SIGKILL,)
+ 'exited due to signal %s (SIGKILL)' % (int(signal.SIGKILL),)
)
# can't test SIGSTOP without POSIX sessions rabbithole
@@ -88,7 +112,7 @@ class ReapChildTest(testlib.RouterMixin, testlib.TestCase):
remote_id=1234,
old_router=self.router,
max_message_size=self.router.max_message_size,
- python_path=testlib.data_path('python_never_responds.sh'),
+ python_path=testlib.data_path('python_never_responds.py'),
connect_timeout=0.5,
)
self.assertRaises(mitogen.core.TimeoutError,
@@ -114,7 +138,7 @@ class StreamErrorTest(testlib.RouterMixin, testlib.TestCase):
def test_via_eof(self):
# Verify FD leakage does not keep failed process open.
- local = self.router.fork()
+ local = self.router.local()
e = self.assertRaises(mitogen.core.StreamError,
lambda: self.router.local(
via=local,
@@ -136,7 +160,7 @@ class StreamErrorTest(testlib.RouterMixin, testlib.TestCase):
self.assertTrue(e.args[0].startswith(prefix))
def test_via_enoent(self):
- local = self.router.fork()
+ local = self.router.local()
e = self.assertRaises(mitogen.core.StreamError,
lambda: self.router.local(
via=local,
@@ -148,7 +172,7 @@ class StreamErrorTest(testlib.RouterMixin, testlib.TestCase):
self.assertTrue(s in e.args[0])
-class ContextTest(testlib.RouterMixin, unittest2.TestCase):
+class ContextTest(testlib.RouterMixin, testlib.TestCase):
def test_context_shutdown(self):
local = self.router.local()
pid = local.call(os.getpid)
@@ -175,8 +199,28 @@ class OpenPtyTest(testlib.TestCase):
msg = mitogen.parent.OPENPTY_MSG % (openpty.side_effect,)
self.assertEquals(e.args[0], msg)
+ @unittest2.skipIf(condition=(os.uname()[0] != 'Linux'),
+ reason='Fallback only supported on Linux')
+ @mock.patch('os.openpty')
+ def test_broken_linux_fallback(self, openpty):
+ openpty.side_effect = OSError(errno.EPERM)
+ master_fd, slave_fd = self.func()
+ try:
+ st = os.fstat(master_fd)
+ self.assertEquals(5, os.major(st.st_rdev))
+ flags = fcntl.fcntl(master_fd, fcntl.F_GETFL)
+ self.assertTrue(flags & os.O_RDWR)
+
+ st = os.fstat(slave_fd)
+ self.assertEquals(136, os.major(st.st_rdev))
+ flags = fcntl.fcntl(slave_fd, fcntl.F_GETFL)
+ self.assertTrue(flags & os.O_RDWR)
+ finally:
+ os.close(master_fd)
+ os.close(slave_fd)
-class TtyCreateChildTest(unittest2.TestCase):
+
+class TtyCreateChildTest(testlib.TestCase):
func = staticmethod(mitogen.parent.tty_create_child)
def test_dev_tty_open_succeeds(self):
@@ -202,15 +246,17 @@ class TtyCreateChildTest(unittest2.TestCase):
self.assertEquals(pid, waited_pid)
self.assertEquals(0, status)
self.assertEquals(mitogen.core.b(''), tf.read())
+ os.close(fd)
finally:
tf.close()
-class IterReadTest(unittest2.TestCase):
+class IterReadTest(testlib.TestCase):
func = staticmethod(mitogen.parent.iter_read)
def make_proc(self):
- args = [testlib.data_path('iter_read_generator.sh')]
+ # I produce text every 100ms.
+ args = [testlib.data_path('iter_read_generator.py')]
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
mitogen.core.set_nonblock(proc.stdout.fileno())
return proc
@@ -219,12 +265,13 @@ class IterReadTest(unittest2.TestCase):
proc = self.make_proc()
try:
reader = self.func([proc.stdout.fileno()])
- for i, chunk in enumerate(reader, 1):
- self.assertEqual(i, int(chunk))
- if i > 3:
+ for i, chunk in enumerate(reader):
+ self.assertEqual(1+i, int(chunk))
+ if i > 2:
break
finally:
- proc.terminate()
+ Popen__terminate(proc)
+ proc.stdout.close()
def test_deadline_exceeded_before_call(self):
proc = self.make_proc()
@@ -238,31 +285,37 @@ class IterReadTest(unittest2.TestCase):
except mitogen.core.TimeoutError:
self.assertEqual(len(got), 0)
finally:
- proc.terminate()
+ Popen__terminate(proc)
+ proc.stdout.close()
def test_deadline_exceeded_during_call(self):
proc = self.make_proc()
- reader = self.func([proc.stdout.fileno()], time.time() + 0.4)
+ deadline = time.time() + 0.4
+
+ reader = self.func([proc.stdout.fileno()], deadline)
try:
got = []
try:
for chunk in reader:
+ if time.time() > (deadline + 1.0):
+ assert 0, 'TimeoutError not raised'
got.append(chunk)
- assert 0, 'TimeoutError not raised'
except mitogen.core.TimeoutError:
# Give a little wiggle room in case of imperfect scheduling.
# Ideal number should be 9.
- self.assertLess(3, len(got))
- self.assertLess(len(got), 5)
+ self.assertLess(deadline, time.time())
+ self.assertLess(1, len(got))
+ self.assertLess(len(got), 20)
finally:
- proc.terminate()
+ Popen__terminate(proc)
+ proc.stdout.close()
-class WriteAllTest(unittest2.TestCase):
+class WriteAllTest(testlib.TestCase):
func = staticmethod(mitogen.parent.write_all)
def make_proc(self):
- args = [testlib.data_path('write_all_consumer.sh')]
+ args = [testlib.data_path('write_all_consumer.py')]
proc = subprocess.Popen(args, stdin=subprocess.PIPE)
mitogen.core.set_nonblock(proc.stdin.fileno())
return proc
@@ -274,7 +327,8 @@ class WriteAllTest(unittest2.TestCase):
try:
self.func(proc.stdin.fileno(), self.ten_ms_chunk)
finally:
- proc.terminate()
+ Popen__terminate(proc)
+ proc.stdin.close()
def test_deadline_exceeded_before_call(self):
proc = self.make_proc()
@@ -283,7 +337,8 @@ class WriteAllTest(unittest2.TestCase):
lambda: self.func(proc.stdin.fileno(), self.ten_ms_chunk, 0)
))
finally:
- proc.terminate()
+ Popen__terminate(proc)
+ proc.stdin.close()
def test_deadline_exceeded_during_call(self):
proc = self.make_proc()
@@ -295,7 +350,89 @@ class WriteAllTest(unittest2.TestCase):
deadline)
))
finally:
- proc.terminate()
+ Popen__terminate(proc)
+ proc.stdin.close()
+
+
+class DisconnectTest(testlib.RouterMixin, testlib.TestCase):
+ def test_child_disconnected(self):
+ # Easy mode: process notices its own directly connected child is
+ # disconnected.
+ c1 = self.router.local()
+ recv = c1.call_async(time.sleep, 9999)
+ c1.shutdown(wait=True)
+ e = self.assertRaises(mitogen.core.ChannelError,
+ lambda: recv.get())
+ self.assertEquals(e.args[0], self.router.respondent_disconnect_msg)
+
+ def test_indirect_child_disconnected(self):
+ # Achievement unlocked: process notices an indirectly connected child
+ # is disconnected.
+ c1 = self.router.local()
+ c2 = self.router.local(via=c1)
+ recv = c2.call_async(time.sleep, 9999)
+ c2.shutdown(wait=True)
+ e = self.assertRaises(mitogen.core.ChannelError,
+ lambda: recv.get())
+ self.assertEquals(e.args[0], self.router.respondent_disconnect_msg)
+
+ def test_indirect_child_intermediary_disconnected(self):
+ # Battlefield promotion: process notices indirect child disconnected
+ # due to an intermediary child disconnecting.
+ c1 = self.router.local()
+ c2 = self.router.local(via=c1)
+ recv = c2.call_async(time.sleep, 9999)
+ c1.shutdown(wait=True)
+ e = self.assertRaises(mitogen.core.ChannelError,
+ lambda: recv.get())
+ self.assertEquals(e.args[0], self.router.respondent_disconnect_msg)
+
+ def test_near_sibling_disconnected(self):
+ # Hard mode: child notices sibling connected to same parent has
+ # disconnected.
+ c1 = self.router.local()
+ c2 = self.router.local()
+
+ # Let c1 call functions in c2.
+ self.router.stream_by_id(c1.context_id).auth_id = mitogen.context_id
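+ # Stamping the master's auth_id on c1's stream lets its messages pass
+ # parent-authority policy checks; upgrade_router() below then gives c1 a
+ # full router so it can issue calls to its sibling.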
+ c1.call(mitogen.parent.upgrade_router)
+
+ sync_recv = mitogen.core.Receiver(self.router)
+ recv = c1.call_async(call_func_in_sibling, c2,
+ sync_sender=sync_recv.to_sender())
+
+ wait_for_empty_output_queue(sync_recv, c2)
+ c2.shutdown(wait=True)
+
+ e = self.assertRaises(mitogen.core.CallError,
+ lambda: recv.get().unpickle())
+ s = 'mitogen.core.ChannelError: ' + self.router.respondent_disconnect_msg
+ self.assertTrue(e.args[0].startswith(s), str(e))
+
+ def test_far_sibling_disconnected(self):
+ # God mode: child of child notices child of child of parent has
+ # disconnected.
+ c1 = self.router.local()
+ c11 = self.router.local(via=c1)
+
+ c2 = self.router.local()
+ c22 = self.router.local(via=c2)
+
+ # Let c1 call functions in c2.
+ self.router.stream_by_id(c1.context_id).auth_id = mitogen.context_id
+ c11.call(mitogen.parent.upgrade_router)
+
+ sync_recv = mitogen.core.Receiver(self.router)
+ recv = c11.call_async(call_func_in_sibling, c22,
+ sync_sender=sync_recv.to_sender())
+
+ wait_for_empty_output_queue(sync_recv, c22)
+ c22.shutdown(wait=True)
+
+ e = self.assertRaises(mitogen.core.CallError,
+ lambda: recv.get().unpickle())
+ s = 'mitogen.core.ChannelError: ' + self.router.respondent_disconnect_msg
+ self.assertTrue(e.args[0].startswith(s))
if __name__ == '__main__':
diff --git a/tests/policy_function_test.py b/tests/policy_function_test.py
new file mode 100644
index 00000000..56e33b89
--- /dev/null
+++ b/tests/policy_function_test.py
@@ -0,0 +1,40 @@
+
+import mock
+import unittest2
+
+import mitogen.core
+import mitogen.parent
+
+import testlib
+
+
+class HasParentAuthorityTest(testlib.TestCase):
+ func = staticmethod(mitogen.core.has_parent_authority)
+
+ def call(self, auth_id):
+ msg = mitogen.core.Message(auth_id=auth_id)
+ return self.func(msg)
+
+ @mock.patch('mitogen.context_id', 5555)
+ @mock.patch('mitogen.parent_ids', [111, 222])
+ def test_okay(self):
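+ # With the IDs patched above, only auth_id 5555 (our own context) or a
+ # listed parent ID (111, 222) carries parent authority; 0 does not.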
+ self.assertFalse(self.call(0))
+ self.assertTrue(self.call(5555))
+ self.assertTrue(self.call(111))
+
+
+class IsImmediateChildTest(testlib.TestCase):
+ func = staticmethod(mitogen.core.is_immediate_child)
+
+ def call(self, src_id, remote_id):
+ msg = mitogen.core.Message(src_id=src_id)
+ stream = mock.Mock(remote_id=remote_id)
+ return self.func(msg, stream)
+
+ def test_okay(self):
+ self.assertFalse(self.call(0, 1))
+ self.assertTrue(self.call(1, 1))
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/poller_test.py b/tests/poller_test.py
new file mode 100644
index 00000000..1d1e0cd0
--- /dev/null
+++ b/tests/poller_test.py
@@ -0,0 +1,428 @@
+
+import errno
+import os
+import select
+import socket
+import sys
+import time
+
+import unittest2
+
+import mitogen.core
+import mitogen.parent
+
+import testlib
+
+try:
+ next
+except NameError:
+ # Python 2.4
+ from mitogen.core import next
+
+
+class SockMixin(object):
+ def tearDown(self):
+ self.close_socks()
+ super(SockMixin, self).tearDown()
+
+ def setUp(self):
+ super(SockMixin, self).setUp()
+ self._setup_socks()
+
+ def _setup_socks(self):
+ # "left" and "right" side of two socket pairs. We use sockets instead
+ # of pipes since the same process can manipulate transmit/receive
+ # buffers on both sides (bidirectional IO), making it easier to test
+ # combinations of readability/writeability on the one side of a single
+ # file object.
+ self.l1_sock, self.r1_sock = socket.socketpair()
+ self.l1 = self.l1_sock.fileno()
+ self.r1 = self.r1_sock.fileno()
+
+ self.l2_sock, self.r2_sock = socket.socketpair()
+ self.l2 = self.l2_sock.fileno()
+ self.r2 = self.r2_sock.fileno()
+ for fd in self.l1, self.r1, self.l2, self.r2:
+ mitogen.core.set_nonblock(fd)
+
+ def fill(self, fd):
+ """Make `fd` unwriteable."""
+ while True:
+ try:
+ os.write(fd, mitogen.core.b('x')*4096)
+ except OSError:
+ e = sys.exc_info()[1]
+ if e.args[0] == errno.EAGAIN:
+ return
+ raise
+
+ def drain(self, fd):
+ """Make `fd` unreadable."""
+ while True:
+ try:
+ if not os.read(fd, 4096):
+ return
+ except OSError:
+ e = sys.exc_info()[1]
+ if e.args[0] == errno.EAGAIN:
+ return
+ raise
+
+ def close_socks(self):
+ for sock in self.l1_sock, self.r1_sock, self.l2_sock, self.r2_sock:
+ sock.close()
+
+
+class PollerMixin(object):
+ klass = None
+
+ def setUp(self):
+ super(PollerMixin, self).setUp()
+ self.p = self.klass()
+
+ def tearDown(self):
+ self.p.close()
+ super(PollerMixin, self).tearDown()
+
+
+class ReceiveStateMixin(PollerMixin, SockMixin):
+ def test_start_receive_adds_reader(self):
+ self.p.start_receive(self.l1)
+ self.assertEquals([(self.l1, self.l1)], self.p.readers)
+ self.assertEquals([], self.p.writers)
+
+ def test_start_receive_adds_reader_data(self):
+ data = object()
+ self.p.start_receive(self.l1, data=data)
+ self.assertEquals([(self.l1, data)], self.p.readers)
+ self.assertEquals([], self.p.writers)
+
+ def test_stop_receive(self):
+ self.p.start_receive(self.l1)
+ self.p.stop_receive(self.l1)
+ self.assertEquals([], self.p.readers)
+ self.assertEquals([], self.p.writers)
+
+ def test_stop_receive_dup(self):
+ self.p.start_receive(self.l1)
+ self.p.stop_receive(self.l1)
+ self.assertEquals([], self.p.readers)
+ self.assertEquals([], self.p.writers)
+ self.p.stop_receive(self.l1)
+ self.assertEquals([], self.p.readers)
+ self.assertEquals([], self.p.writers)
+
+ def test_stop_receive_noexist(self):
+ p = self.klass()
+ p.stop_receive(123) # should not fail
+ self.assertEquals([], p.readers)
+ self.assertEquals([], p.writers)
+
+
+class TransmitStateMixin(PollerMixin, SockMixin):
+ def test_start_transmit_adds_writer(self):
+ self.p.start_transmit(self.r1)
+ self.assertEquals([], self.p.readers)
+ self.assertEquals([(self.r1, self.r1)], self.p.writers)
+
+ def test_start_transmit_adds_writer_data(self):
+ data = object()
+ self.p.start_transmit(self.r1, data=data)
+ self.assertEquals([], self.p.readers)
+ self.assertEquals([(self.r1, data)], self.p.writers)
+
+ def test_stop_transmit(self):
+ self.p.start_transmit(self.r1)
+ self.p.stop_transmit(self.r1)
+ self.assertEquals([], self.p.readers)
+ self.assertEquals([], self.p.writers)
+
+ def test_stop_transmit_dup(self):
+ self.p.start_transmit(self.r1)
+ self.p.stop_transmit(self.r1)
+ self.assertEquals([], self.p.readers)
+ self.assertEquals([], self.p.writers)
+ self.p.stop_transmit(self.r1)
+ self.assertEquals([], self.p.readers)
+ self.assertEquals([], self.p.writers)
+
+ def test_stop_transmit_noexist(self):
+ p = self.klass()
+ p.stop_transmit(123) # should not fail
+ self.assertEquals([], p.readers)
+ self.assertEquals([], p.writers)
+
+
+class CloseMixin(PollerMixin):
+ def test_single_close(self):
+ self.p.close()
+
+ def test_double_close(self):
+ self.p.close()
+ self.p.close()
+
+
+class PollMixin(PollerMixin):
+ def test_empty_zero_timeout(self):
+ t0 = time.time()
+ self.assertEquals([], list(self.p.poll(0)))
+ self.assertTrue((time.time() - t0) < .1) # vaguely reasonable
+
+ def test_empty_small_timeout(self):
+ t0 = time.time()
+ self.assertEquals([], list(self.p.poll(.2)))
+ self.assertTrue((time.time() - t0) >= .2)
+
+
+class ReadableMixin(PollerMixin, SockMixin):
+ def test_unreadable(self):
+ self.p.start_receive(self.l1)
+ self.assertEquals([], list(self.p.poll(0)))
+
+ def test_readable_before_add(self):
+ self.fill(self.r1)
+ self.p.start_receive(self.l1)
+ self.assertEquals([self.l1], list(self.p.poll(0)))
+
+ def test_readable_after_add(self):
+ self.p.start_receive(self.l1)
+ self.fill(self.r1)
+ self.assertEquals([self.l1], list(self.p.poll(0)))
+
+ def test_readable_then_unreadable(self):
+ self.fill(self.r1)
+ self.p.start_receive(self.l1)
+ self.assertEquals([self.l1], list(self.p.poll(0)))
+ self.drain(self.l1)
+ self.assertEquals([], list(self.p.poll(0)))
+
+ def test_readable_data(self):
+ data = object()
+ self.fill(self.r1)
+ self.p.start_receive(self.l1, data=data)
+ self.assertEquals([data], list(self.p.poll(0)))
+
+ def test_double_readable_data(self):
+ data1 = object()
+ data2 = object()
+ self.fill(self.r1)
+ self.p.start_receive(self.l1, data=data1)
+ self.fill(self.r2)
+ self.p.start_receive(self.l2, data=data2)
+ self.assertEquals(set([data1, data2]), set(self.p.poll(0)))
+
+
+class WriteableMixin(PollerMixin, SockMixin):
+ def test_writeable(self):
+ self.p.start_transmit(self.r1)
+ self.assertEquals([self.r1], list(self.p.poll(0)))
+
+ def test_writeable_data(self):
+ data = object()
+ self.p.start_transmit(self.r1, data=data)
+ self.assertEquals([data], list(self.p.poll(0)))
+
+ def test_unwriteable_before_add(self):
+ self.fill(self.r1)
+ self.p.start_transmit(self.r1)
+ self.assertEquals([], list(self.p.poll(0)))
+
+ def test_unwriteable_after_add(self):
+ self.p.start_transmit(self.r1)
+ self.fill(self.r1)
+ self.assertEquals([], list(self.p.poll(0)))
+
+ def test_unwriteable_then_writeable(self):
+ self.fill(self.r1)
+ self.p.start_transmit(self.r1)
+ self.assertEquals([], list(self.p.poll(0)))
+ self.drain(self.l1)
+ self.assertEquals([self.r1], list(self.p.poll(0)))
+
+ def test_double_unwriteable_then_writeable(self):
+ self.fill(self.r1)
+ self.p.start_transmit(self.r1)
+
+ self.fill(self.r2)
+ self.p.start_transmit(self.r2)
+
+ self.assertEquals([], list(self.p.poll(0)))
+
+ self.drain(self.l1)
+ self.assertEquals([self.r1], list(self.p.poll(0)))
+
+ self.drain(self.l2)
+ self.assertEquals(set([self.r1, self.r2]), set(self.p.poll(0)))
+
+
+class MutateDuringYieldMixin(PollerMixin, SockMixin):
+ # verify behaviour when poller contents is modified in the middle of
+ # poll() output generation.
+
+ def test_one_readable_removed_before_yield(self):
+ self.fill(self.l1)
+ self.p.start_receive(self.r1)
+ p = self.p.poll(0)
+ self.p.stop_receive(self.r1)
+ self.assertEquals([], list(p))
+
+ def test_one_writeable_removed_before_yield(self):
+ self.p.start_transmit(self.r1)
+ p = self.p.poll(0)
+ self.p.stop_transmit(self.r1)
+ self.assertEquals([], list(p))
+
+ def test_one_readable_readded_before_yield(self):
+ # fd removed, closed, another fd opened, gets same fd number, re-added.
+ # event fires for wrong underlying object.
+ self.fill(self.l1)
+ self.p.start_receive(self.r1)
+ p = self.p.poll(0)
+ self.p.stop_receive(self.r1)
+ self.p.start_receive(self.r1)
+ self.assertEquals([], list(p))
+
+ def test_one_readable_readded_during_yield(self):
+ self.fill(self.l1)
+ self.p.start_receive(self.r1)
+
+ self.fill(self.l2)
+ self.p.start_receive(self.r2)
+
+ p = self.p.poll(0)
+
+ # figure out which one is consumed and which is still to-read.
+ consumed = next(p)
+ ready = (self.r1, self.r2)[consumed == self.r1]
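+ # (r1, r2)[bool] picks r2 when r1 was the one consumed, else r1.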
+
+ # now remove and re-add the one that hasn't been read yet.
+ self.p.stop_receive(ready)
+ self.p.start_receive(ready)
+
+ # the start_receive() may be for a totally new underlying file object,
+ # the live loop iteration must not yield any buffered readiness event.
+ self.assertEquals([], list(p))
+
+
+class FileClosedMixin(PollerMixin, SockMixin):
+ # Verify behaviour when a registered file object is closed in various
+ # scenarios, without first calling stop_receive()/stop_transmit().
+
+ def test_writeable_then_closed(self):
+ self.p.start_transmit(self.r1)
+ self.assertEquals([self.r1], list(self.p.poll(0)))
+ self.close_socks()
+ try:
+ self.assertEquals([], list(self.p.poll(0)))
+ except select.error:
+ # a crash is also reasonable here.
+ pass
+
+ def test_writeable_closed_before_yield(self):
+ self.p.start_transmit(self.r1)
+ p = self.p.poll(0)
+ self.close_socks()
+ try:
+ self.assertEquals([], list(p))
+ except select.error:
+ # a crash is also reasonable here.
+ pass
+
+ def test_readable_then_closed(self):
+ self.fill(self.l1)
+ self.p.start_receive(self.r1)
+ self.assertEquals([self.r1], list(self.p.poll(0)))
+ self.close_socks()
+ try:
+ self.assertEquals([], list(self.p.poll(0)))
+ except select.error:
+ # a crash is also reasonable here.
+ pass
+
+ def test_readable_closed_before_yield(self):
+ self.fill(self.l1)
+ self.p.start_receive(self.r1)
+ p = self.p.poll(0)
+ self.close_socks()
+ try:
+ self.assertEquals([], list(p))
+ except select.error:
+ # a crash is also reasonable here.
+ pass
+
+
+class TtyHangupMixin(PollerMixin):
+ def test_tty_hangup_detected(self):
+ # bug in initial select.poll() implementation failed to detect POLLHUP.
+ master_fd, slave_fd = mitogen.parent.openpty()
+ try:
+ self.p.start_receive(master_fd)
+ self.assertEquals([], list(self.p.poll(0)))
+ os.close(slave_fd)
+ slave_fd = None
+ self.assertEquals([master_fd], list(self.p.poll(0)))
+ finally:
+ if slave_fd is not None:
+ os.close(slave_fd)
+ os.close(master_fd)
+
+
+class DistinctDataMixin(PollerMixin, SockMixin):
+ # Verify different data is yielded for the same FD according to the event
+ # being raised.
+
+ def test_one_distinct(self):
+ rdata = object()
+ wdata = object()
+ self.p.start_receive(self.r1, data=rdata)
+ self.p.start_transmit(self.r1, data=wdata)
+
+ self.assertEquals([wdata], list(self.p.poll(0)))
+ self.fill(self.l1) # r1 is now readable and writeable.
+ self.assertEquals(set([rdata, wdata]), set(self.p.poll(0)))
+
+
+class AllMixin(ReceiveStateMixin,
+ TransmitStateMixin,
+ ReadableMixin,
+ WriteableMixin,
+ MutateDuringYieldMixin,
+ FileClosedMixin,
+ DistinctDataMixin,
+ PollMixin,
+ TtyHangupMixin,
+ CloseMixin):
+ """
+ Helper to avoid cutpasting mixin names below.
+ """
+
+
+class SelectTest(AllMixin, testlib.TestCase):
+ klass = mitogen.core.Poller
+
+SelectTest = unittest2.skipIf(
+ condition=not hasattr(select, 'select'),
+ reason='select.select() not supported'
+)(SelectTest)
+
+
+class KqueueTest(AllMixin, testlib.TestCase):
+ klass = mitogen.parent.KqueuePoller
+
+KqueueTest = unittest2.skipIf(
+ condition=not hasattr(select, 'kqueue'),
+ reason='select.kqueue() not supported'
+)(KqueueTest)
+
+
+class EpollTest(AllMixin, testlib.TestCase):
+ klass = mitogen.parent.EpollPoller
+
+EpollTest = unittest2.skipIf(
+ condition=not hasattr(select, 'epoll'),
+ reason='select.epoll() not supported'
+)(EpollTest)
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/polyfill_functions_test.py b/tests/polyfill_functions_test.py
new file mode 100644
index 00000000..ae65eb2f
--- /dev/null
+++ b/tests/polyfill_functions_test.py
@@ -0,0 +1,103 @@
+
+import testlib
+import unittest2
+
+import mitogen.core
+from mitogen.core import b
+
+
+class BytesPartitionTest(testlib.TestCase):
+ func = staticmethod(mitogen.core.bytes_partition)
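+ # bytes_partition() is a compat shim: like bytes.partition(), it must
+ # always return a 3-tuple of BytesType on Python 2 and 3 alike, which is
+ # what each case below asserts.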
+
+ def test_no_sep(self):
+ left, sep, right = self.func(b('dave'), b('x'))
+ self.assertTrue(isinstance(left, mitogen.core.BytesType))
+ self.assertTrue(isinstance(sep, mitogen.core.BytesType))
+ self.assertTrue(isinstance(right, mitogen.core.BytesType))
+ self.assertEquals(left, b('dave'))
+ self.assertEquals(sep, b(''))
+ self.assertEquals(right, b(''))
+
+ def test_one_sep(self):
+ left, sep, right = self.func(b('davexdave'), b('x'))
+ self.assertTrue(isinstance(left, mitogen.core.BytesType))
+ self.assertTrue(isinstance(sep, mitogen.core.BytesType))
+ self.assertTrue(isinstance(right, mitogen.core.BytesType))
+ self.assertEquals(left, b('dave'))
+ self.assertEquals(sep, b('x'))
+ self.assertEquals(right, b('dave'))
+
+ def test_two_seps(self):
+ left, sep, right = self.func(b('davexdavexdave'), b('x'))
+ self.assertTrue(isinstance(left, mitogen.core.BytesType))
+ self.assertTrue(isinstance(sep, mitogen.core.BytesType))
+ self.assertTrue(isinstance(right, mitogen.core.BytesType))
+ self.assertEquals(left, b('dave'))
+ self.assertEquals(sep, b('x'))
+ self.assertEquals(right, b('davexdave'))
+
+
+class StrPartitionTest(testlib.TestCase):
+ func = staticmethod(mitogen.core.str_partition)
+
+ def test_no_sep(self):
+ left, sep, right = self.func(u'dave', u'x')
+ self.assertTrue(isinstance(left, mitogen.core.UnicodeType))
+ self.assertTrue(isinstance(sep, mitogen.core.UnicodeType))
+ self.assertTrue(isinstance(right, mitogen.core.UnicodeType))
+ self.assertEquals(left, u'dave')
+ self.assertEquals(sep, u'')
+ self.assertEquals(right, u'')
+
+ def test_one_sep(self):
+ left, sep, right = self.func(u'davexdave', u'x')
+ self.assertTrue(isinstance(left, mitogen.core.UnicodeType))
+ self.assertTrue(isinstance(sep, mitogen.core.UnicodeType))
+ self.assertTrue(isinstance(right, mitogen.core.UnicodeType))
+ self.assertEquals(left, u'dave')
+ self.assertEquals(sep, u'x')
+ self.assertEquals(right, u'dave')
+
+ def test_two_seps(self):
+ left, sep, right = self.func(u'davexdavexdave', u'x')
+ self.assertTrue(isinstance(left, mitogen.core.UnicodeType))
+ self.assertTrue(isinstance(sep, mitogen.core.UnicodeType))
+ self.assertTrue(isinstance(right, mitogen.core.UnicodeType))
+ self.assertEquals(left, u'dave')
+ self.assertEquals(sep, u'x')
+ self.assertEquals(right, u'davexdave')
+
+
+class StrRpartitionTest(testlib.TestCase):
+ func = staticmethod(mitogen.core.str_rpartition)
+
+ def test_no_sep(self):
+ left, sep, right = self.func(u'dave', u'x')
+ self.assertTrue(isinstance(left, mitogen.core.UnicodeType))
+ self.assertTrue(isinstance(sep, mitogen.core.UnicodeType))
+ self.assertTrue(isinstance(right, mitogen.core.UnicodeType))
+ self.assertEquals(left, u'')
+ self.assertEquals(sep, u'')
+ self.assertEquals(right, u'dave')
+
+ def test_one_sep(self):
+ left, sep, right = self.func(u'davexdave', u'x')
+ self.assertTrue(isinstance(left, mitogen.core.UnicodeType))
+ self.assertTrue(isinstance(sep, mitogen.core.UnicodeType))
+ self.assertTrue(isinstance(right, mitogen.core.UnicodeType))
+ self.assertEquals(left, u'dave')
+ self.assertEquals(sep, u'x')
+ self.assertEquals(right, u'dave')
+
+ def test_two_seps(self):
+ left, sep, right = self.func(u'davexdavexdave', u'x')
+ self.assertTrue(isinstance(left, mitogen.core.UnicodeType))
+ self.assertTrue(isinstance(sep, mitogen.core.UnicodeType))
+ self.assertTrue(isinstance(right, mitogen.core.UnicodeType))
+ self.assertEquals(left, u'davexdave')
+ self.assertEquals(sep, u'x')
+ self.assertEquals(right, u'dave')
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/receiver_test.py b/tests/receiver_test.py
index 5dd6273a..65c5f7ff 100644
--- a/tests/receiver_test.py
+++ b/tests/receiver_test.py
@@ -1,4 +1,6 @@
+import sys
+import threading
import unittest2
import mitogen.core
@@ -30,11 +32,128 @@ class ConstructorTest(testlib.RouterMixin, testlib.TestCase):
class IterationTest(testlib.RouterMixin, testlib.TestCase):
def test_dead_stops_iteration(self):
recv = mitogen.core.Receiver(self.router)
- fork = self.router.fork()
+ fork = self.router.local()
ret = fork.call_async(yield_stuff_then_die, recv.to_sender())
self.assertEquals(list(range(5)), list(m.unpickle() for m in recv))
self.assertEquals(10, ret.get().unpickle())
+ def iter_and_put(self, recv, latch):
+ try:
+ for msg in recv:
+ latch.put(msg)
+ except Exception:
+ latch.put(sys.exc_info()[1])
+
+ def test_close_stops_iteration(self):
+ recv = mitogen.core.Receiver(self.router)
+ latch = mitogen.core.Latch()
+ t = threading.Thread(
+ target=self.iter_and_put,
+ args=(recv, latch),
+ )
+ t.start()
+ t.join(0.1)
+ recv.close()
+ t.join()
+ self.assertTrue(latch.empty())
+
+
+class CloseTest(testlib.RouterMixin, testlib.TestCase):
+ def wait(self, latch, wait_recv):
+ try:
+ latch.put(wait_recv.get())
+ except Exception:
+ latch.put(sys.exc_info()[1])
+
+ def test_closes_one(self):
+ latch = mitogen.core.Latch()
+ wait_recv = mitogen.core.Receiver(self.router)
+ t = threading.Thread(target=lambda: self.wait(latch, wait_recv))
+ t.start()
+ wait_recv.close()
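+ # The waiting thread parks the ChannelError in the latch; re-raise it so
+ # assertRaises can inspect the message.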
+ def throw():
+ raise latch.get()
+ t.join()
+ e = self.assertRaises(mitogen.core.ChannelError, throw)
+ self.assertEquals(e.args[0], mitogen.core.Receiver.closed_msg)
+
+ def test_closes_all(self):
+ latch = mitogen.core.Latch()
+ wait_recv = mitogen.core.Receiver(self.router)
+ ts = [
+ threading.Thread(target=lambda: self.wait(latch, wait_recv))
+ for x in range(5)
+ ]
+ for t in ts:
+ t.start()
+ wait_recv.close()
+ def throw():
+ raise latch.get()
+ for x in range(5):
+ e = self.assertRaises(mitogen.core.ChannelError, throw)
+ self.assertEquals(e.args[0], mitogen.core.Receiver.closed_msg)
+ for t in ts:
+ t.join()
+
+
+class OnReceiveTest(testlib.RouterMixin, testlib.TestCase):
+ # Verify behaviour of _on_receive dead message handling. A dead message
+ # should unregister the receiver and wake all threads.
+
+ def wait(self, latch, wait_recv):
+ try:
+ latch.put(wait_recv.get())
+ except Exception:
+ latch.put(sys.exc_info()[1])
+
+ def test_sender_closes_one_thread(self):
+ latch = mitogen.core.Latch()
+ wait_recv = mitogen.core.Receiver(self.router)
+ t = threading.Thread(target=lambda: self.wait(latch, wait_recv))
+ t.start()
+ sender = wait_recv.to_sender()
+ sender.close()
+ def throw():
+ raise latch.get()
+ t.join()
+ e = self.assertRaises(mitogen.core.ChannelError, throw)
+ self.assertEquals(e.args[0], sender.explicit_close_msg)
+
+ @unittest2.skip(reason=(
+ 'Unclear if a single dead message received from remote should '
+ 'cause all threads to wake up.'
+ ))
+ def test_sender_closes_all_threads(self):
+ latch = mitogen.core.Latch()
+ wait_recv = mitogen.core.Receiver(self.router)
+ ts = [
+ threading.Thread(target=lambda: self.wait(latch, wait_recv))
+ for x in range(5)
+ ]
+ for t in ts:
+ t.start()
+ sender = wait_recv.to_sender()
+ sender.close()
+ def throw():
+ raise latch.get()
+ for x in range(5):
+ e = self.assertRaises(mitogen.core.ChannelError, throw)
+ self.assertEquals(e.args[0], mitogen.core.Receiver.closed_msg)
+ for t in ts:
+ t.join()
+
+ # TODO: what happens to a Select subscribed to the receiver in this case?
+
+
+class ToSenderTest(testlib.RouterMixin, testlib.TestCase):
+ klass = mitogen.core.Receiver
+
+ def test_returned_context(self):
+ myself = self.router.myself()
+ recv = self.klass(self.router)
+ self.assertEquals(myself, recv.to_sender().context)
+
if __name__ == '__main__':
unittest2.main()
diff --git a/tests/requirements.txt b/tests/requirements.txt
new file mode 100644
index 00000000..327f563a
--- /dev/null
+++ b/tests/requirements.txt
@@ -0,0 +1,15 @@
+psutil==5.4.8
+coverage==4.5.1
+Django==1.6.11 # Last version supporting 2.6.
+mock==2.0.0
+pytz==2018.5
+cffi==1.11.2 # Random pin to try to fix pycparser==2.18 not having effect
+pycparser==2.18 # Last version supporting 2.6.
+faulthandler==3.1; python_version < '3.3' # used by testlib
+pytest-catchlog==1.2.2
+pytest==3.1.2
+timeoutcontext==1.2.0
+unittest2==1.1.0
+# Fix InsecurePlatformWarning while creating py26 tox environment
+# https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
+urllib3[secure]; python_version < '2.7.9'
diff --git a/tests/responder_test.py b/tests/responder_test.py
index 46400fce..da4f22d7 100644
--- a/tests/responder_test.py
+++ b/tests/responder_test.py
@@ -13,11 +13,12 @@ import plain_old_module
import simple_pkg.a
-class NeutralizeMainTest(testlib.RouterMixin, unittest2.TestCase):
+class NeutralizeMainTest(testlib.RouterMixin, testlib.TestCase):
klass = mitogen.master.ModuleResponder
def call(self, *args, **kwargs):
- return self.klass(self.router).neutralize_main(*args, **kwargs)
+ router = mock.Mock()
+ return self.klass(router).neutralize_main(*args, **kwargs)
def test_missing_exec_guard(self):
path = testlib.data_path('main_with_no_exec_guard.py')
@@ -66,13 +67,16 @@ class NeutralizeMainTest(testlib.RouterMixin, unittest2.TestCase):
self.assertEquals(bits[-3:], ['def', 'main():', 'pass'])
-
-class GoodModulesTest(testlib.RouterMixin, unittest2.TestCase):
+class GoodModulesTest(testlib.RouterMixin, testlib.TestCase):
def test_plain_old_module(self):
# The simplest case: a top-level module with no interesting imports or
# package machinery damage.
context = self.router.local()
+
self.assertEquals(256, context.call(plain_old_module.pow, 2, 8))
+ self.assertEquals(1, self.router.responder.get_module_count)
+ self.assertEquals(1, self.router.responder.good_load_module_count)
+ self.assertLess(300, self.router.responder.good_load_module_size)
def test_simple_pkg(self):
# Ensure success of a simple package containing two submodules, one of
@@ -80,6 +84,10 @@ class GoodModulesTest(testlib.RouterMixin, unittest2.TestCase):
context = self.router.local()
self.assertEquals(3,
context.call(simple_pkg.a.subtract_one_add_two, 2))
+ self.assertEquals(2, self.router.responder.get_module_count)
+ self.assertEquals(3, self.router.responder.good_load_module_count)
+ self.assertEquals(0, self.router.responder.bad_load_module_count)
+ self.assertLess(450, self.router.responder.good_load_module_size)
def test_self_contained_program(self):
# Ensure a program composed of a single script can be imported
@@ -89,7 +97,7 @@ class GoodModulesTest(testlib.RouterMixin, unittest2.TestCase):
self.assertEquals(output, "['__main__', 50]\n")
-class BrokenModulesTest(unittest2.TestCase):
+class BrokenModulesTest(testlib.TestCase):
def test_obviously_missing(self):
# Ensure we don't crash in the case of a module legitimately being
# unavailable. Should never happen in the real world.
@@ -109,12 +117,21 @@ class BrokenModulesTest(unittest2.TestCase):
responder._on_get_module(msg)
self.assertEquals(1, len(router._async_route.mock_calls))
+ self.assertEquals(1, responder.get_module_count)
+ self.assertEquals(0, responder.good_load_module_count)
+ self.assertEquals(0, responder.good_load_module_size)
+ self.assertEquals(1, responder.bad_load_module_count)
+
call = router._async_route.mock_calls[0]
msg, = call[1]
self.assertEquals(mitogen.core.LOAD_MODULE, msg.handle)
self.assertEquals(('non_existent_module', None, None, None, ()),
msg.unpickle())
+ @unittest2.skipIf(
+ condition=sys.version_info < (2, 6),
+ reason='Ancient Python lacked "from . import foo"',
+ )
def test_ansible_six_messed_up_path(self):
# The copy of six.py shipped with Ansible appears in a package whose
# __path__ subsequently ends up empty, which prevents pkgutil from
@@ -138,13 +155,44 @@ class BrokenModulesTest(unittest2.TestCase):
responder._on_get_module(msg)
self.assertEquals(1, len(router._async_route.mock_calls))
+ self.assertEquals(1, responder.get_module_count)
+ self.assertEquals(0, responder.good_load_module_count)
+ self.assertEquals(0, responder.good_load_module_size)
+ self.assertEquals(1, responder.bad_load_module_count)
+
call = router._async_route.mock_calls[0]
msg, = call[1]
self.assertEquals(mitogen.core.LOAD_MODULE, msg.handle)
self.assertIsInstance(msg.unpickle(), tuple)
-class BlacklistTest(unittest2.TestCase):
+class ForwardTest(testlib.RouterMixin, testlib.TestCase):
+ def test_forward_to_nonexistent_context(self):
+ nonexistent = mitogen.core.Context(self.router, 123)
+ capture = testlib.LogCapturer()
+ capture.start()
+ self.broker.defer_sync(lambda:
+ self.router.responder.forward_modules(
+ nonexistent,
+ ['mitogen.core']
+ )
+ )
+ s = capture.stop()
+ self.assertTrue('dropping forward of' in s)
+
+ def test_stats(self):
+ # Forwarding stats broken because forwarding is broken. See #469.
+ c1 = self.router.local()
+ c2 = self.router.local(via=c1)
+
+ self.assertEquals(256, c2.call(plain_old_module.pow, 2, 8))
+ self.assertEquals(2, self.router.responder.get_module_count)
+ self.assertEquals(2, self.router.responder.good_load_module_count)
+ self.assertLess(10000, self.router.responder.good_load_module_size)
+ self.assertGreater(40000, self.router.responder.good_load_module_size)
+
+
+class BlacklistTest(testlib.TestCase):
@unittest2.skip('implement me')
def test_whitelist_no_blacklist(self):
assert 0
diff --git a/tests/router_test.py b/tests/router_test.py
index 68474e00..4e2c19ed 100644
--- a/tests/router_test.py
+++ b/tests/router_test.py
@@ -1,6 +1,5 @@
-import logging
-import subprocess
import time
+import zlib
import unittest2
@@ -35,16 +34,16 @@ def send_n_sized_reply(sender, n):
return 123
-class SourceVerifyTest(testlib.RouterMixin, unittest2.TestCase):
+class SourceVerifyTest(testlib.RouterMixin, testlib.TestCase):
def setUp(self):
super(SourceVerifyTest, self).setUp()
# Create some children, ping them, and store what their messages look
# like so we can mess with them later.
- self.child1 = self.router.fork()
+ self.child1 = self.router.local()
self.child1_msg = self.child1.call_async(ping).get()
self.child1_stream = self.router._stream_by_id[self.child1.context_id]
- self.child2 = self.router.fork()
+ self.child2 = self.router.local()
self.child2_msg = self.child2.call_async(ping).get()
self.child2_stream = self.router._stream_by_id[self.child2.context_id]
@@ -69,7 +68,7 @@ class SourceVerifyTest(testlib.RouterMixin, unittest2.TestCase):
self.assertTrue(recv.empty())
# Ensure error was logged.
- expect = 'bad auth_id: got %d via' % (self.child2_msg.auth_id,)
+ expect = 'bad auth_id: got %r via' % (self.child2_msg.auth_id,)
self.assertTrue(expect in log.stop())
def test_bad_src_id(self):
@@ -136,19 +135,18 @@ class PolicyTest(testlib.RouterMixin, testlib.TestCase):
self.sync_with_broker()
# Verify log.
- expect = '%r: policy refused message: ' % (self.router,)
- self.assertTrue(expect in log.stop())
+ self.assertTrue(self.router.refused_msg in log.stop())
# Verify message was not delivered.
self.assertTrue(recv.empty())
# Verify CallError received by reply_to target.
- e = self.assertRaises(mitogen.core.CallError,
+ e = self.assertRaises(mitogen.core.ChannelError,
lambda: reply_target.get().unpickle())
self.assertEquals(e.args[0], self.router.refused_msg)
-class CrashTest(testlib.BrokerMixin, unittest2.TestCase):
+class CrashTest(testlib.BrokerMixin, testlib.TestCase):
# This is testing both Broker's ability to crash nicely, and Router's
# ability to respond to the crash event.
klass = mitogen.master.Router
@@ -177,54 +175,100 @@ class CrashTest(testlib.BrokerMixin, unittest2.TestCase):
self.assertTrue(expect in log.stop())
-
-class AddHandlerTest(unittest2.TestCase):
+class AddHandlerTest(testlib.TestCase):
klass = mitogen.master.Router
- def test_invoked_at_shutdown(self):
+ def test_dead_message_sent_at_shutdown(self):
router = self.klass()
queue = Queue.Queue()
handle = router.add_handler(queue.put)
router.broker.shutdown()
self.assertTrue(queue.get(timeout=5).is_dead)
+ router.broker.join()
+
+ def test_cannot_double_register(self):
+ router = self.klass()
+ try:
+ router.add_handler((lambda: None), handle=1234)
+ e = self.assertRaises(mitogen.core.Error,
+ lambda: router.add_handler((lambda: None), handle=1234))
+ self.assertEquals(router.duplicate_handle_msg, e.args[0])
+ router.del_handler(1234)
+ finally:
+ router.broker.shutdown()
+ router.broker.join()
+
+ def test_can_reregister(self):
+ router = self.klass()
+ try:
+ router.add_handler((lambda: None), handle=1234)
+ router.del_handler(1234)
+ router.add_handler((lambda: None), handle=1234)
+ router.del_handler(1234)
+ finally:
+ router.broker.shutdown()
+ router.broker.join()
+
+class MyselfTest(testlib.RouterMixin, testlib.TestCase):
+ def test_myself(self):
+ myself = self.router.myself()
+ self.assertEquals(myself.context_id, mitogen.context_id)
+ # TODO: context should know its own name too.
+ self.assertEquals(myself.name, 'self')
-class MessageSizeTest(testlib.BrokerMixin, unittest2.TestCase):
+
+class MessageSizeTest(testlib.BrokerMixin, testlib.TestCase):
klass = mitogen.master.Router
def test_local_exceeded(self):
router = self.klass(broker=self.broker, max_message_size=4096)
- recv = mitogen.core.Receiver(router)
logs = testlib.LogCapturer()
logs.start()
- sem = mitogen.core.Latch()
+ # Send message and block for one IO loop, so _async_route can run.
router.route(mitogen.core.Message.pickled(' '*8192))
- router.broker.defer(sem.put, ' ') # wlil always run after _async_route
- sem.get()
+ router.broker.defer_sync(lambda: None)
expect = 'message too large (max 4096 bytes)'
self.assertTrue(expect in logs.stop())
- def test_remote_configured(self):
+ def test_local_dead_message(self):
+ # Local router should generate dead message when reply_to is set.
router = self.klass(broker=self.broker, max_message_size=4096)
- remote = router.fork()
+
+ logs = testlib.LogCapturer()
+ logs.start()
+
+ expect = router.too_large_msg % (4096,)
+
+ # Try function call. Receiver should be woken by a dead message sent by
+ # router due to message size exceeded.
+ child = router.local()
+ e = self.assertRaises(mitogen.core.ChannelError,
+ lambda: child.call(zlib.crc32, ' '*8192))
+ self.assertEquals(e.args[0], expect)
+
+ self.assertTrue(expect in logs.stop())
+
+ def test_remote_configured(self):
+ router = self.klass(broker=self.broker, max_message_size=64*1024)
+ remote = router.local()
size = remote.call(return_router_max_message_size)
- self.assertEquals(size, 4096)
+ self.assertEquals(size, 64*1024)
def test_remote_exceeded(self):
# Ensure new contexts receive a router with the same value.
- router = self.klass(broker=self.broker, max_message_size=4096)
+ router = self.klass(broker=self.broker, max_message_size=64*1024)
recv = mitogen.core.Receiver(router)
logs = testlib.LogCapturer()
logs.start()
+ remote = router.local()
+ remote.call(send_n_sized_reply, recv.to_sender(), 128*1024)
- remote = router.fork()
- remote.call(send_n_sized_reply, recv.to_sender(), 8192)
-
- expect = 'message too large (max 4096 bytes)'
+ expect = 'message too large (max %d bytes)' % (64*1024,)
self.assertTrue(expect in logs.stop())
@@ -232,16 +276,17 @@ class NoRouteTest(testlib.RouterMixin, testlib.TestCase):
def test_invalid_handle_returns_dead(self):
# Verify sending a message to an invalid handle yields a dead message
# from the target context.
- l1 = self.router.fork()
+ l1 = self.router.local()
recv = l1.send_async(mitogen.core.Message(handle=999))
msg = recv.get(throw_dead=False)
self.assertEquals(msg.is_dead, True)
self.assertEquals(msg.src_id, l1.context_id)
+ self.assertEquals(msg.data, self.router.invalid_handle_msg.encode())
recv = l1.send_async(mitogen.core.Message(handle=999))
e = self.assertRaises(mitogen.core.ChannelError,
lambda: recv.get())
- self.assertEquals(e.args[0], mitogen.core.ChannelError.remote_msg)
+ self.assertEquals(e.args[0], self.router.invalid_handle_msg)
def test_totally_invalid_context_returns_dead(self):
recv = mitogen.core.Receiver(self.router)
@@ -254,14 +299,21 @@ class NoRouteTest(testlib.RouterMixin, testlib.TestCase):
rmsg = recv.get(throw_dead=False)
self.assertEquals(rmsg.is_dead, True)
self.assertEquals(rmsg.src_id, mitogen.context_id)
+ self.assertEquals(rmsg.data, (self.router.no_route_msg % (
+ 1234,
+ mitogen.context_id,
+ )).encode())
self.router.route(msg)
e = self.assertRaises(mitogen.core.ChannelError,
lambda: recv.get())
- self.assertEquals(e.args[0], mitogen.core.ChannelError.local_msg)
+ self.assertEquals(e.args[0], (self.router.no_route_msg % (
+ 1234,
+ mitogen.context_id,
+ )))
def test_previously_alive_context_returns_dead(self):
- l1 = self.router.fork()
+ l1 = self.router.local()
l1.shutdown(wait=True)
recv = mitogen.core.Receiver(self.router)
msg = mitogen.core.Message(
@@ -273,24 +325,34 @@ class NoRouteTest(testlib.RouterMixin, testlib.TestCase):
rmsg = recv.get(throw_dead=False)
self.assertEquals(rmsg.is_dead, True)
self.assertEquals(rmsg.src_id, mitogen.context_id)
+ self.assertEquals(rmsg.data, (self.router.no_route_msg % (
+ l1.context_id,
+ mitogen.context_id,
+ )).encode())
self.router.route(msg)
e = self.assertRaises(mitogen.core.ChannelError,
lambda: recv.get())
- self.assertEquals(e.args[0], mitogen.core.ChannelError.local_msg)
+ self.assertEquals(e.args[0], self.router.no_route_msg % (
+ l1.context_id,
+ mitogen.context_id,
+ ))
class UnidirectionalTest(testlib.RouterMixin, testlib.TestCase):
def test_siblings_cant_talk(self):
self.router.unidirectional = True
- l1 = self.router.fork()
- l2 = self.router.fork()
+ l1 = self.router.local()
+ l2 = self.router.local()
logs = testlib.LogCapturer()
logs.start()
e = self.assertRaises(mitogen.core.CallError,
lambda: l2.call(ping_context, l1))
- msg = 'mitogen.core.ChannelError: Channel closed by remote end.'
+ msg = self.router.unidirectional_msg % (
+ l2.context_id,
+ l1.context_id,
+ )
self.assertTrue(msg in str(e))
self.assertTrue('routing mode prevents forward of ' in logs.stop())
@@ -298,20 +360,28 @@ class UnidirectionalTest(testlib.RouterMixin, testlib.TestCase):
self.router.unidirectional = True
# One stream has auth_id stamped to that of the master, so it should be
# treated like a parent.
- l1 = self.router.fork()
+ l1 = self.router.local()
l1s = self.router.stream_by_id(l1.context_id)
l1s.auth_id = mitogen.context_id
l1s.is_privileged = True
- l2 = self.router.fork()
- logs = testlib.LogCapturer()
- logs.start()
+ l2 = self.router.local()
e = self.assertRaises(mitogen.core.CallError,
lambda: l2.call(ping_context, l1))
- msg = 'mitogen.core.CallError: Refused by policy.'
- self.assertTrue(msg in str(e))
- self.assertTrue('policy refused message: ' in logs.stop())
+ msg = 'mitogen.core.ChannelError: %s' % (self.router.refused_msg,)
+ self.assertTrue(str(e).startswith(msg))
+
+
+class EgressIdsTest(testlib.RouterMixin, testlib.TestCase):
+ def test_egress_ids_populated(self):
+ # Ensure Stream.egress_ids is populated on message reception.
+ c1 = self.router.local()
+ stream = self.router.stream_by_id(c1.context_id)
+ self.assertEquals(set(), stream.egress_ids)
+
+ c1.call(time.sleep, 0)
+ self.assertEquals(set([mitogen.context_id]), stream.egress_ids)
if __name__ == '__main__':
diff --git a/tests/select_test.py b/tests/select_test.py
index d9345954..7e6256c8 100644
--- a/tests/select_test.py
+++ b/tests/select_test.py
@@ -6,6 +6,22 @@ import mitogen.select
import testlib
+class BoolTest(testlib.RouterMixin, testlib.TestCase):
+ klass = mitogen.select.Select
+
+ def test_receiver(self):
+ recv = mitogen.core.Receiver(self.router) # oneshot
+ select = self.klass()
+ self.assertFalse(select)
+ select.add(recv)
+ self.assertTrue(select)
+
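+ # Deliver a message directly; _on_receive() is normally invoked on the
+ # broker thread when the router dispatches to this handle.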
+ recv._on_receive(mitogen.core.Message.pickled('123'))
+ self.assertTrue(select)
+ self.assertEquals('123', select.get().unpickle())
+ self.assertFalse(select)
+
+
class AddTest(testlib.RouterMixin, testlib.TestCase):
klass = mitogen.select.Select
diff --git a/tests/serialization_test.py b/tests/serialization_test.py
index 3b56e10a..23c4a2d9 100644
--- a/tests/serialization_test.py
+++ b/tests/serialization_test.py
@@ -1,23 +1,19 @@
-try:
- from io import StringIO
- from io import BytesIO
-except ImportError:
- from StringIO import StringIO as StringIO
- from StringIO import StringIO as BytesIO
-
+import pickle
import unittest2
import mitogen.core
from mitogen.core import b
+import testlib
+
def roundtrip(v):
msg = mitogen.core.Message.pickled(v)
return mitogen.core.Message(data=msg.data).unpickle()
-class BlobTest(unittest2.TestCase):
+class BlobTest(testlib.TestCase):
klass = mitogen.core.Blob
# Python 3 pickle protocol 2 does weird stuff depending on whether an empty
@@ -33,5 +29,30 @@ class BlobTest(unittest2.TestCase):
self.assertEquals(b(''), roundtrip(v))
+class ContextTest(testlib.RouterMixin, testlib.TestCase):
+ klass = mitogen.core.Context
+
+ # Ensure Context can be round-tripped by regular pickle in addition to
+ # Mitogen's hacked pickle. Users may try to call pickle on a Context in
+ # strange circumstances, and it's often used to glue pieces of an app
+ # together (e.g. Ansible).
+
+ def test_mitogen_roundtrip(self):
+ c = self.router.local()
+ r = mitogen.core.Receiver(self.router)
+ r.to_sender().send(c)
+ c2 = r.get().unpickle()
+ self.assertEquals(None, c2.router)
+ self.assertEquals(c.context_id, c2.context_id)
+ self.assertEquals(c.name, c2.name)
+
+ def test_vanilla_roundtrip(self):
+ c = self.router.local()
+ c2 = pickle.loads(pickle.dumps(c))
+ self.assertEquals(None, c2.router)
+ self.assertEquals(c.context_id, c2.context_id)
+ self.assertEquals(c.name, c2.name)
+
+
if __name__ == '__main__':
unittest2.main()
diff --git a/tests/service_test.py b/tests/service_test.py
index 8e2cdac3..3869f713 100644
--- a/tests/service_test.py
+++ b/tests/service_test.py
@@ -38,29 +38,34 @@ def call_service_in(context, service_name, method_name):
class ActivationTest(testlib.RouterMixin, testlib.TestCase):
def test_parent_can_activate(self):
- l1 = self.router.fork()
+ l1 = self.router.local()
counter, id_ = l1.call_service(MyService, 'get_id')
self.assertEquals(1, counter)
self.assertTrue(isinstance(id_, int))
def test_sibling_cannot_activate_framework(self):
- l1 = self.router.fork()
- l2 = self.router.fork()
+ l1 = self.router.local()
+ l2 = self.router.local()
exc = self.assertRaises(mitogen.core.CallError,
lambda: l2.call(call_service_in, l1, MyService2.name(), 'get_id'))
self.assertTrue(mitogen.core.Router.refused_msg in exc.args[0])
def test_sibling_cannot_activate_service(self):
- l1 = self.router.fork()
- l2 = self.router.fork()
+ l1 = self.router.local()
+ l2 = self.router.local()
l1.call_service(MyService, 'get_id') # force framework activation
- exc = self.assertRaises(mitogen.core.CallError,
- lambda: l2.call(call_service_in, l1, MyService2.name(), 'get_id'))
+ capture = testlib.LogCapturer()
+ capture.start()
+ try:
+ exc = self.assertRaises(mitogen.core.CallError,
+ lambda: l2.call(call_service_in, l1, MyService2.name(), 'get_id'))
+ finally:
+ capture.stop()
msg = mitogen.service.Activator.not_active_msg % (MyService2.name(),)
self.assertTrue(msg in exc.args[0])
def test_activates_only_once(self):
- l1 = self.router.fork()
+ l1 = self.router.local()
counter, id_ = l1.call_service(MyService, 'get_id')
counter2, id_2 = l1.call_service(MyService, 'get_id')
self.assertEquals(1, counter)
@@ -70,18 +75,23 @@ class ActivationTest(testlib.RouterMixin, testlib.TestCase):
class PermissionTest(testlib.RouterMixin, testlib.TestCase):
def test_sibling_unprivileged_ok(self):
- l1 = self.router.fork()
+ l1 = self.router.local()
l1.call_service(MyService, 'get_id')
- l2 = self.router.fork()
+ l2 = self.router.local()
self.assertEquals('unprivileged!',
l2.call(call_service_in, l1, MyService.name(), 'unprivileged_op'))
def test_sibling_privileged_bad(self):
- l1 = self.router.fork()
+ l1 = self.router.local()
l1.call_service(MyService, 'get_id')
- l2 = self.router.fork()
- exc = self.assertRaises(mitogen.core.CallError, lambda:
- l2.call(call_service_in, l1, MyService.name(), 'privileged_op'))
+ l2 = self.router.local()
+ capture = testlib.LogCapturer()
+ capture.start()
+ try:
+ exc = self.assertRaises(mitogen.core.CallError, lambda:
+ l2.call(call_service_in, l1, MyService.name(), 'privileged_op'))
+ finally:
+ capture.stop()
msg = mitogen.service.Invoker.unauthorized_msg % (
u'privileged_op',
MyService.name(),
@@ -89,5 +99,18 @@ class PermissionTest(testlib.RouterMixin, testlib.TestCase):
self.assertTrue(msg in exc.args[0])
+class CloseTest(testlib.RouterMixin, testlib.TestCase):
+ klass = mitogen.service.Pool
+
+ def test_receiver_closed(self):
+ pool = self.klass(router=self.router, services=[])
+ pool.stop()
+ self.assertEquals(None, pool._receiver.handle)
+
+ e = self.assertRaises(mitogen.core.ChannelError,
+ lambda: self.router.myself().call_service(MyService, 'foobar'))
+ self.assertEquals(e.args[0], self.router.invalid_handle_msg)
+
+
if __name__ == '__main__':
unittest2.main()
diff --git a/tests/show_docker_hostname.py b/tests/show_docker_hostname.py
deleted file mode 100644
index 995c744b..00000000
--- a/tests/show_docker_hostname.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-"""
-For use by the Travis scripts, just print out the hostname of the Docker
-daemon from the environment.
-"""
-
-import testlib
-print(testlib.get_docker_host())
diff --git a/tests/signals_test.py b/tests/signals_test.py
new file mode 100644
index 00000000..79b59e8a
--- /dev/null
+++ b/tests/signals_test.py
@@ -0,0 +1,45 @@
+
+import unittest2
+
+import testlib
+import mitogen.core
+
+
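+# Minimal object for mitogen.core.listen()/fire() to attach signal listeners to.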
+class Thing:
+ pass
+
+
+class ListenFireTest(testlib.TestCase):
+ def test_no_args(self):
+ thing = Thing()
+ latch = mitogen.core.Latch()
+ mitogen.core.listen(thing, 'event',
+ lambda: latch.put('event fired'))
+
+ mitogen.core.fire(thing, 'event')
+ self.assertEquals('event fired', latch.get())
+ self.assertTrue(latch.empty())
+
+ def test_with_args(self):
+ thing = Thing()
+ latch = mitogen.core.Latch()
+ mitogen.core.listen(thing, 'event', latch.put)
+ mitogen.core.fire(thing, 'event', 'event fired')
+ self.assertEquals('event fired', latch.get())
+ self.assertTrue(latch.empty())
+
+ def test_two_listeners(self):
+ thing = Thing()
+ latch = mitogen.core.Latch()
+ latch2 = mitogen.core.Latch()
+ mitogen.core.listen(thing, 'event', latch.put)
+ mitogen.core.listen(thing, 'event', latch2.put)
+ mitogen.core.fire(thing, 'event', 'event fired')
+ self.assertEquals('event fired', latch.get())
+ self.assertEquals('event fired', latch2.get())
+ self.assertTrue(latch.empty())
+ self.assertTrue(latch2.empty())
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/ssh_test.py b/tests/ssh_test.py
index efca057d..496710b8 100644
--- a/tests/ssh_test.py
+++ b/tests/ssh_test.py
@@ -1,5 +1,6 @@
import os
import sys
+import tempfile
import mitogen
import mitogen.ssh
@@ -11,21 +12,54 @@ import testlib
import plain_old_module
-class FakeSshTest(testlib.RouterMixin, unittest2.TestCase):
+class StubSshMixin(testlib.RouterMixin):
+ """
+    Mix-in that provides :meth:`stub_ssh`, which executes the stub 'stub-ssh.py'.
+ """
+ def stub_ssh(self, STUBSSH_MODE=None, **kwargs):
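+        # STUBSSH_MODE selects the behaviour of stubs/stub-ssh.py, e.g. 'ask', 'permdenied_classic' or 'permdenied_75'.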
+ os.environ['STUBSSH_MODE'] = str(STUBSSH_MODE)
+ try:
+ return self.router.ssh(
+ hostname='hostname',
+ username='mitogen__has_sudo',
+ ssh_path=testlib.data_path('stubs/stub-ssh.py'),
+ **kwargs
+ )
+ finally:
+ del os.environ['STUBSSH_MODE']
+
+
+class ConstructorTest(testlib.RouterMixin, testlib.TestCase):
def test_okay(self):
context = self.router.ssh(
hostname='hostname',
username='mitogen__has_sudo',
- ssh_path=testlib.data_path('fakessh.py'),
+ ssh_path=testlib.data_path('stubs/stub-ssh.py'),
)
#context.call(mitogen.utils.log_to_file, '/tmp/log')
#context.call(mitogen.utils.disable_site_packages)
self.assertEquals(3, context.call(plain_old_module.add, 1, 2))
-class SshTest(testlib.DockerMixin, unittest2.TestCase):
+class SshTest(testlib.DockerMixin, testlib.TestCase):
stream_class = mitogen.ssh.Stream
+ def test_debug_decoding(self):
+ # ensure filter_debug_logs() decodes the logged string.
+ capture = testlib.LogCapturer()
+ capture.start()
+ try:
+ context = self.docker_ssh(
+ username='mitogen__has_sudo',
+ password='has_sudo_password',
+ ssh_debug_level=3,
+ )
+ finally:
+ s = capture.stop()
+
+ expect = "%s: debug1: Reading configuration data" % (context.name,)
+ self.assertTrue(expect in s)
+
def test_stream_name(self):
context = self.docker_ssh(
username='mitogen__has_sudo',
@@ -105,8 +139,49 @@ class SshTest(testlib.DockerMixin, unittest2.TestCase):
context.call(plain_old_module.get_sentinel_value),
)
+ def test_enforce_unknown_host_key(self):
+ fp = tempfile.NamedTemporaryFile()
+ try:
+ e = self.assertRaises(mitogen.ssh.HostKeyError,
+ lambda: self.docker_ssh(
+ username='mitogen__has_sudo_pubkey',
+ password='has_sudo_password',
+ ssh_args=['-o', 'UserKnownHostsFile ' + fp.name],
+ check_host_keys='enforce',
+ )
+ )
+ self.assertEquals(e.args[0], mitogen.ssh.Stream.hostkey_failed_msg)
+ finally:
+ fp.close()
+
+ def test_accept_enforce_host_keys(self):
+ fp = tempfile.NamedTemporaryFile()
+ try:
+ context = self.docker_ssh(
+ username='mitogen__has_sudo',
+ password='has_sudo_password',
+ ssh_args=['-o', 'UserKnownHostsFile ' + fp.name],
+ check_host_keys='accept',
+ )
+ context.shutdown(wait=True)
+
+ fp.seek(0)
+ # Lame test, but we're about to use enforce mode anyway, which
+ # verifies the file contents.
+ self.assertTrue(len(fp.read()) > 0)
-class BannerTest(testlib.DockerMixin, unittest2.TestCase):
+ context = self.docker_ssh(
+ username='mitogen__has_sudo',
+ password='has_sudo_password',
+ ssh_args=['-o', 'UserKnownHostsFile ' + fp.name],
+ check_host_keys='enforce',
+ )
+ context.shutdown(wait=True)
+ finally:
+ fp.close()
+
+
+class BannerTest(testlib.DockerMixin, testlib.TestCase):
# Verify the ability to disambiguate random spam appearing in the SSHd's
# login banner from a legitimate password prompt.
stream_class = mitogen.ssh.Stream
@@ -124,39 +199,37 @@ class BannerTest(testlib.DockerMixin, unittest2.TestCase):
self.assertEquals(name, context.name)
-class RequirePtyTest(testlib.DockerMixin, testlib.TestCase):
- stream_class = mitogen.ssh.Stream
+class StubPermissionDeniedTest(StubSshMixin, testlib.TestCase):
+ def test_classic_prompt(self):
+ self.assertRaises(mitogen.ssh.PasswordError,
+ lambda: self.stub_ssh(STUBSSH_MODE='permdenied_classic'))
- def fake_ssh(self, FAKESSH_MODE=None, **kwargs):
- os.environ['FAKESSH_MODE'] = str(FAKESSH_MODE)
- try:
- return self.router.ssh(
- hostname='hostname',
- username='mitogen__has_sudo',
- ssh_path=testlib.data_path('fakessh.py'),
- **kwargs
- )
- finally:
- del os.environ['FAKESSH_MODE']
+ def test_openssh_75_prompt(self):
+ self.assertRaises(mitogen.ssh.PasswordError,
+ lambda: self.stub_ssh(STUBSSH_MODE='permdenied_75'))
+
+
+class StubCheckHostKeysTest(StubSshMixin, testlib.TestCase):
+ stream_class = mitogen.ssh.Stream
def test_check_host_keys_accept(self):
# required=true, host_key_checking=accept
- context = self.fake_ssh(FAKESSH_MODE='ask', check_host_keys='accept')
+ context = self.stub_ssh(STUBSSH_MODE='ask', check_host_keys='accept')
self.assertEquals('1', context.call(os.getenv, 'STDERR_WAS_TTY'))
def test_check_host_keys_enforce(self):
# required=false, host_key_checking=enforce
- context = self.fake_ssh(check_host_keys='enforce')
+ context = self.stub_ssh(check_host_keys='enforce')
self.assertEquals(None, context.call(os.getenv, 'STDERR_WAS_TTY'))
def test_check_host_keys_ignore(self):
# required=false, host_key_checking=ignore
- context = self.fake_ssh(check_host_keys='ignore')
+ context = self.stub_ssh(check_host_keys='ignore')
self.assertEquals(None, context.call(os.getenv, 'STDERR_WAS_TTY'))
def test_password_present(self):
# required=true, password is not None
- context = self.fake_ssh(check_host_keys='ignore', password='willick')
+ context = self.stub_ssh(check_host_keys='ignore', password='willick')
self.assertEquals('1', context.call(os.getenv, 'STDERR_WAS_TTY'))
diff --git a/tests/stream_test.py b/tests/stream_test.py
new file mode 100644
index 00000000..d844e610
--- /dev/null
+++ b/tests/stream_test.py
@@ -0,0 +1,33 @@
+
+import unittest2
+import mock
+
+import mitogen.core
+
+import testlib
+
+
+class ReceiveOneTest(testlib.TestCase):
+ klass = mitogen.core.Stream
+
+ def test_corruption(self):
+ broker = mock.Mock()
+ router = mock.Mock()
+
+ stream = self.klass(router, 1)
+ junk = mitogen.core.b('x') * stream.HEADER_LEN
+ stream._input_buf = [junk]
+ stream._input_buf_len = len(junk)
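+        # A full header's worth of junk makes _receive_one() attempt a parse that should fail and log corrupt_msg.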
+
+ capture = testlib.LogCapturer()
+ capture.start()
+ ret = stream._receive_one(broker)
+ #self.assertEquals(1, broker.stop_receive.mock_calls)
+ capture.stop()
+
+ self.assertFalse(ret)
+ self.assertTrue((self.klass.corrupt_msg % (junk,)) in capture.raw())
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/su_test.py b/tests/su_test.py
new file mode 100644
index 00000000..2af17c6e
--- /dev/null
+++ b/tests/su_test.py
@@ -0,0 +1,32 @@
+
+import os
+
+import mitogen
+import mitogen.lxd
+import mitogen.parent
+
+import unittest2
+
+import testlib
+
+
+class ConstructorTest(testlib.RouterMixin, testlib.TestCase):
+ su_path = testlib.data_path('stubs/stub-su.py')
+
+ def run_su(self, **kwargs):
+ context = self.router.su(
+ su_path=self.su_path,
+ **kwargs
+ )
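+        # stubs/stub-su.py records the argv it was invoked with in the ORIGINAL_ARGV environment variable (as a repr'd list), hence the eval().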
+ argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV'))
+ return context, argv
+
+ def test_basic(self):
+ context, argv = self.run_su()
+ self.assertEquals(argv[1], 'root')
+ self.assertEquals(argv[2], '-c')
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/sudo_test.py b/tests/sudo_test.py
new file mode 100644
index 00000000..5bf9f4de
--- /dev/null
+++ b/tests/sudo_test.py
@@ -0,0 +1,101 @@
+
+import os
+
+import mitogen
+import mitogen.lxd
+import mitogen.parent
+
+import unittest2
+
+import testlib
+
+
+class ConstructorTest(testlib.RouterMixin, testlib.TestCase):
+ sudo_path = testlib.data_path('stubs/stub-sudo.py')
+
+ def run_sudo(self, **kwargs):
+ context = self.router.sudo(
+ sudo_path=self.sudo_path,
+ **kwargs
+ )
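+        # stubs/stub-sudo.py records the argv it was invoked with in the ORIGINAL_ARGV environment variable (as a repr'd list), hence the eval().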
+ argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV'))
+ return context, argv
+
+ def test_basic(self):
+ context, argv = self.run_sudo()
+ self.assertEquals(argv[:4], [
+ self.sudo_path,
+ '-u', 'root',
+ '--'
+ ])
+
+ def test_selinux_type_role(self):
+ context, argv = self.run_sudo(
+ selinux_type='setype',
+ selinux_role='serole',
+ )
+ self.assertEquals(argv[:8], [
+ self.sudo_path,
+ '-u', 'root',
+ '-r', 'serole',
+ '-t', 'setype',
+ '--'
+ ])
+
+ def test_reparse_args(self):
+ context, argv = self.run_sudo(
+ sudo_args=['--type', 'setype', '--role', 'serole', '--user', 'user']
+ )
+ self.assertEquals(argv[:8], [
+ self.sudo_path,
+ '-u', 'user',
+ '-r', 'serole',
+ '-t', 'setype',
+ '--'
+ ])
+
+
+class NonEnglishPromptTest(testlib.DockerMixin, testlib.TestCase):
+ # Only mitogen/debian-test has a properly configured sudo.
+ mitogen_test_distro = 'debian'
+
+ def test_password_required(self):
+ ssh = self.docker_ssh(
+ username='mitogen__has_sudo',
+ password='has_sudo_password',
+ )
+ ssh.call(os.putenv, 'LANGUAGE', 'fr')
+ ssh.call(os.putenv, 'LC_ALL', 'fr_FR.UTF-8')
+ e = self.assertRaises(mitogen.core.StreamError,
+ lambda: self.router.sudo(via=ssh)
+ )
+ self.assertTrue(mitogen.sudo.Stream.password_required_msg in str(e))
+
+ def test_password_incorrect(self):
+ ssh = self.docker_ssh(
+ username='mitogen__has_sudo',
+ password='has_sudo_password',
+ )
+ ssh.call(os.putenv, 'LANGUAGE', 'fr')
+ ssh.call(os.putenv, 'LC_ALL', 'fr_FR.UTF-8')
+ e = self.assertRaises(mitogen.core.StreamError,
+ lambda: self.router.sudo(via=ssh, password='x')
+ )
+ self.assertTrue(mitogen.sudo.Stream.password_incorrect_msg in str(e))
+
+ def test_password_okay(self):
+ ssh = self.docker_ssh(
+ username='mitogen__has_sudo',
+ password='has_sudo_password',
+ )
+ ssh.call(os.putenv, 'LANGUAGE', 'fr')
+ ssh.call(os.putenv, 'LC_ALL', 'fr_FR.UTF-8')
+ e = self.assertRaises(mitogen.core.StreamError,
+ lambda: self.router.sudo(via=ssh, password='rootpassword')
+ )
+ self.assertTrue(mitogen.sudo.Stream.password_incorrect_msg in str(e))
+
+
+if __name__ == '__main__':
+ unittest2.main()
diff --git a/tests/testlib.py b/tests/testlib.py
index 63d96233..ef401a78 100644
--- a/tests/testlib.py
+++ b/tests/testlib.py
@@ -3,17 +3,26 @@ import logging
import os
import random
import re
+import signal
import socket
import subprocess
import sys
+import threading
import time
+import traceback
import unittest2
import mitogen.core
+import mitogen.fork
import mitogen.master
import mitogen.utils
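+# faulthandler (stdlib on Python 3, PyPI backport on 2.x) dumps tracebacks on crashes and makes hard hangs diagnosable.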
+try:
+ import faulthandler
+except ImportError:
+ faulthandler = None
+
try:
import urlparse
except ImportError:
@@ -24,6 +33,11 @@ try:
except ImportError:
from io import StringIO
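+# BaseException is new in Python 2.5; alias it so this module still imports on 2.4.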
+try:
+ BaseException
+except NameError:
+ BaseException = Exception
+
LOG = logging.getLogger(__name__)
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
@@ -32,6 +46,17 @@ sys.path.append(DATA_DIR)
if mitogen.is_master:
mitogen.utils.log_to_file()
+if faulthandler is not None:
+ faulthandler.enable()
+
+
+def get_fd_count():
+ """
+ Return the number of FDs open by this process.
+ """
+ import psutil
+ return psutil.Process().num_fds()
+
def data_path(suffix):
path = os.path.join(DATA_DIR, suffix)
@@ -53,9 +78,17 @@ def subprocess__check_output(*popenargs, **kwargs):
raise subprocess.CalledProcessError(retcode, cmd)
return output
+
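+# subprocess.Popen.terminate() appeared in Python 2.6; fall back to SIGTERM on older versions.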
+def Popen__terminate(proc):
+ os.kill(proc.pid, signal.SIGTERM)
+
+
if hasattr(subprocess, 'check_output'):
subprocess__check_output = subprocess.check_output
+if hasattr(subprocess.Popen, 'terminate'):
+ Popen__terminate = subprocess.Popen.terminate
+
def wait_for_port(
host,
@@ -158,14 +191,77 @@ def sync_with_broker(broker, timeout=10.0):
sem.get(timeout=10.0)
+def log_fd_calls():
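+    # Debugging aid: wrap os.pipe, socket.socketpair, os.dup2 and os.dup so every call prints its arguments/result plus a short stack trace, making FD leaks traceable to their origin.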
+ mypid = os.getpid()
+ l = threading.Lock()
+ real_pipe = os.pipe
+ def pipe():
+ l.acquire()
+ try:
+ rv = real_pipe()
+ if mypid == os.getpid():
+ sys.stdout.write('\n%s\n' % (rv,))
+ traceback.print_stack(limit=3)
+ sys.stdout.write('\n')
+ return rv
+ finally:
+ l.release()
+
+ os.pipe = pipe
+
+ real_socketpair = socket.socketpair
+ def socketpair(*args):
+ l.acquire()
+ try:
+ rv = real_socketpair(*args)
+ if mypid == os.getpid():
+ sys.stdout.write('\n%s -> %s\n' % (args, rv))
+ traceback.print_stack(limit=3)
+ sys.stdout.write('\n')
+ return rv
+ finally:
+ l.release()
+
+ socket.socketpair = socketpair
+
+ real_dup2 = os.dup2
+ def dup2(*args):
+ l.acquire()
+ try:
+ real_dup2(*args)
+ if mypid == os.getpid():
+ sys.stdout.write('\n%s\n' % (args,))
+ traceback.print_stack(limit=3)
+ sys.stdout.write('\n')
+ finally:
+ l.release()
+
+ os.dup2 = dup2
+
+ real_dup = os.dup
+ def dup(*args):
+ l.acquire()
+ try:
+ rv = real_dup(*args)
+ if mypid == os.getpid():
+ sys.stdout.write('\n%s -> %s\n' % (args, rv))
+ traceback.print_stack(limit=3)
+ sys.stdout.write('\n')
+ return rv
+ finally:
+ l.release()
+
+ os.dup = dup
+
+
class CaptureStreamHandler(logging.StreamHandler):
def __init__(self, *args, **kwargs):
- super(CaptureStreamHandler, self).__init__(*args, **kwargs)
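+        # Call the base class directly: logging classes are old-style on the ancient 2.x interpreters this suite still targets, so super() is unusable here.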
+ logging.StreamHandler.__init__(self, *args, **kwargs)
self.msgs = []
def emit(self, msg):
self.msgs.append(msg)
- return super(CaptureStreamHandler, self).emit(msg)
+ logging.StreamHandler.emit(self, msg)
class LogCapturer(object):
@@ -203,6 +299,45 @@ class LogCapturer(object):
class TestCase(unittest2.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ # This is done in setUpClass() so we have a chance to run before any
+ # Broker() instantiations in setUp() etc.
+ mitogen.fork.on_fork()
+ cls._fd_count_before = get_fd_count()
+ super(TestCase, cls).setUpClass()
+
+ ALLOWED_THREADS = set([
+ 'MainThread',
+ 'mitogen.master.join_thread_async'
+ ])
+
+ def _teardown_check_threads(self):
+ counts = {}
+ for thread in threading.enumerate():
+ name = thread.getName()
+ # Python 2.4: enumerate() may return stopped threads.
+ assert (not thread.isAlive()) or name in self.ALLOWED_THREADS, \
+ 'Found thread %r still running after tests.' % (name,)
+ counts[name] = counts.get(name, 0) + 1
+
+ for name in counts:
+ assert counts[name] == 1, \
+                'Found %d copies of thread %r running after tests.' % (counts[name], name)
+
+ def _teardown_check_fds(self):
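+        # Reset Latch's cached socketpairs first so pooled FDs aren't counted as leaks.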
+ mitogen.core.Latch._on_fork()
+ if get_fd_count() != self._fd_count_before:
+            os.system('lsof -p %s' % (os.getpid(),))
+ assert 0, "%s leaked FDs. Count before: %s, after: %s" % (
+ self, self._fd_count_before, get_fd_count(),
+ )
+
+ def tearDown(self):
+ self._teardown_check_threads()
+ self._teardown_check_fds()
+ super(TestCase, self).tearDown()
+
def assertRaises(self, exc, func, *args, **kwargs):
"""Like regular assertRaises, except return the exception that was
raised. Can't use context manager because tests must run on Python2.4"""
@@ -228,13 +363,19 @@ def get_docker_host():
class DockerizedSshDaemon(object):
- image = None
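+    # MITOGEN_TEST_DISTRO, e.g. 'debian' or 'debian-py3', selects the mitogen/<distro>-test image and the target Python interpreter.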
+ mitogen_test_distro = os.environ.get('MITOGEN_TEST_DISTRO', 'debian')
+ if '-' in mitogen_test_distro:
+ distro, _py3 = mitogen_test_distro.split('-')
+ else:
+ distro = mitogen_test_distro
+ _py3 = None
+
+ if _py3 == 'py3':
+ python_path = '/usr/bin/python3'
+ else:
+ python_path = '/usr/bin/python'
- def get_image(self):
- if not self.image:
- distro = os.environ.get('MITOGEN_TEST_DISTRO', 'debian')
- self.image = 'mitogen/%s-test' % (distro,)
- return self.image
+ image = 'mitogen/%s-test' % (distro,)
# 22/tcp -> 0.0.0.0:32771
PORT_RE = re.compile(r'([^/]+)/([^ ]+) -> ([^:]+):(.*)')
@@ -260,7 +401,7 @@ class DockerizedSshDaemon(object):
'--privileged',
'--publish-all',
'--name', self.container_name,
- self.get_image()
+ self.image,
]
subprocess__check_output(args)
self._get_container_port()
@@ -281,13 +422,15 @@ class DockerizedSshDaemon(object):
class BrokerMixin(object):
broker_class = mitogen.master.Broker
+ broker_shutdown = False
def setUp(self):
super(BrokerMixin, self).setUp()
self.broker = self.broker_class()
def tearDown(self):
- self.broker.shutdown()
+ if not self.broker_shutdown:
+ self.broker.shutdown()
self.broker.join()
super(BrokerMixin, self).tearDown()
@@ -320,6 +463,7 @@ class DockerMixin(RouterMixin):
kwargs.setdefault('port', self.dockerized_ssh.port)
kwargs.setdefault('check_host_keys', 'ignore')
kwargs.setdefault('ssh_debug_level', 3)
+ kwargs.setdefault('python_path', self.dockerized_ssh.python_path)
return self.router.ssh(**kwargs)
def docker_ssh_any(self, **kwargs):
diff --git a/tests/two_three_compat_test.py b/tests/two_three_compat_test.py
index babbbe39..f30a233e 100644
--- a/tests/two_three_compat_test.py
+++ b/tests/two_three_compat_test.py
@@ -8,10 +8,11 @@ import mitogen.core
import mitogen.master
import testlib
+import simple_pkg.ping
-def roundtrip(*args):
- return args
+# TODO: this is a joke. 2/3 interop is one of the hardest bits to get right.
+# There should be 100 tests in this file.
class TwoThreeCompatTest(testlib.RouterMixin, testlib.TestCase):
if mitogen.core.PY3:
@@ -20,10 +21,10 @@ class TwoThreeCompatTest(testlib.RouterMixin, testlib.TestCase):
python_path = 'python3'
def test_succeeds(self):
- spare = self.router.fork()
+ spare = self.router.local()
target = self.router.local(python_path=self.python_path)
- spare2, = target.call(roundtrip, spare)
+ spare2, = target.call(simple_pkg.ping.ping, spare)
self.assertEquals(spare.context_id, spare2.context_id)
self.assertEquals(spare.name, spare2.name)
diff --git a/tests/types_test.py b/tests/types_test.py
index 4f80e076..8f120931 100644
--- a/tests/types_test.py
+++ b/tests/types_test.py
@@ -1,4 +1,6 @@
+import sys
+
try:
from io import StringIO
from io import BytesIO
@@ -11,8 +13,10 @@ import unittest2
import mitogen.core
from mitogen.core import b
+import testlib
+
-class BlobTest(unittest2.TestCase):
+class BlobTest(testlib.TestCase):
klass = mitogen.core.Blob
def make(self):
@@ -24,14 +28,14 @@ class BlobTest(unittest2.TestCase):
def test_decays_on_constructor(self):
blob = self.make()
- self.assertEquals(b('x')*128, mitogen.core.BytesType(blob))
+ self.assertEquals(b('x') * 128, mitogen.core.BytesType(blob))
def test_decays_on_write(self):
blob = self.make()
io = BytesIO()
io.write(blob)
self.assertEquals(128, io.tell())
- self.assertEquals(b('x')*128, io.getvalue())
+ self.assertEquals(b('x') * 128, io.getvalue())
def test_message_roundtrip(self):
blob = self.make()
@@ -43,7 +47,7 @@ class BlobTest(unittest2.TestCase):
mitogen.core.BytesType(blob2))
-class SecretTest(unittest2.TestCase):
+class SecretTest(testlib.TestCase):
klass = mitogen.core.Secret
def make(self):
@@ -74,5 +78,66 @@ class SecretTest(unittest2.TestCase):
mitogen.core.b(secret2))
+class KwargsTest(testlib.TestCase):
+ klass = mitogen.core.Kwargs
+
+ def test_empty(self):
+ kw = self.klass({})
+ self.assertEquals({}, kw)
+ self.assertEquals('Kwargs({})', repr(kw))
+ klass, (dct,) = kw.__reduce__()
+ self.assertTrue(klass is self.klass)
+ self.assertTrue(type(dct) is dict)
+ self.assertEquals({}, dct)
+
+ @unittest2.skipIf(condition=(sys.version_info >= (2, 6)),
+ reason='py<2.6 only')
+ def test_bytes_conversion(self):
+ kw = self.klass({u'key': 123})
+ self.assertEquals({'key': 123}, kw)
+ self.assertEquals("Kwargs({'key': 123})", repr(kw))
+
+ @unittest2.skipIf(condition=not mitogen.core.PY3,
+ reason='py3 only')
+ def test_unicode_conversion(self):
+ kw = self.klass({mitogen.core.b('key'): 123})
+ self.assertEquals({u'key': 123}, kw)
+ self.assertEquals("Kwargs({'key': 123})", repr(kw))
+ klass, (dct,) = kw.__reduce__()
+ self.assertTrue(klass is self.klass)
+ self.assertTrue(type(dct) is dict)
+ self.assertEquals({u'key': 123}, dct)
+ key, = dct
+ self.assertTrue(type(key) is mitogen.core.UnicodeType)
+
+
+class AdornedUnicode(mitogen.core.UnicodeType):
+ pass
+
+
+class ToTextTest(testlib.TestCase):
+ func = staticmethod(mitogen.core.to_text)
+
+ def test_bytes(self):
+ s = self.func(mitogen.core.b('bytes'))
+ self.assertEquals(mitogen.core.UnicodeType, type(s))
+ self.assertEquals(s, u'bytes')
+
+ def test_unicode(self):
+ s = self.func(u'text')
+ self.assertEquals(mitogen.core.UnicodeType, type(s))
+ self.assertEquals(s, u'text')
+
+ def test_adorned_unicode(self):
+ s = self.func(AdornedUnicode(u'text'))
+ self.assertEquals(mitogen.core.UnicodeType, type(s))
+ self.assertEquals(s, u'text')
+
+ def test_integer(self):
+ s = self.func(123)
+ self.assertEquals(mitogen.core.UnicodeType, type(s))
+ self.assertEquals(s, u'123')
+
+
if __name__ == '__main__':
unittest2.main()
diff --git a/tests/unix_test.py b/tests/unix_test.py
new file mode 100644
index 00000000..02dc11a4
--- /dev/null
+++ b/tests/unix_test.py
@@ -0,0 +1,149 @@
+
+import os
+import socket
+import subprocess
+import sys
+import time
+
+import unittest2
+
+import mitogen
+import mitogen.master
+import mitogen.service
+import mitogen.unix
+
+import testlib
+
+
+class MyService(mitogen.service.Service):
+ def __init__(self, latch, **kwargs):
+ super(MyService, self).__init__(**kwargs)
+        # used to wake the main thread once the client has made its request
+ self.latch = latch
+
+ @classmethod
+ def name(cls):
+ # Because this is loaded from both __main__ and whatever unit2 does,
+ # specify a fixed name.
+ return 'unix_test.MyService'
+
+ @mitogen.service.expose(policy=mitogen.service.AllowParents())
+ def ping(self, msg):
+ self.latch.put(None)
+ return {
+ 'src_id': msg.src_id,
+ 'auth_id': msg.auth_id,
+ }
+
+
+class IsPathDeadTest(testlib.TestCase):
+ func = staticmethod(mitogen.unix.is_path_dead)
+ path = '/tmp/stale-socket'
+
+ def test_does_not_exist(self):
+ self.assertTrue(self.func('/tmp/does-not-exist'))
+
+ def make_socket(self):
+ if os.path.exists(self.path):
+ os.unlink(self.path)
+ s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ s.bind(self.path)
+ return s
+
+ def test_conn_refused(self):
+ s = self.make_socket()
+ s.close()
+ self.assertTrue(self.func(self.path))
+
+ def test_is_alive(self):
+ s = self.make_socket()
+ s.listen(5)
+ self.assertFalse(self.func(self.path))
+ s.close()
+ os.unlink(self.path)
+
+
+class ListenerTest(testlib.RouterMixin, testlib.TestCase):
+ klass = mitogen.unix.Listener
+
+ def test_constructor_basic(self):
+ listener = self.klass(router=self.router)
+ capture = testlib.LogCapturer()
+ capture.start()
+ try:
+ self.assertFalse(mitogen.unix.is_path_dead(listener.path))
+ os.unlink(listener.path)
+            # ensure we catch the 0-byte read error log message
+ self.broker.shutdown()
+ self.broker.join()
+ self.broker_shutdown = True
+ finally:
+ capture.stop()
+
+
+class ClientTest(testlib.TestCase):
+ klass = mitogen.unix.Listener
+
+ def _try_connect(self, path):
+        # give the server a chance to set up its listener
+ for x in range(10):
+ try:
+ return mitogen.unix.connect(path)
+ except socket.error:
+ if x == 9:
+ raise
+ time.sleep(0.1)
+
+ def _test_simple_client(self, path):
+ router, context = self._try_connect(path)
+ self.assertEquals(0, context.context_id)
+ self.assertEquals(1, mitogen.context_id)
+ self.assertEquals(0, mitogen.parent_id)
+ resp = context.call_service(service_name=MyService, method_name='ping')
+ self.assertEquals(mitogen.context_id, resp['src_id'])
+ self.assertEquals(0, resp['auth_id'])
+ router.broker.shutdown()
+ router.broker.join()
+ os.unlink(path)
+
+ @classmethod
+ def _test_simple_server(cls, path):
+ router = mitogen.master.Router()
+ latch = mitogen.core.Latch()
+ try:
+ try:
+ listener = cls.klass(path=path, router=router)
+ pool = mitogen.service.Pool(router=router, services=[
+ MyService(latch=latch, router=router),
+ ])
+ latch.get()
+                # give the broker a chance to deliver the service response
+ time.sleep(0.1)
+ finally:
+ pool.shutdown()
+ pool.join()
+ router.broker.shutdown()
+ router.broker.join()
+ finally:
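+            # os._exit() skips interpreter teardown, which could otherwise hang on any thread the shutdown above failed to stop.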
+ os._exit(0)
+
+ def test_simple(self):
+ path = mitogen.unix.make_socket_path()
+ proc = subprocess.Popen(
+ [sys.executable, __file__, 'ClientTest_server', path]
+ )
+ try:
+ self._test_simple_client(path)
+ finally:
+ # TODO :)
+ mitogen.context_id = 0
+ mitogen.parent_id = None
+ mitogen.parent_ids = []
+ proc.wait()
+
+
+if __name__ == '__main__':
+ if len(sys.argv) == 3 and sys.argv[1] == 'ClientTest_server':
+ ClientTest._test_simple_server(path=sys.argv[2])
+ else:
+ unittest2.main()
diff --git a/tests/utils_test.py b/tests/utils_test.py
index b2e0aa9e..a70b23dc 100644
--- a/tests/utils_test.py
+++ b/tests/utils_test.py
@@ -1,10 +1,18 @@
#!/usr/bin/env python
+import os
+import tempfile
+
import unittest2
+import mock
import mitogen.core
+import mitogen.parent
import mitogen.master
import mitogen.utils
+from mitogen.core import b
+
+import testlib
def func0(router):
@@ -16,7 +24,7 @@ def func(router):
return router
-class RunWithRouterTest(unittest2.TestCase):
+class RunWithRouterTest(testlib.TestCase):
# test_shutdown_on_exception
# test_shutdown_on_success
@@ -26,7 +34,7 @@ class RunWithRouterTest(unittest2.TestCase):
self.assertFalse(router.broker._thread.isAlive())
-class WithRouterTest(unittest2.TestCase):
+class WithRouterTest(testlib.TestCase):
def test_with_broker(self):
router = func()
self.assertIsInstance(router, mitogen.master.Router)
@@ -40,7 +48,7 @@ class Unicode(mitogen.core.UnicodeType): pass
class Bytes(mitogen.core.BytesType): pass
-class CastTest(unittest2.TestCase):
+class CastTest(testlib.TestCase):
def test_dict(self):
self.assertEqual(type(mitogen.utils.cast({})), dict)
self.assertEqual(type(mitogen.utils.cast(Dict())), dict)
@@ -84,7 +92,7 @@ class CastTest(unittest2.TestCase):
self.assertEqual(type(mitogen.utils.cast(Unicode())), mitogen.core.UnicodeType)
def test_bytes(self):
- self.assertEqual(type(mitogen.utils.cast(b'')), mitogen.core.BytesType)
+ self.assertEqual(type(mitogen.utils.cast(b(''))), mitogen.core.BytesType)
self.assertEqual(type(mitogen.utils.cast(Bytes())), mitogen.core.BytesType)
def test_unknown(self):
diff --git a/tox.ini b/tox.ini
index 6bf8bb53..8a4ef364 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,18 +1,41 @@
[tox]
envlist =
+ init,
py26,
py27,
py35,
py36,
+ py37,
+ report,
[testenv]
+usedevelop = True
deps =
-r{toxinidir}/dev_requirements.txt
+ -r{toxinidir}/tests/ansible/requirements.txt
commands =
{posargs:bash run_tests}
whitelist_externals =
bash
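+# NOCOVERAGE_* make each per-interpreter run_tests invocation skip coverage erase/report; the init and report envs below handle those steps once.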
+setenv =
+ NOCOVERAGE_ERASE = 1
+ NOCOVERAGE_REPORT = 1
+
+[testenv:init]
+commands =
+ coverage erase
+deps =
+ coverage
+
+[testenv:report]
+commands =
+ coverage html
+ echo "coverage report is at file://{toxinidir}/htmlcov/index.html"
+deps =
+ coverage
+whitelist_externals =
+ echo
[testenv:docs]
basepython = python